// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
 * This file defines PaddlePredictor, the API for Paddle Lite. It supports
 * multiple hardware backends, including ARM, X86, OpenCL, CUDA and so on.
 */

#ifndef PADDLE_LITE_API_H_  // NOLINT
#define PADDLE_LITE_API_H_
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle_place.h"  // NOLINT

namespace paddle {
namespace lite_api {

using shape_t = std::vector<int64_t>;
using lod_t = std::vector<std::vector<uint64_t>>;

enum class LiteModelType { kProtobuf = 0, kNaiveBuffer, UNK };

// Return true if the current device supports the OpenCL backend.
LITE_API bool IsOpenCLBackendValid();
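
// A minimal usage sketch (assumes an OpenCL-enabled build; the fallback
// handling is left to the caller):
//
//   if (!IsOpenCLBackendValid()) {
//     // fall back to CPU-only valid places
//   }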

struct LITE_API Tensor {
  explicit Tensor(void* raw);
  explicit Tensor(const void* raw);

  void Resize(const shape_t& shape);

  /// Readonly data.
  template <typename T>
  const T* data() const;

  template <typename T>
  T* mutable_data(TargetType type = TargetType::kHost) const;

  template <typename T, TargetType type = TargetType::kHost>
  void CopyFromCpu(const T* data);

  template <typename T>
  void CopyToCpu(T* data) const;
  /// Shape of the tensor.
  shape_t shape() const;
  TargetType target() const;
  PrecisionType precision() const;

  // LoD of the tensor
  lod_t lod() const;

  // Set LoD of the tensor
  void SetLoD(const lod_t& lod);

 private:
  void* raw_tensor_;
};
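
// Usage sketch for Tensor (illustrative only; `predictor` stands for a
// PaddlePredictor instance, declared below, and the shape is an assumption
// of the example):
//
//   auto input = predictor->GetInput(0);
//   input->Resize({1, 3, 224, 224});  // NCHW shape
//   float* in_data = input->mutable_data<float>();
//   for (int i = 0; i < 1 * 3 * 224 * 224; ++i) in_data[i] = 1.f;
//   // ... run the predictor, then read the result:
//   auto output = predictor->GetOutput(0);
//   const float* out_data = output->data<float>();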

/// The PaddlePredictor defines the basic interfaces for different kinds of
/// predictors.
class LITE_API PaddlePredictor {
 public:
  PaddlePredictor() = default;

  /// Get i-th input.
  virtual std::unique_ptr<Tensor> GetInput(int i) = 0;

  /// Get i-th output.
  virtual std::unique_ptr<const Tensor> GetOutput(int i) const = 0;

  virtual void Run() = 0;
  virtual std::shared_ptr<PaddlePredictor> Clone() = 0;
  virtual std::shared_ptr<PaddlePredictor> Clone(
      const std::vector<std::string>& var_names) = 0;

  virtual std::string GetVersion() const = 0;

  // Get input names
  virtual std::vector<std::string> GetInputNames() = 0;
  // Get output names
  virtual std::vector<std::string> GetOutputNames() = 0;
  // Get parameter names
  virtual std::vector<std::string> GetParamNames();

  // Get input by name
  virtual std::unique_ptr<Tensor> GetInputByName(const std::string& name) = 0;

  /// Get a read-only tensor; return null if no tensor named `name` exists.
  virtual std::unique_ptr<const Tensor> GetTensor(
      const std::string& name) const = 0;
  /// Get a mutable tensor; return null if no tensor named `name` exists.
  /// Internal inference API, not recommended for common use.
  virtual std::unique_ptr<Tensor> GetMutableTensor(const std::string& name);

  /// Persist the optimized model to disk. This API is only supported by
  /// CxxConfig, and the persisted model can be reused for MobileConfig.
  virtual void SaveOptimizedModel(
      const std::string& model_dir,
      LiteModelType model_type = LiteModelType::kProtobuf,
      bool record_info = false);

  virtual ~PaddlePredictor() = default;

 protected:
  int threads_{1};
  lite_api::PowerMode mode_{lite_api::LITE_POWER_NO_BIND};
};
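
// Typical call sequence (a sketch; `config` can be any of the configs
// declared below, and the input shape is an assumption of the example):
//
//   std::shared_ptr<PaddlePredictor> predictor = CreatePaddlePredictor(config);
//   auto input = predictor->GetInput(0);
//   input->Resize({1, 3, 224, 224});
//   // ... fill input->mutable_data<float>() ...
//   predictor->Run();
//   auto output = predictor->GetOutput(0);
//   // read output->data<float>() according to output->shape()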

/// Base class for all the configs.
class LITE_API ConfigBase {
  std::string model_dir_;
  int threads_{1};
  PowerMode mode_{LITE_POWER_NO_BIND};
  // GPU: whether to enable OpenCL kernel tuning
  bool enable_opencl_tune_{false};
  // directory used to cache the generated subgraph models for NPU/XPU/...
  std::string subgraph_model_cache_dir_{""};
  int device_id_{0};

 public:
  explicit ConfigBase(PowerMode mode = LITE_POWER_NO_BIND, int threads = 1);
  // set model_dir
  void set_model_dir(const std::string& x) { model_dir_ = x; }
  const std::string& model_dir() const { return model_dir_; }
  // set threads
  void set_threads(int threads);
  int threads() const { return threads_; }
  // set power mode
  void set_power_mode(PowerMode mode);
  PowerMode power_mode() const { return mode_; }
  // set GPU OpenCL tune
  void set_opencl_tune(bool enable_tune);
  bool opencl_tune() const { return enable_opencl_tune_; }
  // set subgraph model cache dir
  void set_subgraph_model_cache_dir(std::string subgraph_model_cache_dir) {
    subgraph_model_cache_dir_ = subgraph_model_cache_dir;
  }
  const std::string& subgraph_model_cache_dir() const {
    return subgraph_model_cache_dir_;
  }
  // set device ID
  void set_device_id(int device_id) { device_id_ = device_id; }
  int get_device_id() const { return device_id_; }
};
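
// The ConfigBase options are shared by every concrete config. A sketch,
// assuming an ARM build where the power mode takes effect (MobileConfig is
// declared below; the model directory is hypothetical):
//
//   MobileConfig config;
//   config.set_model_dir("./mobilenet_v1");
//   config.set_threads(2);
//   config.set_power_mode(LITE_POWER_HIGH);  // PowerMode from paddle_place.h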

/// CxxConfig is the config for the Full feature predictor.
class LITE_API CxxConfig : public ConfigBase {
  std::vector<Place> valid_places_;
  std::string model_file_;
  std::string param_file_;
  std::vector<std::string> passes_internal_{};
  bool model_from_memory_{false};
#ifdef LITE_WITH_X86
  int x86_math_library_math_threads_ = 1;
#endif
#ifdef LITE_WITH_CUDA
  bool multi_stream_{false};
#endif
#ifdef LITE_WITH_MLU
  lite_api::MLUCoreVersion mlu_core_version_{lite_api::MLUCoreVersion::MLU_270};
  int mlu_core_number_{1};
  DataLayoutType mlu_input_layout_{DATALAYOUT(kNCHW)};
  std::vector<float> mlu_first_conv_mean_{};
  std::vector<float> mlu_first_conv_std_{};
#endif

 public:
  void set_valid_places(const std::vector<Place>& x) { valid_places_ = x; }
  void set_model_file(const std::string& path) { model_file_ = path; }
  void set_param_file(const std::string& path) { param_file_ = path; }
  void set_model_buffer(const char* model_buffer,
                        size_t model_buffer_size,
                        const char* param_buffer,
                        size_t param_buffer_size) {
    model_file_ = std::string(model_buffer, model_buffer + model_buffer_size);
    param_file_ = std::string(param_buffer, param_buffer + param_buffer_size);
    model_from_memory_ = true;
  }
  // Internal API to choose the passes for model optimizing;
  // it is designed for internal developers and not recommended
  // for common users.
  void set_passes_internal(
      const std::vector<std::string>& passes_internal = {}) {
    passes_internal_ = passes_internal;
  }
  const std::vector<std::string>& get_passes_internal() const {
    return passes_internal_;
  }
  const std::vector<Place>& valid_places() const { return valid_places_; }
  std::string model_file() const { return model_file_; }
  std::string param_file() const { return param_file_; }
  bool model_from_memory() const { return model_from_memory_; }

#ifdef LITE_WITH_X86
  void set_x86_math_library_num_threads(int threads) {
    x86_math_library_math_threads_ = threads;
  }
  int x86_math_library_num_threads() const {
    return x86_math_library_math_threads_;
  }
#endif
#ifdef LITE_WITH_CUDA
  void set_multi_stream(bool multi_stream) { multi_stream_ = multi_stream; }
  bool multi_stream() const { return multi_stream_; }
#endif

#ifdef LITE_WITH_MLU
  // set MLU core version, which is used when compiling MLU kernels
  void set_mlu_core_version(lite_api::MLUCoreVersion core_version);
  // set MLU core number, which is used when compiling MLU kernels
  void set_mlu_core_number(int core_number);
  // whether to use MLU's first conv kernel. First conv is a special kernel
  // provided by MLU; its input is uint8, and it also needs two 3-dimensional
  // vectors which save all inputs' mean and std values
  // set the 3-dimensional mean vector and 3-dimensional std vector used by
  // MLU's first conv
  void set_mlu_firstconv_param(const std::vector<float>& mean,
                               const std::vector<float>& std);
  // set MLU input layout. User can specify layout of input data to be NHWC,
  // default is NCHW
  void set_mlu_input_layout(DataLayoutType layout);

  lite_api::MLUCoreVersion mlu_core_version() const;
  int mlu_core_number() const;
  DataLayoutType mlu_input_layout() const;
  // std::pair<mean, std>
  std::pair<std::vector<float>, std::vector<float>> mlu_firstconv_param() const;
#endif

  // XPU only, set the size of the workspace memory from L3 cache for the
  // current thread.
  void set_xpu_workspace_l3_size_per_thread(int l3_size = 0xfffc00);
  // XPU only, specify the target device ID for the current thread.
  // **DEPRECATED**, use xpu_set_device() at the very beginning of each worker
  // thread
  void set_xpu_dev_per_thread(int dev_no = 0);
  void set_xpu_multi_encoder_precision(const std::string& precision = "int16");
};
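
// CxxConfig sketch: build a full predictor, then persist the optimized
// model for later MobileConfig deployment (paths are hypothetical; TARGET
// and PRECISION come from paddle_place.h):
//
//   CxxConfig config;
//   config.set_model_dir("./mobilenet_v1");
//   config.set_valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
//   auto predictor = CreatePaddlePredictor(config);
//   predictor->SaveOptimizedModel("./mobilenet_v1_opt",
//                                 LiteModelType::kNaiveBuffer);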

/// MobileConfig is the config for the lightweight predictor, which skips
/// IR optimization and other unnecessary stages.
class LITE_API MobileConfig : public ConfigBase {
  // whether to load data from memory. Model data will be loaded from memory
  // buffer if model_from_memory_ is true.
  bool model_from_memory_{false};

  // model data read from a file or a memory buffer, in combined format.
  std::string lite_model_file_;

  // NOTE: This is a deprecated variable and will be removed in a later
  // release.
  std::string model_buffer_;
  std::string param_buffer_;

 public:
  // set model data in combined format: `set_model_from_file` loads the
  // model from a file, while `set_model_from_buffer` loads it from a
  // memory buffer
  void set_model_from_file(const std::string& x);
  void set_model_from_buffer(const std::string& x);
  // return model data in lite_model_file_, which is in combined format.
  const std::string& lite_model_file() const { return lite_model_file_; }

  // return model_from_memory_, which indicates whether to load model from
  // memory buffer.
  bool model_from_memory() const { return model_from_memory_; }

  // NOTE: This is a deprecated API and will be removed in a later release.
  void set_model_buffer(const char* model_buffer,
                        size_t model_buffer_size,
                        const char* param_buffer,
                        size_t param_buffer_size);

  // NOTE: This is a deprecated API and will be removed in a later release.
  const std::string& model_buffer() const { return model_buffer_; }

  // NOTE: This is a deprecated API and will be removed in a later release.
  const std::string& param_buffer() const { return param_buffer_; }
};
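
// MobileConfig sketch: the optimized model produced by
// PaddlePredictor::SaveOptimizedModel can be loaded either from a file or
// from a buffer already held in memory (the path is hypothetical):
//
//   MobileConfig config;
//   config.set_model_from_file("./mobilenet_v1_opt.nb");
//   // or: config.set_model_from_buffer(model_content_string);
//   auto predictor = CreatePaddlePredictor(config);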

template <typename ConfigT>
LITE_API std::shared_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT&);

}  // namespace lite_api
}  // namespace paddle

#endif  // NOLINT