/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <memory>
#include <string>
#include <vector>
#ifdef _OPENMP
#include <omp.h>
#endif  // _OPENMP

#include "common/types.h"
#include "framework/executor.h"
#include "framework/load_ops.h"
#include "framework/loader.h"
#include "framework/tensor.h"
#ifdef PADDLE_MOBILE_CL
#include "framework/cl/cl_engine.h"
#endif

namespace paddle_mobile {

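// A minimal CPU usage sketch (the model path, input shape, and thread count
// below are hypothetical and for illustration only):
//
//   paddle_mobile::PaddleMobile<paddle_mobile::CPU> engine;
//   engine.SetThreadNum(4);
//   if (engine.Load("/path/to/model_dir", /*optimize=*/true)) {
//     std::vector<float> input(1 * 3 * 224 * 224, 1.f);
//     std::vector<int64_t> dims = {1, 3, 224, 224};
//     std::vector<float> output = engine.Predict(input, dims);
//   }
//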
template <typename Dtype = CPU, Precision P = Precision::FP32>
class PaddleMobile {
  typedef typename PrecisionTrait<P>::ptype Ptype;

 public:
  PaddleMobile() {
#ifndef PADDLE_MOBILE_CL
    bool is_gpu = std::is_same<DeviceType<kGPU_CL>, Dtype>::value;
    PADDLE_MOBILE_ENFORCE(!is_gpu,
                          "GPU is not enabled in CMakeLists but GPU code is being run");
#endif
  }
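  // Loads a model from the files under `dirname`; `optimize` enables program
  // optimization passes and `quantification` marks quantized parameter files.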
  bool Load(const std::string &dirname, bool optimize = false,
            bool quantification = false, int batch_size = 1,
            bool loddable = false);

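  // Loads a model from an explicit model file and a combined parameter file.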
  bool Load(const std::string &model_path, const std::string &para_path,
            bool optimize = false, bool quantification = false,
            int batch_size = 1, bool loddable = false);

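  // Runs inference on the input tensor and returns the output tensor.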
  std::shared_ptr<framework::Tensor> Predict(const framework::Tensor &t);

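  // Runs inference on a LoDTensor input, for models that carry LoD
  // (variable-length sequence) information.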
  std::shared_ptr<framework::Tensor> PredictLod(const framework::LoDTensor &t);

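  // Runs inference on raw input values with the given shape `dims` and
  // returns the flattened output values.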
  std::vector<Ptype> Predict(const std::vector<Ptype> &input,
                             const std::vector<int64_t> &dims);

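  // Loads a combined model and its parameters from in-memory buffers.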
  bool LoadCombinedMemory(size_t model_len, const uint8_t *model_buf,
                          size_t combined_params_len,
                          uint8_t *combined_params_buf);

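  // Sets the number of threads used for CPU inference (effective with OpenMP).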
  void SetThreadNum(int num);
  void Clear();
  double GetCPUPredictTime();

  ~PaddleMobile();

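  // FPGA-specific interface: feed input data, fetch (intermediate) results,
  // and run the program over a sub-range of its operators.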
#ifdef PADDLE_MOBILE_FPGA
  void InjectVariable(const framework::Tensor &t, std::string var_name);
  void FeedData(const framework::Tensor &t);
  std::shared_ptr<framework::Tensor> FetchResult(int id = -1);
  void Predict_From_To(int start = 0, int end = -1);
  void Predict_From(int start);
  void Predict_To(int end);
#endif

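  // OpenCL (GPU) specific interface.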
#ifdef PADDLE_MOBILE_CL
 public:
  void SetCLPath(std::string cl_path);
  double GetGPUPredictTime();
  int readText(const char *kernelPath,
               char **pcode);  // reads a text file into pcode and returns the string length
#endif

 private:
  std::shared_ptr<framework::Loader<Dtype, P>> loader_;
  std::shared_ptr<framework::Executor<Dtype, P>> executor_;
};

}  // namespace paddle_mobile