/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <glog/logging.h>

#include <map>
#include <memory>
#include <mutex>  // std::mutex is a member of NativePaddlePredictor
#include <string>
#include <vector>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/paddle_api.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/core/ddim.h"

namespace paddle {

W
wanghuancoder 已提交
38 39 40 41
namespace framework {
class Scope;
}  // namespace framework

Y
Yan Chunwei 已提交
42
class NativePaddlePredictor : public PaddlePredictor {
W
Wu Yi 已提交
43
 public:
Y
Yan Chunwei 已提交
44 45
  explicit NativePaddlePredictor(const NativeConfig &config)
      : config_(config) {}
X
Xin Pan 已提交
46

47
  // will only create sub scope if have global scope
T
tensor-tang 已提交
48
  bool Init(std::shared_ptr<framework::Scope> parent_scope);
X
Xin Pan 已提交
49 50

  bool Run(const std::vector<PaddleTensor> &inputs,
51 52
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;
X
Xin Pan 已提交
53 54 55

  std::unique_ptr<PaddlePredictor> Clone() override;

56
  ~NativePaddlePredictor() override;
X
Xin Pan 已提交
57

58 59
  framework::Scope *scope() { return sub_scope_ ? sub_scope_ : scope_.get(); }

60
 protected:
X
Xin Pan 已提交
61
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
62 63 64
               framework::Scope *scope);
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
L
luotao1 已提交
65 66 67
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
68
  void PrepareFeedFetch();
X
Xin Pan 已提交
69

Y
Yan Chunwei 已提交
70
  NativeConfig config_;
71 72
  platform::Place place_;
  std::unique_ptr<framework::Executor> executor_;
73
  std::shared_ptr<framework::Scope> scope_;
74 75
  std::unique_ptr<framework::ExecutorPrepareContext> ctx_;
  std::unique_ptr<framework::ProgramDesc> inference_program_;
76 77 78
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetchs_;
79 80 81
  // Memory buffer for feed inputs. The temporary LoDTensor will cause serious
  // concurrency problems, wrong results and memory leak, so cache them.
  std::vector<framework::LoDTensor> feed_tensors_;
82 83
  // Do not use unique_ptr, use parent scope to delete
  framework::Scope *sub_scope_{nullptr};
Y
Yan Chunwei 已提交
84
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
Y
Yan Chunwei 已提交
85 86
  // A mutex to make Clone thread safe.
  std::mutex clone_mutex_;
X
Xin Pan 已提交
87 88 89
};

}  // namespace paddle