/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

// logging.h and windows.h conflict
#define GLOG_NO_ABBREVIATED_SEVERITIES
// solve static linking error in windows
// https://github.com/google/glog/issues/301
#define GOOGLE_GLOG_DLL_DECL

#include <glog/logging.h>

#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/naive_executor.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle_inference_api.h"  // NOLINT

namespace paddle {

class NativePaddlePredictor : public PaddlePredictor {
W
Wu Yi 已提交
42
 public:
Y
Yan Chunwei 已提交
43 44
  explicit NativePaddlePredictor(const NativeConfig &config)
      : config_(config) {}
X
Xin Pan 已提交
45

46
  // will only create sub scope if have global scope
T
tensor-tang 已提交
47
  bool Init(std::shared_ptr<framework::Scope> parent_scope);
X
Xin Pan 已提交
48 49

  bool Run(const std::vector<PaddleTensor> &inputs,
50 51
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;
X
Xin Pan 已提交
52 53 54

  std::unique_ptr<PaddlePredictor> Clone() override;

55
  ~NativePaddlePredictor() override;
X
Xin Pan 已提交
56

57 58
  framework::Scope *scope() { return sub_scope_ ? sub_scope_ : scope_.get(); }

59
 protected:
X
Xin Pan 已提交
60
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
61 62 63
               framework::Scope *scope);
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
L
luotao1 已提交
64 65 66
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
67
  void PrepareFeedFetch();
X
Xin Pan 已提交
68

Y
Yan Chunwei 已提交
69
  NativeConfig config_;
70 71
  platform::Place place_;
  std::unique_ptr<framework::Executor> executor_;
72
  std::shared_ptr<framework::Scope> scope_;
73 74
  std::unique_ptr<framework::ExecutorPrepareContext> ctx_;
  std::unique_ptr<framework::ProgramDesc> inference_program_;
75 76 77
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetchs_;
78 79
  // Do not use unique_ptr, use parent scope to delete
  framework::Scope *sub_scope_{nullptr};
Y
Yan Chunwei 已提交
80
  details::TensorArrayBatchCleaner tensor_array_batch_cleaner_;
X
Xin Pan 已提交
81 82 83
};

}  // namespace paddle