/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

   http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License. */

#pragma once

#include <glog/logging.h>

#include <map>
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/ddim.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/init.h"
#include "paddle/fluid/platform/profiler.h"

namespace paddle {

Y
Yan Chunwei 已提交
33
class NativePaddlePredictor : public PaddlePredictor {
W
Wu Yi 已提交
34
 public:
Y
Yan Chunwei 已提交
35 36
  explicit NativePaddlePredictor(const NativeConfig &config)
      : config_(config) {}
X
Xin Pan 已提交
37

38
  // will only create sub scope if have global scope
T
tensor-tang 已提交
39
  bool Init(std::shared_ptr<framework::Scope> parent_scope);
X
Xin Pan 已提交
40 41

  bool Run(const std::vector<PaddleTensor> &inputs,
42 43
           std::vector<PaddleTensor> *output_data,
           int batch_size = -1) override;
X
Xin Pan 已提交
44 45 46

  std::unique_ptr<PaddlePredictor> Clone() override;

47
  ~NativePaddlePredictor() override;
X
Xin Pan 已提交
48

49
 protected:
X
Xin Pan 已提交
50
  bool SetFeed(const std::vector<PaddleTensor> &input_datas,
51 52 53
               framework::Scope *scope);
  bool GetFetch(std::vector<PaddleTensor> *output_data,
                framework::Scope *scope);
L
luotao1 已提交
54 55 56
  template <typename T>
  void GetFetchOne(const framework::LoDTensor &fetchs,
                   PaddleTensor *output_data);
57
  void PrepareFeedFetch();
X
Xin Pan 已提交
58

Y
Yan Chunwei 已提交
59
  NativeConfig config_;
60 61
  platform::Place place_;
  std::unique_ptr<framework::Executor> executor_;
62
  std::shared_ptr<framework::Scope> scope_;
63 64
  std::unique_ptr<framework::ExecutorPrepareContext> ctx_;
  std::unique_ptr<framework::ProgramDesc> inference_program_;
65 66 67
  std::vector<framework::OpDesc *> feeds_;
  std::map<std::string, size_t> feed_names_;
  std::vector<framework::OpDesc *> fetchs_;
68 69
  // Do not use unique_ptr, use parent scope to delete
  framework::Scope *sub_scope_{nullptr};
X
Xin Pan 已提交
70 71 72
};

}  // namespace paddle