/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <fstream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/timer.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/profiler.h"

DEFINE_bool(profile, false, "Turn on profiler for fluid");
DECLARE_int32(paddle_num_threads);

namespace paddle {

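// Scan block 0 of the inference program for "feed" and "fetch" ops and index
// them by their "col" attribute, so inputs and outputs can later be bound by
// position (and feeds also by name).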
void NativePaddlePredictor::PrepareFeedFetch() {
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetchs_.size() <= static_cast<size_t>(idx)) {
        fetchs_.resize(idx + 1);
      }
      fetchs_[idx] = op;
    }
  }
}

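// Set up the place, executor, scope, and inference program. When a parent
// scope is given, variables are shared with it through a new sub-scope.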
bool NativePaddlePredictor::Init(
    std::shared_ptr<framework::Scope> parent_scope) {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";

    auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                           : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }
#endif

  // Set the number of CPU math threads; this applies with or without MKLDNN.
  paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);

  if (config_.use_gpu) {
    place_ = paddle::platform::CUDAPlace(config_.device);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  if (parent_scope) {
    scope_ = parent_scope;
    sub_scope_ = &(parent_scope->NewScope());
    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "failed to create sub scope");
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
  }
  executor_.reset(new paddle::framework::Executor(place_));
  // Initialize the inference program
  if (!config_.model_dir.empty()) {
    // Parameters are saved in separate files located in
    // the specified `dirname`.
    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
                                                 config_.model_dir);

  } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    inference_program_ = paddle::inference::Load(
        executor_.get(), scope_.get(), config_.prog_file, config_.param_file);

  } else {
    LOG(ERROR) << "failed to load inference model from " << config_.model_dir;
    return false;
  }

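  // Prepare the program once; Run() reuses this context instead of
  // re-preparing the program on every call.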
  ctx_ = executor_->Prepare(*inference_program_, 0);
  executor_->CreateVariables(*inference_program_,
                             sub_scope_ ? sub_scope_ : scope_.get(), 0);

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();
  return true;
}

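// Dump profiler results (when profiling is on) and release the sub-scope.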
NativePaddlePredictor::~NativePaddlePredictor() {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
#endif
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }
}

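// Feed the inputs, execute the prepared program, and collect the fetch
// results. The batch dimension is taken from the input shapes, so the
// batch_size argument is effectively unused here.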
bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                std::vector<PaddleTensor> *output_data,
                                int batch_size) {
  using Timer = paddle::inference::Timer;
  Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ != nullptr ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed variables";
    return false;
  }
  // Run the inference program.
  // If variables are shared with a parent scope, they need not be re-created.
  executor_->RunPreparedContext(ctx_.get(), scope,
                                false, /* don't create local scope each time*/
143
                                false /* don't create variable each time */);
  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetch results";
    return false;
  }
  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // Work around reused TensorArrays not being cleaned up between runs.
  tensor_array_batch_cleaner_.CollectTensorArrays(scope_.get());
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

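// Create a new predictor that shares this predictor's scope, so the already
// loaded parameters are reused instead of being loaded again.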
std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
    LOG(ERROR) << "failed to call Init";
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return cls;
#else
  // fix manylinux compile error.
  return std::move(cls);
#endif
}

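// Copy the user-provided PaddleTensors into "feed" variables in the given
// scope. Inputs are matched by name when config_.specify_input_name is set,
// otherwise by position.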
bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                    framework::Scope *scope) {
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong number of feed inputs: expected " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor input;
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                inputs[i].data.length());
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name) {
      idx = feed_names_[inputs[i].name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}
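
// Copy one fetched LoDTensor into a PaddleTensor: the shape, the raw data,
// and the LoD information are all transferred.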
template <typename T>
void NativePaddlePredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                        PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op is always in CPU memory, so a plain
  // copy is sufficient.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

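// Convert every "fetch" variable in the scope into an output PaddleTensor.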
bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                     framework::Scope *scope) {
  outputs->resize(fetchs_.size());
  for (size_t i = 0; i < fetchs_.size(); ++i) {
    int idx = boost::get<int>(fetchs_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    if (type == typeid(float)) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == typeid(int64_t)) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else {
      LOG(ERROR) << "unknown type; only float32 and int64 are supported now.";
    }
  }
  return true;
}

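// Factory specialization for the native engine. For GPU configs this checks
// the device id and passes the GPU memory fraction on to gflags before
// constructing and initializing the predictor.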
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
  if (config.use_gpu) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(
        config.fraction_of_gpu_memory, 0.f,
        "fraction_of_gpu_memory in the config should be in the range (0., 1.]");
    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
    std::vector<std::string> flags;
    if (config.fraction_of_gpu_memory >= 0.0f &&
        config.fraction_of_gpu_memory <= 0.95f) {
      // gflags treats the first argument as the program name, so push a
      // dummy placeholder before the real flag.
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(config.fraction_of_gpu_memory);
      flags.push_back(flag);
      framework::InitGflags(flags);
    }
  }
  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return predictor;
#else
  return std::move(predictor);
#endif
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<NativeConfig>(
    const NativeConfig &config) {
  return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
}

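// A minimal usage sketch (the model path below is hypothetical):
//
//   NativeConfig config;
//   config.model_dir = "/path/to/inference_model";
//   config.use_gpu = false;
//   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
//
//   std::vector<PaddleTensor> inputs;   // fill name, shape, dtype, and data
//   std::vector<PaddleTensor> outputs;
//   if (predictor->Run(inputs, &outputs)) {
//     // outputs[i] holds the result of the i-th fetch.
//   }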
}  // namespace paddle