/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <fstream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/details/reset_tensor_array.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/timer.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/profiler.h"

DEFINE_bool(profile, false, "Turn on profiler for fluid");
DECLARE_int32(paddle_num_threads);

namespace paddle {

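// Scan block 0 of the inference program and cache its feed and fetch ops,
// indexed by their "col" attribute, so inputs and outputs can later be bound
// by position (feeds also by variable name).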
void NativePaddlePredictor::PrepareFeedFetch() {
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetchs_.size() <= static_cast<size_t>(idx)) {
        fetchs_.resize(idx + 1);
      }
      fetchs_[idx] = op;
    }
  }
}

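// One-time setup: optionally enable the profiler, set the CPU thread count,
// pick the place (CPU or GPU), create or reuse the scope, load the inference
// program, and prepare the executor. Returns false if the model cannot be
// loaded.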
bool NativePaddlePredictor::Init(
    std::shared_ptr<framework::Scope> parent_scope) {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";

    auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                           : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }
#endif

  // Set the number of CPU threads; this applies with or without MKLDNN.
  paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);

  if (config_.use_gpu) {
    place_ = paddle::platform::CUDAPlace(config_.device);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  if (parent_scope) {
    scope_ = parent_scope;
    sub_scope_ = &(parent_scope->NewScope());
    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "failed to create sub scope");
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
  }
  executor_.reset(new paddle::framework::Executor(place_));
  // Initialize the inference program
  if (!config_.model_dir.empty()) {
    // Parameters are saved in separate files located in
    // the specified `dirname`.
    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
                                                 config_.model_dir);
  } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used
    // in the Python API `fluid.io.save_inference_model`.
    auto exe = executor_.get();
    auto sc = scope_.get();
    inference_program_ = paddle::inference::Load(
        exe, sc, config_.prog_file, config_.param_file);
  } else {
    LOG(ERROR) << "failed to load inference model from " << config_.model_dir;
    return false;
  }

  ctx_ = executor_->Prepare(*inference_program_, 0);
  executor_->CreateVariables(*inference_program_,
                             sub_scope_ ? sub_scope_ : scope_.get(), 0);

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();
  return true;
}

NativePaddlePredictor::~NativePaddlePredictor() {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
#endif
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }
}

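// One inference pass: feed `inputs` into the scope, execute the prepared
// program, then collect the fetch results into `output_data`.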
bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                std::vector<PaddleTensor> *output_data,
                                int batch_size) {
  using Timer = paddle::inference::Timer;
  Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ != nullptr ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }
  // Run the inference program.
  // Variables were already created in Init(), so neither a local scope nor
  // the variables are created here.
  executor_->RunPreparedContext(ctx_.get(), scope,
                                false, /* don't create local scope each time*/
                                false /* don't create variable each time */);
  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }
  VLOG(3) << "predict cost: " << timer.toc() << "ms";

  // Workaround for the bug that reused TensorArrays are not cleaned up.
  tensor_array_batch_cleaner_.CollectTensorArrays(scope_.get());
  tensor_array_batch_cleaner_.ResetTensorArray();
  return true;
}

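// Create another predictor that shares this predictor's scope, so the loaded
// parameters are reused rather than loaded again.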
std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
    LOG(ERROR) << "failed to call Init";
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return cls;
#else
  // fix manylinux compile error.
  return std::move(cls);
#endif
}

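// Copy the user-supplied PaddleTensors into LoDTensors and register them as
// feed variables in `scope`, resolving each feed slot either by input name
// or by position.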
bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                    framework::Scope *scope) {
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor input;
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                inputs[i].data.length());
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name) {
      idx = feed_names_[inputs[i].name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}
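
// Copy one fetched LoDTensor into the user-facing PaddleTensor `output`:
// shape, raw data, and LoD information.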
template <typename T>
void NativePaddlePredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                        PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op should always be in CPU memory, so a
  // plain copy suffices.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

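// Read back every fetch variable from `scope` into `outputs`; only float32
// and int64 results are converted.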
bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                     framework::Scope *scope) {
  outputs->resize(fetchs_.size());
  for (size_t i = 0; i < fetchs_.size(); ++i) {
    int idx = boost::get<int>(fetchs_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    if (type == typeid(float)) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == typeid(int64_t)) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else {
      LOG(ERROR) << "unknown type; only float32 and int64 are supported now.";
    }
  }
  return true;
}

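// Factory specialization for the native engine: validates the GPU settings,
// forwards the memory fraction to gflags, then builds and initializes a
// NativePaddlePredictor.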
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
  if (config.use_gpu) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(config.fraction_of_gpu_memory, 0.f,
                      "fraction_of_gpu_memory in the config should be set to "
                      "range (0., 1.]");
    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
    std::vector<std::string> flags;
    if (config.fraction_of_gpu_memory >= 0.0f &&
        config.fraction_of_gpu_memory <= 0.95f) {
      // InitGflags parses like a command line, so the first element stands in
      // for the program name.
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         std::to_string(config.fraction_of_gpu_memory);
      flags.push_back(flag);
      framework::InitGflags(flags);
    }
  }
  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return predictor;
#else
  return std::move(predictor);
#endif
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<NativeConfig>(
    const NativeConfig &config) {
  return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
}
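
// A minimal usage sketch of the native predictor (illustrative only: the
// model path, feed name, and input shape below are hypothetical, and it
// assumes the PaddleBuf(void *, size_t) constructor declared alongside
// PaddleTensor):
//
//   NativeConfig config;
//   config.model_dir = "/path/to/model";  // hypothetical model directory
//   config.use_gpu = false;
//   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
//
//   std::vector<float> buf = {1.f, 2.f, 3.f};
//   PaddleTensor input;
//   input.name = "x";  // hypothetical feed name
//   input.shape = {1, 3};
//   input.dtype = PaddleDType::FLOAT32;
//   input.data = PaddleBuf(buf.data(), buf.size() * sizeof(float));
//
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({input}, &outputs);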

}  // namespace paddle