/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/platform/cpu_helper.h"
#include "paddle/fluid/platform/profiler.h"

DEFINE_bool(profile, false, "Turn on profiler for fluid");
DECLARE_int32(paddle_num_threads);

namespace paddle {
namespace {
using paddle::inference::Timer;

template <class T>
std::string num2str(T a) {
  std::stringstream istr;
  istr << a;
  return istr.str();
}
}  // namespace

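// Scan block 0 of the inference program and index its feed/fetch ops by
// their "col" attribute, so Run() can bind inputs and outputs by position
// or by name.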
void NativePaddlePredictor::PrepareFeedFetch() {
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetchs_.size() <= static_cast<size_t>(idx)) {
        fetchs_.resize(idx + 1);
      }
      fetchs_[idx] = op;
    }
  }
}

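// Set up the predictor: optional profiling, math threads, device place,
// scope, executor, and the inference program, then prepare the execution
// context and the feed/fetch mappings.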
bool NativePaddlePredictor::Init(
    std::shared_ptr<framework::Scope> parent_scope) {
  VLOG(3) << "Predictor::init()";
#if !defined(_WIN32)
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect performance";
    LOG(INFO) << "You can turn it off by setting the gflag '-profile false'";

    auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                           : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }
#endif

  // Set the number of math threads; this applies with or without MKLDNN.
  paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);

  if (config_.use_gpu) {
    place_ = paddle::platform::CUDAPlace(config_.device);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  if (parent_scope) {
    scope_ = parent_scope;
    sub_scope_ = &(parent_scope->NewScope());
    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "failed to create sub scope");
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
  }

  executor_.reset(new paddle::framework::Executor(place_));

  // Initialize the inference program
  if (!config_.model_dir.empty()) {
    // Parameters are saved in separate files sited in
    // the specified `dirname`.
    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
                                                 config_.model_dir);
  } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    inference_program_ = paddle::inference::Load(
        executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
  } else {
    LOG(ERROR) << "failed to load inference model: neither model_dir nor "
                  "prog_file/param_file is set in the config";
    return false;
  }

  ctx_ = executor_->Prepare(*inference_program_, 0);
  executor_->CreateVariables(*inference_program_,
                             sub_scope_ ? sub_scope_ : scope_.get(), 0);

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();
  return true;
}

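// Flush profiler results (if profiling was enabled) and release the sub
// scope created in Init().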
NativePaddlePredictor::~NativePaddlePredictor() {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
#endif
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }
}

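// Feed the inputs into the scope, run the prepared execution context, and
// collect the fetch results into output_data.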
bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                std::vector<PaddleTensor> *output_data,
                                int batch_size) {
  VLOG(3) << "Predictor::predict";
  Timer timer;
  timer.tic();
  // set feed variable
  framework::Scope *scope = sub_scope_ != nullptr ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "failed to set feed";
    return false;
  }
  // Run the inference program
  // if share variables, we need not create variables
  VLOG(4) << "Run prepared context";
  executor_->RunPreparedContext(ctx_.get(), scope,
                                false, /* don't create local scope each time */
                                false /* don't create variable each time */);
  VLOG(4) << "Finish prepared context";
  // get fetch variable
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "failed to get fetches";
    return false;
  }
  VLOG(3) << "predict cost: " << timer.toc() << "ms";
  return true;
}

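// Create another predictor that shares this predictor's scope, so the clone
// reuses the already-loaded parameters.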
std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
  VLOG(3) << "Predictor::clone";
  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
    LOG(ERROR) << "failed to call Init";
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return cls;
#else
  // fix manylinux compile error.
  return std::move(cls);
#endif
}

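// Copy user-provided inputs (data, shape, and LoD) into the scope's feed
// variables, resolving each feed slot by name or by column index.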
bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                    framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size: need " << feeds_.size()
               << " but got " << inputs.size();
    return false;
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor input;
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                inputs[i].data.length());
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name) {
      idx = feed_names_[inputs[i].name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}
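
// Copy one fetched LoDTensor's shape, data, and LoD into a PaddleTensor.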
template <typename T>
void NativePaddlePredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                        PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor produced by the fetch op is always in CPU memory, so a plain
  // memcpy suffices.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

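// Convert every fetch variable in the scope into the corresponding
// PaddleTensor output.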
bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                     framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetchs_.size());
  for (size_t i = 0; i < fetchs_.size(); ++i) {
    int idx = boost::get<int>(fetchs_[i]->GetAttr("col"));
    PADDLE_ENFORCE((size_t)idx == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    if (type == typeid(float)) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == typeid(int64_t)) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else {
      LOG(ERROR) << "unknown fetch type; only float32 and int64 are supported";
    }
  }
  return true;
}

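// Factory for the native engine: validates the GPU settings, forwards the
// GPU memory fraction to gflags, and returns an initialized predictor
// (or nullptr on failure).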
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
  VLOG(3) << "create NativePaddlePredictor";
  if (config.use_gpu) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(
        config.fraction_of_gpu_memory, 0.f,
        "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
    std::vector<std::string> flags;
    if (config.fraction_of_gpu_memory >= 0.0f &&
        config.fraction_of_gpu_memory <= 0.95f) {
      // The first entry acts as a placeholder argv[0] for InitGflags.
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         num2str<float>(config.fraction_of_gpu_memory);
      flags.push_back(flag);
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return predictor;
#else
  return std::move(predictor);
#endif
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<NativeConfig>(
    const NativeConfig &config) {
  return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
}

}  // namespace paddle