/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <algorithm>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/inference/api/api_impl.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/timer.h"
#include "paddle/fluid/platform/profiler.h"

DEFINE_bool(profile, false, "Turn on profiler for fluid");

namespace paddle {
namespace {
using paddle::inference::Timer;

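// Converts an arithmetic value to its decimal string form; used below when
// composing gflags strings.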
template <class T>
std::string num2str(T a) {
  std::stringstream istr;
  istr << a;
  return istr.str();
}
}  // namespace

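// Scans block 0 of the inference program and caches its "feed" and "fetch"
// ops, indexed by their "col" attribute, so Run() can bind inputs and
// outputs by position (or by name via feed_names_).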
void NativePaddlePredictor::PrepareFeedFetch() {
  for (auto *op : inference_program_->Block(0).AllOps()) {
    if (op->Type() == "feed") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (feeds_.size() <= static_cast<size_t>(idx)) {
        feeds_.resize(idx + 1);
      }
      feeds_[idx] = op;
      feed_names_[op->Output("Out")[0]] = idx;
    } else if (op->Type() == "fetch") {
      int idx = boost::get<int>(op->GetAttr("col"));
      if (fetchs_.size() <= static_cast<size_t>(idx)) {
        fetchs_.resize(idx + 1);
      }
      fetchs_[idx] = op;
    }
  }
}

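// Initializes the predictor: selects the CPU/GPU place, creates a scope (or
// shares a non-null parent_scope, so Clone() can reuse already-loaded
// parameters), loads the inference program, and prepares the executor
// context.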
bool NativePaddlePredictor::Init(
    std::shared_ptr<framework::Scope> parent_scope) {
  VLOG(3) << "Predictor::init()";
#if !defined(_WIN32)
  if (FLAGS_profile) {
    LOG(WARNING) << "Profiler is activated, which might affect the performance";
    LOG(INFO) << "You can turn it off with the gflag '--profile=false'";

    auto tracking_device = config_.use_gpu ? platform::ProfilerState::kAll
                                           : platform::ProfilerState::kCPU;
    platform::EnableProfiler(tracking_device);
  }
#endif

  if (config_.use_gpu) {
    place_ = paddle::platform::CUDAPlace(config_.device);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
  if (parent_scope) {
    scope_ = parent_scope;
    sub_scope_ = &(parent_scope->NewScope());
    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "failed to create the sub scope");
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
  }

  executor_.reset(new paddle::framework::Executor(place_));

  // Initialize the inference program
  if (!config_.model_dir.empty()) {
    // Parameters are saved in separate files located in
    // the specified `dirname`.
    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
                                                 config_.model_dir);
  } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with those used in the Python API
    // `fluid.io.save_inference_model`.
    inference_program_ = paddle::inference::Load(
        executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
  } else {
    LOG(ERROR) << "fail to load inference model from " << config_.model_dir;
    return false;
  }

  ctx_ = executor_->Prepare(*inference_program_, 0);
  executor_->CreateVariables(*inference_program_,
                             sub_scope_ ? sub_scope_ : scope_.get(), 0);

  // Get the feed_target_names and fetch_target_names
  PrepareFeedFetch();
  return true;
}

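// Flushes profiler results to ./profile.log (when profiling is enabled) and
// releases the sub scope created in Init().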
NativePaddlePredictor::~NativePaddlePredictor() {
#if !defined(_WIN32)
  if (FLAGS_profile) {
    platform::DisableProfiler(platform::EventSortingKey::kTotal,
                              "./profile.log");
  }
#endif
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }
}

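// Runs one inference pass: feeds `inputs` into the scope, executes the
// prepared program, and copies the fetch results into `output_data`.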
bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                std::vector<PaddleTensor> *output_data,
                                int batch_size) {
  VLOG(3) << "Predictor::predict";
  Timer timer;
  timer.tic();
  // Set the feed variables.
  framework::Scope *scope = sub_scope_ != nullptr ? sub_scope_ : scope_.get();
  if (!SetFeed(inputs, scope)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }
  // Run the inference program
  // If the variables are shared, we need not create them each time.
  VLOG(4) << "Run prepared context";
  executor_->RunPreparedContext(ctx_.get(), scope,
                                false, /* don't create local scope each time*/
                                false /* don't create variables each time */);
  VLOG(4) << "Finish prepared context";
  // Get the fetch variables.
  if (!GetFetch(output_data, scope)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }
  VLOG(3) << "predict cost: " << timer.toc() << "ms";
  return true;
}

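// Creates a new predictor that shares this predictor's scope, so the model
// parameters are reused rather than loaded again.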
std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
  VLOG(3) << "Predictor::clone";
  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
    LOG(ERROR) << "fail to call Init";
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return cls;
#else
  // fix manylinux compile error.
  return std::move(cls);
#endif
}

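// Copies the user-provided PaddleTensor inputs into the "feed" variables of
// `scope`. Inputs are matched by name when config_.specify_input_name is
// set, otherwise by position.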
bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                    framework::Scope *scope) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feeds_.size()) {
    LOG(ERROR) << "wrong feed input size, need " << feeds_.size() << " but get "
               << inputs.size();
    return false;
  }
  for (size_t i = 0; i < inputs.size(); ++i) {
    framework::LoDTensor input;
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                inputs[i].data.length());
    // TODO(Superjomn) Low performance, need optimization for heavy LoD copy.
    framework::LoD lod;
    for (auto &level : inputs[i].lod) {
      lod.emplace_back(level);
    }
    input.set_lod(lod);
    int idx = -1;
    if (config_.specify_input_name) {
      idx = feed_names_[inputs[i].name];
    } else {
      idx = boost::get<int>(feeds_[i]->GetAttr("col"));
    }
    framework::SetFeedVariable(scope, input, "feed", idx);
  }
  return true;
}
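
// Copies one fetched LoDTensor into a PaddleTensor: shape, raw data, and LoD.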
template <typename T>
void NativePaddlePredictor::GetFetchOne(const framework::LoDTensor &fetch,
                                        PaddleTensor *output) {
  // set shape.
  auto shape = framework::vectorize(fetch.dims());
  output->shape.assign(shape.begin(), shape.end());
  // set data.
  const T *data = fetch.data<T>();
  int num_elems = inference::VecReduceToInt(shape);
  output->data.Resize(num_elems * sizeof(T));
  // The tensor output by the fetch op should always be in CPU memory, so a
  // plain copy suffices.
  memcpy(output->data.data(), data, num_elems * sizeof(T));
  // set lod
  output->lod.clear();
  for (auto &level : fetch.lod()) {
    output->lod.emplace_back(level.begin(), level.end());
  }
}

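// Copies every fetch variable from `scope` into `outputs`, dispatching on
// the element type (float32 or int64).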
bool NativePaddlePredictor::GetFetch(std::vector<PaddleTensor> *outputs,
                                     framework::Scope *scope) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetchs_.size());
  for (size_t i = 0; i < fetchs_.size(); ++i) {
    int idx = boost::get<int>(fetchs_[i]->GetAttr("col"));
    PADDLE_ENFORCE(static_cast<size_t>(idx) == i);
    framework::LoDTensor &fetch =
        framework::GetFetchVariable(*scope, "fetch", idx);
    auto type = fetch.type();
    auto output = &(outputs->at(i));
    if (type == typeid(float)) {
      GetFetchOne<float>(fetch, output);
      output->dtype = PaddleDType::FLOAT32;
    } else if (type == typeid(int64_t)) {
      GetFetchOne<int64_t>(fetch, output);
      output->dtype = PaddleDType::INT64;
    } else {
      LOG(ERROR) << "unknown type, only support float32 and int64 now.";
    }
  }
  return true;
}

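// Factory for the native predictor: validates the GPU settings, forwards
// the memory fraction to gflags, then builds and initializes the predictor.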
template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
  VLOG(3) << "create NativePaddlePredictor";
  if (config.use_gpu) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(
        config.fraction_of_gpu_memory, 0.f,
        "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
    std::vector<std::string> flags;
    if (config.fraction_of_gpu_memory >= 0.0f &&
        config.fraction_of_gpu_memory <= 0.95f) {
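      // The first element stands in for argv[0]; gflags skips it when
      // parsing.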
      flags.push_back("dummpy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         num2str<float>(config.fraction_of_gpu_memory);
      flags.push_back(flag);
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
#ifdef __clang__
  // fix clang compile error
  return predictor;
#else
  return std::move(predictor);
#endif
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<NativeConfig>(
    const NativeConfig &config) {
  return CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
}

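// A minimal usage sketch (illustrative only; the model path and the feed
// name below are hypothetical, not taken from this file):
//
//   NativeConfig config;
//   config.model_dir = "/path/to/model";
//   config.use_gpu = false;
//   auto predictor = CreatePaddlePredictor<NativeConfig>(config);
//
//   PaddleTensor input;
//   input.name = "x";
//   input.shape = {1, 3};
//   input.data.Resize(3 * sizeof(float));
//   // ... fill input.data.data() with 3 floats ...
//   input.dtype = PaddleDType::FLOAT32;
//
//   std::vector<PaddleTensor> outputs;
//   predictor->Run({input}, &outputs);
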
}  // namespace paddle