/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <sys/time.h>
#include <algorithm>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "paddle/fluid/inference/api/api_impl.h"

namespace paddle {
namespace {

// Simple wall-clock timer for measuring inference latency in milliseconds.
class Timer {
 public:
  double start;
  double startu;
  void tic() {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    start = tp.tv_sec;
    startu = tp.tv_usec;
  }
  double toc() {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    double used_time_ms =
        (tp.tv_sec - start) * 1000.0 + (tp.tv_usec - startu) / 1000.0;
    return used_time_ms;
  }
};
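// Usage sketch: Timer t; t.tic(); /* ... work ... */ double ms = t.toc();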

template <class T>
std::string num2str(T a) {
  std::stringstream istr;
  istr << a;
  return istr.str();
}
}  // namespace

bool NativePaddlePredictor::Init(
    std::shared_ptr<framework::Scope> parent_scope) {
  VLOG(3) << "Predictor::init()";

  if (config_.use_gpu) {
    place_ = paddle::platform::CUDAPlace(config_.device);
  } else {
    place_ = paddle::platform::CPUPlace();
  }
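  // A non-null parent scope means this predictor was created via Clone():
  // share the parent's scope (and thus its parameters) and keep this
  // predictor's own variables in a private sub-scope. Otherwise, initialize
  // the devices and create a fresh scope.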
  if (parent_scope) {
    scope_ = parent_scope;
    sub_scope_ = &(parent_scope->NewScope());
    PADDLE_ENFORCE_NOT_NULL(sub_scope_, "failed to create sub scope");
  } else {
    paddle::framework::InitDevices(false);
    scope_.reset(new paddle::framework::Scope());
  }

  executor_.reset(new paddle::framework::Executor(place_));

  // Initialize the inference program
  if (!config_.model_dir.empty()) {
    // Parameters are saved in separate files located in
    // the specified `dirname`.
    inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(),
                                                 config_.model_dir);
  } else if (!config_.prog_file.empty() && !config_.param_file.empty()) {
    // All parameters are saved in a single file.
    // The file names should be consistent with that used
    // in Python API `fluid.io.save_inference_model`.
    inference_program_ = paddle::inference::Load(
        executor_.get(), scope_.get(), config_.prog_file, config_.param_file);
  } else {
    LOG(ERROR) << "fail to load inference model.";
    return false;
  }

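  // Prepare the program once up front; Run() then reuses this prepared
  // context rather than re-preparing the program on every call.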
  ctx_ = executor_->Prepare(*inference_program_, 0);
  executor_->CreateVariables(*inference_program_,
                             sub_scope_ ? sub_scope_ : scope_.get(), 0);

  // Get the feed_target_names and fetch_target_names
  feed_target_names_ = inference_program_->GetFeedTargetNames();
  fetch_target_names_ = inference_program_->GetFetchTargetNames();
  return true;
}

NativePaddlePredictor::~NativePaddlePredictor() {
  if (sub_scope_) {
    scope_->DeleteScope(sub_scope_);
  }
}

bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
                                std::vector<PaddleTensor> *output_data,
                                int batch_size) {
  VLOG(3) << "Predictor::predict";
  Timer timer;
  timer.tic();
  // set feed variable
  std::map<std::string, const framework::LoDTensor *> feed_targets;
  std::vector<framework::LoDTensor> feeds;
  if (!SetFeed(inputs, &feeds)) {
    LOG(ERROR) << "fail to set feed";
    return false;
  }
  for (size_t i = 0; i < feed_target_names_.size(); ++i) {
    VLOG(4) << "setting " << i << "-th target";
    feed_targets[feed_target_names_[i]] = &feeds[i];
  }
  // get fetch variable
  std::map<std::string, framework::LoDTensor *> fetch_targets;
  std::vector<framework::LoDTensor> fetchs;
  fetchs.resize(fetch_target_names_.size());
  for (size_t i = 0; i < fetch_target_names_.size(); ++i) {
    fetch_targets[fetch_target_names_[i]] = &fetchs[i];
  }
  // Run the inference program.
  // Variables were already created in Init(), so they need not be
  // re-created for each call.
  VLOG(4) << "Run prepared context";
  executor_->RunPreparedContext(
      ctx_.get(), sub_scope_ != nullptr ? sub_scope_ : scope_.get(),
      &feed_targets, &fetch_targets,
      false /* don't create variables each time */);
  VLOG(4) << "Finish prepared context";
  if (!GetFetch(fetchs, output_data)) {
    LOG(ERROR) << "fail to get fetches";
    return false;
  }
  VLOG(3) << "predict cost: " << timer.toc() << "ms";
  return true;
}

std::unique_ptr<PaddlePredictor> NativePaddlePredictor::Clone() {
  VLOG(3) << "Predictor::clone";
  std::unique_ptr<PaddlePredictor> cls(new NativePaddlePredictor(config_));

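  // Passing scope_ lets the clone share this predictor's scope (and
  // parameters); Init() gives the clone its own sub-scope inside it.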
  if (!dynamic_cast<NativePaddlePredictor *>(cls.get())->Init(scope_)) {
    LOG(ERROR) << "fail to call Init";
    return nullptr;
  }
  // Returning via std::move avoids a manylinux compile error.
  return std::move(cls);
}

bool NativePaddlePredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
                                    std::vector<framework::LoDTensor> *feeds) {
  VLOG(3) << "Predictor::set_feed";
  if (inputs.size() != feed_target_names_.size()) {
    LOG(ERROR) << "wrong feed input size.";
    return false;
  }
  for (size_t i = 0; i < feed_target_names_.size(); ++i) {
    framework::LoDTensor input;
    framework::DDim ddim = framework::make_ddim(inputs[i].shape);
    void *input_ptr;
    if (inputs[i].dtype == PaddleDType::INT64) {
      input_ptr = input.mutable_data<int64_t>(ddim, platform::CPUPlace());
    } else if (inputs[i].dtype == PaddleDType::FLOAT32) {
      input_ptr = input.mutable_data<float>(ddim, platform::CPUPlace());
    } else {
      LOG(ERROR) << "unsupported feed type " << inputs[i].dtype;
      return false;
    }

    // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy.
    std::memcpy(static_cast<void *>(input_ptr), inputs[i].data.data(),
                inputs[i].data.length());
    feeds->push_back(input);
  }
  return true;
}

bool NativePaddlePredictor::GetFetch(
    const std::vector<framework::LoDTensor> &fetchs,
    std::vector<PaddleTensor> *outputs) {
  VLOG(3) << "Predictor::get_fetch";
  outputs->resize(fetchs.size());
  for (size_t i = 0; i < fetchs.size(); ++i) {
    // TODO(panyx0718): Support fetch of other types.
    if (fetchs[i].type() != typeid(float)) {
      LOG(ERROR) << "only support fetching float now.";
      return false;
    }
    std::vector<int> shape;
    auto dims_i = fetchs[i].dims();
    auto lod = fetchs[i].lod();
    const float *output_ptr = fetchs[i].data<float>();
    auto num = fetchs[i].numel();
    std::vector<float> data;
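    // Without LoD the output is a plain dense tensor: copy its data and
    // dims through unchanged.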
    if (0 == lod.size()) {
      std::copy(output_ptr, output_ptr + num, std::back_inserter(data));
      for (int j = 0; j < dims_i.size(); ++j) {
        shape.push_back(dims_i[j]);
      }
    } else {
      // for batch detection
      // image[0] -> output[0] shape {145, 6}
      // image[1] -> output[1] shape {176, 6}
      // then,
      // the batch output shape {321, 6}
      // the lod {{0, 145, 321}}
      // so we should append output[0] to {176, 6}
      size_t max_dim = 0;
      for (size_t j = 1; j < lod[0].size(); j++) {
        max_dim = std::max(max_dim, lod[0][j] - lod[0][j - 1]);
      }
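      // Elements per sequence step, i.e. the product of the trailing dims.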
      size_t common_dim = lod[0].back() == 0 ? 0 : num / lod[0].back();
      if (max_dim > 0) {
        data.resize((lod[0].size() - 1) * max_dim * common_dim, 0);
      }
      for (size_t j = 1; j < lod[0].size(); j++) {
        size_t start = lod[0][j - 1] * common_dim;
        size_t end = lod[0][j] * common_dim;
        if (end > start) {
          std::copy(output_ptr + start, output_ptr + end,
                    data.begin() + (j - 1) * max_dim * common_dim);
        }
      }
      shape.push_back(lod[0].size() - 1);
      shape.push_back(max_dim);
      for (int j = 1; j < dims_i.size(); ++j) {
        shape.push_back(dims_i[j]);
      }
    }

    outputs->at(i).shape = shape;
    auto &buffer = outputs->at(i).data;
    if (buffer.empty() || buffer.length() < sizeof(float) * data.size()) {
      buffer.Resize(sizeof(float) * data.size());
    }
    std::memcpy(buffer.data(), data.data(), buffer.length());
    outputs->at(i).dtype = PaddleDType::FLOAT32;
    // TODO(panyx0718): support other types? fill tensor name? avoid a copy.
  }
  return true;
}

template <>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) {
  VLOG(3) << "create NativePaddlePredictor";
  if (config.use_gpu) {
    // 1. GPU memory
    PADDLE_ENFORCE_GT(
        config.fraction_of_gpu_memory, 0.f,
        "fraction_of_gpu_memory in the config should be set to range (0., 1.]");
    PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device);
    std::vector<std::string> flags;
    if (config.fraction_of_gpu_memory >= 0.0f &&
        config.fraction_of_gpu_memory <= 0.95f) {
      // The leading entry stands in for the program name that gflags
      // expects as the first argument.
      flags.push_back("dummy");
      std::string flag = "--fraction_of_gpu_memory_to_use=" +
                         num2str<float>(config.fraction_of_gpu_memory);
      flags.push_back(flag);
      VLOG(3) << "set flag: " << flag;
      framework::InitGflags(flags);
    }
  }

  std::unique_ptr<PaddlePredictor> predictor(new NativePaddlePredictor(config));
  if (!dynamic_cast<NativePaddlePredictor *>(predictor.get())->Init(nullptr)) {
    return nullptr;
  }
  return std::move(predictor);
}
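
// A minimal end-to-end usage sketch (illustration only; the model path and
// input preparation are hypothetical):
//
//   NativeConfig config;
//   config.model_dir = "/path/to/model";
//   config.use_gpu = false;
//   auto predictor =
//       CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);
//   std::vector<PaddleTensor> inputs;   // FLOAT32/INT64 tensors, one per feed
//   std::vector<PaddleTensor> outputs;
//   if (predictor && predictor->Run(inputs, &outputs)) {
//     // outputs now holds one FLOAT32 tensor per fetch target.
//   }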

}  // namespace paddle