/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains a demo for mobilenet, se-resnext50 and ocr.
 */
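
/*
 * A typical invocation looks roughly like the following; the binary name and
 * paths are illustrative, while the flags are the ones defined in this file:
 *
 *   ./vis_demo --modeldir=<model_dir> --data=<data_file> \
 *       --refer=<refer_file> --use_gpu=false
 */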

#include <gflags/gflags.h>
#include <glog/logging.h>  // use glog instead of CHECK to avoid importing other paddle header files.
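// utils.h is expected to supply the helpers used below (ProcessALine,
// SummaryTensor, CheckOutput) as well as the Paddle inference API headers.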
#include "utils.h"  // NOLINT

#ifdef PADDLE_WITH_CUDA
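// The flag itself is defined inside the Paddle library; DECLARE_double only
// makes it visible to this file, and only in CUDA builds.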
DECLARE_double(fraction_of_gpu_memory_to_use);
#endif
DEFINE_string(modeldir, "", "Directory of the inference model.");
DEFINE_string(refer, "", "Path to the reference result for comparison.");
DEFINE_string(
    data, "",
    "Path of the data file; each line is a record in the format "
    "'<space separated floats as data>\t<space separated ints as shape>'");
DEFINE_bool(use_gpu, false, "Whether to use the GPU.");

namespace paddle {
namespace demo {

/*
 * Use the native fluid engine to run inference for the demo.
 */
void Main(bool use_gpu) {
  std::unique_ptr<PaddlePredictor> predictor;
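  // The model directory is assumed to hold a "combined" export: the program
  // in __model__ and all weights in a single __params__ file, matching the
  // paths set below.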
  NativeConfig config;
  config.param_file = FLAGS_modeldir + "/__params__";
  config.prog_file = FLAGS_modeldir + "/__model__";
  config.use_gpu = use_gpu;
  config.device = 0;  // GPU device id; ignored when running on CPU.
  if (FLAGS_use_gpu) {
    config.fraction_of_gpu_memory = 0.1;  // Fraction of GPU memory to reserve; tune for your device.
  }

  VLOG(3) << "init predictor";
  predictor =
      CreatePaddlePredictor<NativeConfig, PaddleEngineKind::kNative>(config);

  VLOG(3) << "begin to process data";
  // Just a single batch of data.
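  // Each line of --data is expected to follow the flag's documented format:
  // '<space separated floats>\t<space separated ints as shape>'.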
  std::string line;
  std::ifstream file(FLAGS_data);
  std::getline(file, line);
  auto record = ProcessALine(line);
  file.close();

  // Inference.
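  // PaddleBuf(ptr, size) wraps the record's buffer instead of copying it, so
  // `record` must stay alive until Run() below has finished.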
  PaddleTensor input;
  input.shape = record.shape;
  input.data =
      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
  input.dtype = PaddleDType::FLOAT32;

  VLOG(3) << "run executor";
  std::vector<PaddleTensor> output;
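  // A single forward pass; the last argument is the batch size (one record
  // here).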
  predictor->Run({input}, &output, 1);

  VLOG(3) << "output.size " << output.size();
  auto& tensor = output.front();
  VLOG(3) << "output: " << SummaryTensor(tensor);

  // Compare the output with the reference result given by --refer.
  CheckOutput(FLAGS_refer, tensor);
}

}  // namespace demo
}  // namespace paddle

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
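  // Dispatch to the GPU or CPU path based on --use_gpu.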
  if (FLAGS_use_gpu) {
    paddle::demo::Main(true /*use_gpu*/);
  } else {
    paddle::demo::Main(false /*use_gpu*/);
  }
  return 0;
}