vis_demo.cc
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains demos for the mobilenet, se-resnext50 and ocr models.
 */

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <fstream>
#include <memory>
#include <string>
#include <vector>
#include "utils.h"  // NOLINT

#ifdef PADDLE_WITH_CUDA
DECLARE_double(fraction_of_gpu_memory_to_use);
#endif
DEFINE_string(modeldir, "", "Directory of the inference model.");
DEFINE_string(refer, "", "path to reference result for comparison.");
DEFINE_string(
    data, "",
    "Path of the input data; each line is one record in the format "
    "'<space-separated floats as data>\t<space-separated ints as shape>'.");
DEFINE_bool(use_gpu, false, "Whether to use the GPU.");
#ifdef PADDLE_WITH_SHARED_LIB
DECLARE_bool(profile);
#endif
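
// Example invocation (binary name and paths are illustrative):
//   ./vis_demo --modeldir=./mobilenet --data=./data.txt --refer=./result.txt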

namespace paddle {
namespace demo {

/*
 * Run the demo with both the native and the analysis Fluid engines.
 */
void Main(bool use_gpu) {
  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
  AnalysisConfig config;
  if (use_gpu) {
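    // Allocate an initial 100 MB GPU memory pool on device 0.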
    config.EnableUseGpu(100, 0);
  }
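  // Load the model in combined form: __model__ holds the network topology
  // and __params__ holds all the weights in a single file.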
  config.SetModel(FLAGS_modeldir + "/__model__",
                  FLAGS_modeldir + "/__params__");

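  // Build two predictors from the same config: the legacy native engine and
  // the analysis engine, which additionally runs graph optimization passes.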
  predictor = CreatePaddlePredictor<NativeConfig>(config.ToNativeConfig());
  analysis_predictor = CreatePaddlePredictor(config);

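  // A record line looks like, e.g. (illustrative toy values):
  //   0.1 0.2 0.3 0.4\t1 1 2 2
  // i.e. four space-separated floats, a tab, then the shape {1, 1, 2, 2}.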
  // Just a single batch of data.
  std::string line;
  std::ifstream file(FLAGS_data);
  std::getline(file, line);
  auto record = ProcessALine(line);
  file.close();

  // Inference.
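  // Wrap the record in a PaddleTensor; the PaddleBuf below references the
  // record's memory rather than copying it, so `record` must stay alive
  // until Run() completes (it does here, as a local in the same scope).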
  PaddleTensor input;
  input.shape = record.shape;
  input.data =
      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
  input.dtype = PaddleDType::FLOAT32;

  std::vector<PaddleTensor> output, analysis_output;
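  // Run the native predictor; the trailing argument is the batch size.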
  predictor->Run({input}, &output, 1);

  auto& tensor = output.front();

  // Compare with the reference result.
  CheckOutput(FLAGS_refer, tensor);

  // The analysis_output still differs slightly from the native output.
  // TODO(luotao): add CheckOutput for analysis_output later.
  analysis_predictor->Run({input}, &analysis_output, 1);
}

}  // namespace demo
}  // namespace paddle

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_use_gpu) {
    paddle::demo::Main(true /*use_gpu*/);
  } else {
    paddle::demo::Main(false /*use_gpu*/);
  }
  return 0;
}