/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

/*
 * This file contains demos for mobilenet, se-resnext50 and ocr.
 */
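
/*
 * Example invocation (the binary name and file paths here are hypothetical;
 * the flags are the ones defined in this file):
 *
 *   ./vis_demo --modeldir=./mobilenet --data=./data.txt \
 *              --refer=./result.txt --use_gpu=false
 */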

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <fstream>
#include <memory>
#include <string>
#include <vector>

#include "utils.h"  // NOLINT

#ifdef PADDLE_WITH_CUDA
DECLARE_double(fraction_of_gpu_memory_to_use);
#endif
DEFINE_string(modeldir, "", "Directory of the inference model.");
DEFINE_string(refer, "", "path to reference result for comparison.");
DEFINE_string(
    data, "",
    "Path of the data file; each line is a record in the format "
    "'<space separated floats as data>\t<space separated ints as shape>'.");
DEFINE_bool(use_gpu, false, "Whether to use the GPU.");

namespace paddle {
namespace demo {

/*
 * Run the demo inference with both the native and the analysis fluid engines.
 */
void Main(bool use_gpu) {
  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
  AnalysisConfig config;
  if (use_gpu) {
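    // Reserve an initial GPU memory pool of 100 MB on device 0.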
    config.EnableUseGpu(100, 0);
  }
  config.SetModel(FLAGS_modeldir + "/__model__",
                  FLAGS_modeldir + "/__params__");

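  // Create two predictors from the same model: one through the legacy
  // NativeConfig path and one through the AnalysisConfig path, so that
  // their outputs can be compared below.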
  predictor = CreatePaddlePredictor<NativeConfig>(config.ToNativeConfig());
  analysis_predictor = CreatePaddlePredictor(config);

  // Just a single batch of data.
  std::string line;
  std::ifstream file(FLAGS_data);
  std::getline(file, line);
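  // ProcessALine (declared in utils.h) splits a record into the raw float
  // data and the tensor shape.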
  auto record = ProcessALine(line);
  file.close();

  // Inference.
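  // Wrap the record's buffer in a PaddleTensor. A PaddleBuf constructed from
  // a raw pointer references the existing memory and does not take ownership.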
  PaddleTensor input;
  input.shape = record.shape;
  input.data =
      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
  input.dtype = PaddleDType::FLOAT32;

  std::vector<PaddleTensor> output, analysis_output;
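  // Run the native predictor; the third argument is the batch size.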
  predictor->Run({input}, &output, 1);

  auto& tensor = output.front();

  // Compare with the reference result.
  CheckOutput(FLAGS_refer, tensor);

  // The analysis_output still differs slightly from the native output.
  // TODO(luotao): add CheckOutput for analysis_output later.
  analysis_predictor->Run({input}, &analysis_output, 1);
}

}  // namespace demo
}  // namespace paddle

int main(int argc, char** argv) {
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_use_gpu) {
    paddle::demo::Main(true /*use_gpu*/);
  } else {
    paddle::demo::Main(false /*use_gpu*/);
  }
  return 0;
}