/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <fstream>
#include <iostream>
#ifdef _WIN32
#include <direct.h>  // _mkdir used in the save_optim_model test
#else
#include <sys/stat.h>  // mkdir used in the save_optim_model test
#include <sys/types.h>
#endif

#include "paddle/fluid/inference/tests/api/tester_helper.h"

DEFINE_bool(disable_mkldnn_fc, false, "Disable usage of MKL-DNN's FC op");
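// Typical invocation (a sketch only; the actual gtest binary name depends on
// the CMake target built from this file):
//   ./test_binary --infer_model=/path/to/image_classification_model \
//     --num_threads=1 --cpu_num_threads=4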

namespace paddle {
namespace inference {
namespace analysis {

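// Baseline config: load the model from FLAGS_infer_model and run on CPU with
// IR optimizations, named inputs, and FLAGS_cpu_num_threads math threads.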
void SetConfig(AnalysisConfig *cfg) {
  cfg->SetModel(FLAGS_infer_model + "/model", FLAGS_infer_model + "/params");
  cfg->DisableGpu();
  cfg->SwitchIrOptim();
  cfg->SwitchSpecifyInputNames();
  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
}

void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
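  // Generate fake image inputs shaped according to the model's feed targets.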
  SetFakeImageInput(inputs, FLAGS_infer_model);
}

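// Same CPU settings as SetConfig, but loads the optimized model written by
// the save_optim_model test.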
void SetOptimConfig(AnalysisConfig *cfg) {
  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
  cfg->SetModel(optimModelPath + "/model", optimModelPath + "/params");
  cfg->DisableGpu();
  cfg->SwitchIrOptim();
  cfg->SwitchSpecifyInputNames();
  cfg->SetCpuMathLibraryNumThreads(FLAGS_cpu_num_threads);
}

// Kept as a standalone helper so profiling can be run independently.
void profile(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  if (use_mkldnn) {
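    // Run with MKL-DNN kernels; the FC-related passes are appended unless
    // --disable_mkldnn_fc is set.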
    cfg.EnableMKLDNN();
    if (!FLAGS_disable_mkldnn_fc) {
      cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
      cfg.pass_builder()->AppendPass("fc_act_mkldnn_fuse_pass");
    }
  }
  std::vector<std::vector<PaddleTensor>> outputs;

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                 input_slots_all,
                 &outputs,
                 FLAGS_num_threads);
}

// Check the operator fusion statistics.
TEST(Analyzer_resnet50, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
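  // fuse_statis holds per-fusion op counts; this generic test only logs the
  // total number of ops.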
  LOG(INFO) << "num_ops: " << num_ops;
}

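// Profiling entry points: plain CPU and, when built with MKL-DNN, an MKL-DNN
// variant.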
TEST(Analyzer_resnet50, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_resnet50, profile_mkldnn) { profile(true /* use_mkldnn */); }
#endif

// Compare results of NativeConfig and AnalysisConfig.
void compare(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  if (use_mkldnn) {
    cfg.EnableMKLDNN();
    if (!FLAGS_disable_mkldnn_fc) {
      cfg.pass_builder()->AppendPass("fc_mkldnn_pass");
      cfg.pass_builder()->AppendPass("fc_act_mkldnn_fuse_pass");
    }
  }

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), input_slots_all);
}

TEST(Analyzer_resnet50, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_resnet50, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif

// Check that repeated runs produce deterministic results.
TEST(Analyzer_resnet50, compare_determine) {
  AnalysisConfig cfg;
  SetConfig(&cfg);
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                       input_slots_all);
}

// Save the IR-optimized model (later loaded by compare_optim_orig).
TEST(Analyzer_resnet50, save_optim_model) {
  AnalysisConfig cfg;
  std::string optimModelPath = FLAGS_infer_model + "/saved_optim_model";
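  // Create the output directory: _mkdir on Windows, mkdir (mode 0777)
  // elsewhere; the return value is not checked.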
#ifdef _WIN32
  _mkdir(optimModelPath.c_str());
#else
  mkdir(optimModelPath.c_str(), 0777);
#endif
  SetConfig(&cfg);
  SaveOptimModel(&cfg, optimModelPath);
}

void CompareOptimAndOrig(const PaddlePredictor::Config *orig_config,
                         const PaddlePredictor::Config *optim_config,
                         const std::vector<std::vector<PaddleTensor>> &inputs) {
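  // Print both configs, run each model single-threaded, and compare the
  // outputs of the last batch.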
  PrintConfig(orig_config, true);
  PrintConfig(optim_config, true);
  std::vector<std::vector<PaddleTensor>> orig_outputs, optim_outputs;
  TestOneThreadPrediction(orig_config, inputs, &orig_outputs, false);
  TestOneThreadPrediction(optim_config, inputs, &optim_outputs, false);
  CompareResult(orig_outputs.back(), optim_outputs.back());
}

TEST(Analyzer_resnet50, compare_optim_orig) {
  AnalysisConfig orig_cfg;
  AnalysisConfig optim_cfg;
  SetConfig(&orig_cfg);
  SetOptimConfig(&optim_cfg);
  std::vector<std::vector<PaddleTensor>> input_slots_all;
  SetInput(&input_slots_all);
  CompareOptimAndOrig(
      reinterpret_cast<const PaddlePredictor::Config *>(&orig_cfg),
      reinterpret_cast<const PaddlePredictor::Config *>(&optim_cfg),
      input_slots_all);
}

}  // namespace analysis
}  // namespace inference
}  // namespace paddle