/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once

#include <cmath>
#include <cstring>
#include <map>
#include <memory>
#include <random>
#include <string>
#include <typeinfo>
#include <vector>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/io.h"
23
#include "paddle/fluid/platform/profiler.h"
24

DECLARE_bool(use_mkldnn);

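// Fills `input` (reshaped to `dims`, on CPU) with values drawn uniformly
// from [lower, upper). The static, incremented seed makes successive calls
// produce different data while keeping a fresh run reproducible.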
template <typename T>
void SetupTensor(paddle::framework::LoDTensor* input,
                 paddle::framework::DDim dims, T lower, T upper) {
  static unsigned int seed = 100;
  std::mt19937 rng(seed++);
  std::uniform_real_distribution<double> uniform_dist(0, 1);

  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
  for (int64_t i = 0; i < input->numel(); ++i) {
    input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
  }
}

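// Fills `input` (reshaped to `dims`, on CPU) with the given `data`, which
// must contain exactly product(dims) elements.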
template <typename T>
void SetupTensor(paddle::framework::LoDTensor* input,
                 paddle::framework::DDim dims, const std::vector<T>& data) {
  CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
  memcpy(input_ptr, data.data(), input->numel() * sizeof(T));
}

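// Creates a random LoDTensor of shape {N, 1}, where N is the total sequence
// length recorded in the first level of `lod`.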
template <typename T>
void SetupLoDTensor(paddle::framework::LoDTensor* input,
                    const paddle::framework::LoD& lod, T lower, T upper) {
  input->set_lod(lod);
  int dim = static_cast<int>(lod[0].back());
  SetupTensor<T>(input, {dim, 1}, lower, upper);
}

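// Creates a LoDTensor with the given shape, LoD, and contents; `dims[0]`
// must match the total length recorded in the finest LoD level.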
template <typename T>
void SetupLoDTensor(paddle::framework::LoDTensor* input,
                    paddle::framework::DDim dims,
                    const paddle::framework::LoD& lod,
                    const std::vector<T>& data) {
  const size_t level = lod.size() - 1;
  CHECK_EQ(dims[0], static_cast<int64_t>(lod[level].back()));
  input->set_lod(lod);
  SetupTensor<T>(input, dims, data);
}

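// Compares two LoDTensors element-wise, using a type-dependent absolute
// tolerance (1e-3 for float, 1e-6 for double, exact match otherwise).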
template <typename T>
void CheckError(const paddle::framework::LoDTensor& output1,
                const paddle::framework::LoDTensor& output2) {
  // Check LoD information
  EXPECT_EQ(output1.lod(), output2.lod());

  EXPECT_EQ(output1.dims(), output2.dims());
  EXPECT_EQ(output1.numel(), output2.numel());

  T err = static_cast<T>(0);
  if (typeid(T) == typeid(float)) {
    err = 1E-3;
  } else if (typeid(T) == typeid(double)) {
    err = 1E-6;
  } else {
    err = 0;
  }

  size_t count = 0;
  for (int64_t i = 0; i < output1.numel(); ++i) {
    if (std::fabs(output1.data<T>()[i] - output2.data<T>()[i]) > err) {
      count++;
    }
  }
  EXPECT_EQ(count, 0U) << "There are " << count << " different elements.";
}

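// Loads an inference program from `dirname`, either from separate parameter
// files or from a single combined file (see `fluid.io.save_inference_model`).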
std::unique_ptr<paddle::framework::ProgramDesc> InitProgram(
    paddle::framework::Executor* executor, paddle::framework::Scope* scope,
    const std::string& dirname, const bool is_combined = false) {
  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
  if (is_combined) {
    // All parameters are saved in a single file.
    // The file names of the program and parameters are hard-coded in the
    // unittest; they must be consistent with those used by the Python API
    // `fluid.io.save_inference_model`.
    std::string prog_filename = "__model_combined__";
    std::string param_filename = "__params_combined__";
    inference_program =
        paddle::inference::Load(executor, scope, dirname + "/" + prog_filename,
                                dirname + "/" + param_filename);
  } else {
    // Parameters are saved in separate files located in the specified
    // `dirname`.
    inference_program = paddle::inference::Load(executor, scope, dirname);
  }
  return inference_program;
}

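// Loads the program once and returns the declared shape of every feed
// target, in the order given by GetFeedTargetNames().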
std::vector<std::vector<int64_t>> GetFeedTargetShapes(
    const std::string& dirname, const bool is_combined = false) {
  auto place = paddle::platform::CPUPlace();
  auto executor = paddle::framework::Executor(place);
  auto* scope = new paddle::framework::Scope();

  auto inference_program = InitProgram(&executor, scope, dirname, is_combined);
  auto& global_block = inference_program->Block(0);

  const std::vector<std::string>& feed_target_names =
      inference_program->GetFeedTargetNames();
  std::vector<std::vector<int64_t>> feed_target_shapes;
  for (size_t i = 0; i < feed_target_names.size(); ++i) {
    auto* var = global_block.FindVar(feed_target_names[i]);
    std::vector<int64_t> var_shape = var->GetShape();
    feed_target_shapes.push_back(var_shape);
  }

  delete scope;
  return feed_target_shapes;
}

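// Loads the model in `dirname`, feeds it `cpu_feeds`, writes the results
// into `cpu_fetchs`, and profiles `repeat` runs. `Place` selects CPU or
// CUDA execution; see the usage sketch at the end of this file.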
template <typename Place, bool CreateVars = true, bool PrepareContext = false>
void TestInference(const std::string& dirname,
                   const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                   const std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
                   const int repeat = 1, const bool is_combined = false) {
  // 1. Define place, executor, scope
  auto place = Place();
  auto executor = paddle::framework::Executor(place);
  auto* scope = new paddle::framework::Scope();

  // Profile the performance
  paddle::platform::ProfilerState state;
  if (paddle::platform::is_cpu_place(place)) {
    state = paddle::platform::ProfilerState::kCPU;
  } else {
#ifdef PADDLE_WITH_CUDA
    state = paddle::platform::ProfilerState::kAll;
    // The default device_id of paddle::platform::CUDAPlace is 0.
    // Users can get the device_id using:
    //   int device_id = place.GetDeviceId();
    paddle::platform::SetDeviceId(0);
#else
    PADDLE_THROW("'CUDAPlace' is not supported in a CPU-only build.");
#endif
  }

  // 2. Initialize the inference_program and load parameters
  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;

  // Enable the profiler
  paddle::platform::EnableProfiler(state);
  {
    paddle::platform::RecordEvent record_event(
        "init_program",
        paddle::platform::DeviceContextPool::Instance().Get(place));
    inference_program = InitProgram(&executor, scope, dirname, is_combined);
  }
  // Disable the profiler and print the timing information
  paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
                                    "load_program_profiler");
  paddle::platform::ResetProfiler();

  // 3. Get the feed_target_names and fetch_target_names
  const std::vector<std::string>& feed_target_names =
      inference_program->GetFeedTargetNames();
  const std::vector<std::string>& fetch_target_names =
      inference_program->GetFetchTargetNames();

  // 4. Prepare inputs: set up maps for feed targets
  std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
  for (size_t i = 0; i < feed_target_names.size(); ++i) {
    // Please make sure that cpu_feeds[i] corresponds to feed_target_names[i]
    feed_targets[feed_target_names[i]] = cpu_feeds[i];
  }

  // 5. Define tensors to hold the outputs: set up maps for fetch targets
  std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
  for (size_t i = 0; i < fetch_target_names.size(); ++i) {
    fetch_targets[fetch_target_names[i]] = cpu_fetchs[i];
  }

  // 6. If FLAGS_use_mkldnn is set (e.g. `export FLAGS_use_mkldnn=true`), use
  // the MKLDNN-related ops.
  if (FLAGS_use_mkldnn) executor.EnableMKLDNN(*inference_program);

  // 7. Run the inference program
  {
    if (!CreateVars) {
      // If users don't want to create and destroy variables every time they
      // run, they need to set `create_vars` to false and manually call
      // `CreateVariables` before running.
      executor.CreateVariables(*inference_program, scope, 0);
    }

    // Ignore the profiling results of the first run
    std::unique_ptr<paddle::framework::ExecutorPrepareContext> ctx;
    if (PrepareContext) {
      ctx = executor.Prepare(*inference_program, 0);
      executor.RunPreparedContext(ctx.get(), scope, &feed_targets,
                                  &fetch_targets, true, CreateVars);
    } else {
      executor.Run(*inference_program, scope, &feed_targets, &fetch_targets,
                   true, CreateVars);
    }

    // Enable the profiler
    paddle::platform::EnableProfiler(state);

    // Run `repeat` times to profile the performance
    for (int i = 0; i < repeat; ++i) {
      paddle::platform::RecordEvent record_event(
          "run_inference",
          paddle::platform::DeviceContextPool::Instance().Get(place));

      if (PrepareContext) {
        // Note: if you change the inference_program, you need to call
        // executor.Prepare() again to get a new ExecutorPrepareContext.
        executor.RunPreparedContext(ctx.get(), scope, &feed_targets,
                                    &fetch_targets, CreateVars);
      } else {
        executor.Run(*inference_program, scope, &feed_targets, &fetch_targets,
                     CreateVars);
      }
    }

    // Disable the profiler and print the timing information
    paddle::platform::DisableProfiler(
        paddle::platform::EventSortingKey::kDefault, "run_inference_profiler");
    paddle::platform::ResetProfiler();
  }

  delete scope;
}
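
// A minimal usage sketch (the model directory and input shape below are
// hypothetical; substitute the directory produced by
// `fluid.io.save_inference_model` and the shapes reported by
// GetFeedTargetShapes()):
//
//   paddle::framework::LoDTensor input, output;
//   SetupTensor<float>(&input, {1, 28 * 28}, 0.0f, 1.0f);
//   std::vector<paddle::framework::LoDTensor*> cpu_feeds{&input};
//   std::vector<paddle::framework::LoDTensor*> cpu_fetchs{&output};
//   TestInference<paddle::platform::CPUPlace>("/path/to/model_dir", cpu_feeds,
//                                             cpu_fetchs);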