/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <time.h>

#include <cmath>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
#include <string>
#include <typeinfo>
#include <vector>

#include "glog/logging.h"
#include "gtest/gtest.h"

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/io.h"
#include "paddle/fluid/platform/profiler.h"

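// Fills `input`, reshaped to `dims`, with values drawn uniformly at random
// from [lower, upper]. The tensor's own address seeds rand_r(), so different
// tensors receive different data.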
template <typename T>
void SetupTensor(paddle::framework::LoDTensor* input,
                 paddle::framework::DDim dims,
                 const T lower,
                 const T upper) {
  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
  unsigned int seed =
      static_cast<unsigned int>(reinterpret_cast<std::uintptr_t>(input));
  for (int i = 0; i < input->numel(); ++i) {
    input_ptr[i] = (static_cast<T>(rand_r(&seed)) / static_cast<T>(RAND_MAX)) *
                       (upper - lower) +
                   lower;
  }
}

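// Copies `data` into `input`, reshaped to `dims`, after checking that the
// number of elements implied by `dims` matches data.size().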
template <typename T>
void SetupTensor(paddle::framework::LoDTensor* input,
                 paddle::framework::DDim dims,
                 const std::vector<T>& data) {
  CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
  memcpy(input_ptr, data.data(), input->numel() * sizeof(T));
}

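// Sets `lod` on `input` and fills it with random values in [lower, upper].
// The tensor is shaped {N, 1}, where N is the total length recorded at LoD
// level 0.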
template <typename T>
void SetupLoDTensor(paddle::framework::LoDTensor* input,
                    const paddle::framework::LoD& lod,
                    const T lower,
                    const T upper) {
  input->set_lod(lod);
  int dim = lod[0][lod[0].size() - 1];
  SetupTensor<T>(input, {dim, 1}, lower, upper);
}

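// Sets `lod` on `input` and copies `data` into it, checking that dims[0]
// matches the total length recorded at the last LoD level.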
template <typename T>
void SetupLoDTensor(paddle::framework::LoDTensor* input,
                    paddle::framework::DDim dims,
                    paddle::framework::LoD lod,
                    const std::vector<T>& data) {
  const size_t level = lod.size() - 1;
  CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));
  input->set_lod(lod);
  SetupTensor<T>(input, dims, data);
}

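// Compares two LoDTensors element-wise (LoD, dims, and data) and reports, via
// gtest, how many elements differ by more than a type-dependent tolerance
// (1e-3 for float, 1e-6 for double, exact match otherwise).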
template <typename T>
void CheckError(const paddle::framework::LoDTensor& output1,
                const paddle::framework::LoDTensor& output2) {
  // Check lod information
  EXPECT_EQ(output1.lod(), output2.lod());

  EXPECT_EQ(output1.dims(), output2.dims());
  EXPECT_EQ(output1.numel(), output2.numel());

  T err = static_cast<T>(0);
  if (typeid(T) == typeid(float)) {
    err = 1E-3;
  } else if (typeid(T) == typeid(double)) {
    err = 1E-6;
  } else {
    err = 0;
  }

  size_t count = 0;
  for (int64_t i = 0; i < output1.numel(); ++i) {
    if (fabs(output1.data<T>()[i] - output2.data<T>()[i]) > err) {
      count++;
    }
  }
  EXPECT_EQ(count, 0U) << "There are " << count << " different elements.";
}

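// Loads the inference program saved in `dirname`, runs it `repeat` times with
// the given feed/fetch tensors on the Place given as the template argument,
// and writes profiler reports for both the load and run phases.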
template <typename Place>
void TestInference(const std::string& dirname,
                   const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                   const std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
                   const int repeat = 1,
                   const bool is_combined = false) {
  // 1. Define place, executor, scope
  auto place = Place();
  auto executor = paddle::framework::Executor(place);
  auto* scope = new paddle::framework::Scope();

  // Profile the performance
  paddle::platform::ProfilerState state;
  if (paddle::platform::is_cpu_place(place)) {
    state = paddle::platform::ProfilerState::kCPU;
  } else {
#ifdef PADDLE_WITH_CUDA
    state = paddle::platform::ProfilerState::kCUDA;
    // The default device_id of paddle::platform::CUDAPlace is 0.
    // Users can get the device_id using:
    //   int device_id = place.GetDeviceId();
    paddle::platform::SetDeviceId(0);
#else
    PADDLE_THROW("'CUDAPlace' is not supported in a CPU-only build.");
#endif
  }

  // 2. Initialize the inference_program and load parameters
  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;

  // Enable the profiler
  paddle::platform::EnableProfiler(state);
  {
    paddle::platform::RecordEvent record_event(
        "init_program",
        paddle::platform::DeviceContextPool::Instance().Get(place));

    if (is_combined) {
      // All parameters are saved in a single file.
      // The file names of the program and the parameters are hard-coded in
      // this unittest. They should be consistent with those used by the
      // Python API `fluid.io.save_inference_model`.
      std::string prog_filename = "__model_combined__";
      std::string param_filename = "__params_combined__";
      inference_program =
          paddle::inference::Load(executor,
                                  *scope,
                                  dirname + "/" + prog_filename,
                                  dirname + "/" + param_filename);
    } else {
      // Parameters are saved in separate files located in the specified
      // `dirname`.
      inference_program = paddle::inference::Load(executor, *scope, dirname);
    }
  }
  // Disable the profiler and print the timing information
  paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
                                    "load_program_profiler.txt");
  paddle::platform::ResetProfiler();

  // 3. Get the feed_target_names and fetch_target_names
  const std::vector<std::string>& feed_target_names =
      inference_program->GetFeedTargetNames();
  const std::vector<std::string>& fetch_target_names =
      inference_program->GetFetchTargetNames();

  // 4. Prepare inputs: set up maps for feed targets
  std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
  for (size_t i = 0; i < feed_target_names.size(); ++i) {
    // Please make sure that cpu_feeds[i] corresponds to feed_target_names[i]
    feed_targets[feed_target_names[i]] = cpu_feeds[i];
  }

  // 5. Define Tensor to get the outputs: set up maps for fetch targets
  std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
  for (size_t i = 0; i < fetch_target_names.size(); ++i) {
    fetch_targets[fetch_target_names[i]] = cpu_fetchs[i];
  }

  // 6. Run the inference program
  {
    // Ignore the profiling results of the first run
    executor.Run(*inference_program, scope, feed_targets, fetch_targets);

    // Enable the profiler
    paddle::platform::EnableProfiler(state);

    // Run repeat times to profile the performance
    for (int i = 0; i < repeat; ++i) {
      paddle::platform::RecordEvent record_event(
          "run_inference",
          paddle::platform::DeviceContextPool::Instance().Get(place));

      executor.Run(*inference_program, scope, feed_targets, fetch_targets);
    }

    // Disable the profiler and print the timing information
    paddle::platform::DisableProfiler(
        paddle::platform::EventSortingKey::kDefault,
        "run_inference_profiler.txt");
    paddle::platform::ResetProfiler();
  }

  delete scope;
}
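
// Example usage (an illustrative sketch only; the model directory, tensor
// shape, and value range below are placeholders, not taken from an actual
// test):
//
//   TEST(inference, example_model) {
//     std::string dirname = "/path/to/saved_inference_model";  // placeholder
//
//     paddle::framework::LoDTensor input;
//     SetupTensor<float>(&input, {1, 784}, 0.0f, 1.0f);
//     std::vector<paddle::framework::LoDTensor*> cpu_feeds{&input};
//
//     paddle::framework::LoDTensor output;
//     std::vector<paddle::framework::LoDTensor*> cpu_fetchs{&output};
//
//     TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs);
//   }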