mobilenetv1_light_api.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
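
// Demo for the Paddle-Lite light API: load a MobileNetV1 naive-buffer (nb)
// model, run it repeatedly, and report latency together with simple
// statistics (mean / standard deviation) of every output tensor.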

#include <sys/time.h>
#include <time.h>
#include <cmath>
#include <iostream>
#include <string>
#include <vector>

#include "paddle_api.h"  // NOLINT

using namespace paddle::lite_api;  // NOLINT

int64_t ShapeProduction(const shape_t& shape) {
  int64_t res = 1;
  for (auto i : shape) res *= i;
  return res;
}

std::string ShapePrint(const shape_t& shape) {
  std::string shape_str{""};
  for (auto i : shape) {
    shape_str += std::to_string(i) + " ";
  }
  return shape_str;
}

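// Helpers to summarize a flat buffer: arithmetic mean and (population)
// standard deviation, used below to sanity-check the output tensors.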
template <typename T>
double compute_mean(const T* in, const size_t length) {
  double sum = 0.;
  for (size_t i = 0; i < length; ++i) {
    sum += in[i];
  }
  return sum / length;
}

template <typename T>
double compute_standard_deviation(const T* in,
                                  const size_t length,
                                  bool has_mean = false,
                                  double mean = 10000) {
  if (!has_mean) {
    mean = compute_mean<T>(in, length);
  }

  double variance = 0.;
  for (size_t i = 0; i < length; ++i) {
    variance += pow((in[i] - mean), 2);
  }
  variance /= length;
  return sqrt(variance);
}

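// Current wall-clock time in microseconds, used to time each Run() call.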
inline double GetCurrentUS() {
  struct timeval time;
  gettimeofday(&time, NULL);
  return 1e+6 * time.tv_sec + time.tv_usec;
}

void RunModel(std::string model_dir,
              const shape_t& input_shape,
              size_t repeats,
              size_t warmup,
              size_t print_output_elem,
              size_t power_mode) {
  // 1. Set MobileConfig
  MobileConfig config;
  config.set_model_from_file(model_dir);

  // NOTE: To run on an Android GPU with OpenCL, you should ensure:
  //  first, compile a **cpu+opencl** Paddle-Lite lib, see
  //    https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/demo_guides/opencl.md;
  //  second, convert to and use an OpenCL nb model, see
  //    https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/user_guides/opt/opt_bin.md.
  //
  /*  Uncomment code below to enable OpenCL
  bool is_opencl_backend_valid = ::IsOpenCLBackendValid();
  std::cout << "is_opencl_backend_valid:" << is_opencl_backend_valid <<
  std::endl;
  if (is_opencl_backend_valid) {
    // give opencl nb model dir
    config.set_model_from_file(model_dir);
  } else {
    std::cout << "Unsupport opencl nb model." << std::endl;
    exit(1);
    // you can give backup cpu nb model instead
    // config.set_model_from_file(cpu_nb_model_dir);
  }
  */

  // NOTE: To load a model transformed by model_optimize_tool before
  // release/v2.3.0, please use the `set_model_dir` API as listed below.
  // config.set_model_dir(model_dir);
  config.set_power_mode(static_cast<paddle::lite_api::PowerMode>(power_mode));

  // 2. Create PaddlePredictor by MobileConfig
  std::shared_ptr<PaddlePredictor> predictor =
      CreatePaddlePredictor<MobileConfig>(config);

  // 3. Prepare input data
  std::unique_ptr<Tensor> input_tensor(std::move(predictor->GetInput(0)));
  input_tensor->Resize(
      {input_shape[0], input_shape[1], input_shape[2], input_shape[3]});
  auto* data = input_tensor->mutable_data<float>();
  for (int i = 0; i < ShapeProduction(input_tensor->shape()); ++i) {
    data[i] = 1;
  }

  // 4. Run predictor
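  // Warm-up runs are not timed; they absorb one-time costs such as lazy
  // memory allocation and kernel preparation on the first inference.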
  for (size_t widx = 0; widx < warmup; ++widx) {
    predictor->Run();
  }

  double sum_duration = 0.0;  // milliseconds
  double max_duration = 1e-5;
  double min_duration = 1e5;
  double avg_duration = -1;
  for (size_t ridx = 0; ridx < repeats; ++ridx) {
    auto start = GetCurrentUS();

    predictor->Run();

    auto duration = (GetCurrentUS() - start) / 1000.0;
    sum_duration += duration;
    max_duration = duration > max_duration ? duration : max_duration;
    min_duration = duration < min_duration ? duration : min_duration;
    std::cout << "run_idx:" << ridx + 1 << " / " << repeats << ": " << duration
              << " ms" << std::endl;
  }
  avg_duration = sum_duration / static_cast<float>(repeats);
  std::cout << "\n======= benchmark summary =======\n"
            << "input_shape(NCHW):" << ShapePrint(input_shape) << "\n"
            << "model_dir:" << model_dir << "\n"
            << "warmup:" << warmup << "\n"
            << "repeats:" << repeats << "\n"
            << "max_duration:" << max_duration << "\n"
            << "min_duration:" << min_duration << "\n"
            << "avg_duration:" << avg_duration << "\n";

  // 5. Get output
  std::cout << "\n====== output summary ====== " << std::endl;
  size_t output_tensor_num = predictor->GetOutputNames().size();
  std::cout << "output tensor num:" << output_tensor_num << std::endl;

  for (size_t tidx = 0; tidx < output_tensor_num; ++tidx) {
    std::unique_ptr<const paddle::lite_api::Tensor> output_tensor =
        predictor->GetOutput(tidx);
    std::cout << "\n--- output tensor " << tidx << " ---" << std::endl;
    auto out_shape = output_tensor->shape();
    auto out_data = output_tensor->data<float>();
    auto out_mean = compute_mean<float>(out_data, ShapeProduction(out_shape));
    auto out_std_dev = compute_standard_deviation<float>(
        out_data, ShapeProduction(out_shape), true, out_mean);

    std::cout << "output shape(NCHW):" << ShapePrint(out_shape) << std::endl;
    std::cout << "output tensor " << tidx
              << " elem num:" << ShapeProduction(out_shape) << std::endl;
    std::cout << "output tensor " << tidx
              << " standard deviation:" << out_std_dev << std::endl;
    std::cout << "output tensor " << tidx << " mean value:" << out_mean
              << std::endl;

    // print output
    if (print_output_elem) {
      for (int i = 0; i < ShapeProduction(out_shape); ++i) {
        std::cout << "out[" << tidx << "][" << i
                  << "]:" << output_tensor->data<float>()[i] << std::endl;
      }
    }
  }
}

int main(int argc, char** argv) {
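  // Usage (the binary name and model path below are only an example):
  //   ./mobilenetv1_light_api mobilenet_v1.nb 1 3 224 224 100 10 0
  // Passing only the model path keeps the default shape and counts below.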
  shape_t input_shape{1, 3, 224, 224};  // shape_t ==> std::vector<int64_t>
  int repeats = 10;
  int warmup = 10;
  int print_output_elem = 0;

  if (argc < 2 || (argc > 2 && argc < 9)) {
    std::cerr << "usage: " << argv[0] << "\n"
              << "  <naive_buffer_model_dir>\n"
              << "  <input_n>\n"
              << "  <input_c>\n"
              << "  <input_h>\n"
              << "  <input_w>\n"
              << "  <repeats>\n"
              << "  <warmup>\n"
              << "  <print_output>" << std::endl;
    return 1;
  }

  std::string model_dir = argv[1];
  if (argc >= 9) {
    input_shape[0] = atoi(argv[2]);
    input_shape[1] = atoi(argv[3]);
    input_shape[2] = atoi(argv[4]);
    input_shape[3] = atoi(argv[5]);
    repeats = atoi(argv[6]);
    warmup = atoi(argv[7]);
    print_output_elem = atoi(argv[8]);
  }
  // set the ARM CPU power mode:
  //   0: bind to the big cluster (high performance)
  //   1: bind to the little cluster (low power)
  //   2: use all cores
  //   3: no core binding
  size_t power_mode = 0;

  RunModel(
      model_dir, input_shape, repeats, warmup, print_output_elem, power_mode);

  return 0;
}