// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <glog/logging.h>
#include <sys/time.h>
#include <algorithm>
#include <numeric>
#include <sstream>
#include <string>
#include <vector>
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/api/timer.h"

namespace paddle {
namespace inference {

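// Split `str` on `sep` and store the pieces in `pieces`; a trailing empty
// piece is discarded.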
static void split(const std::string &str, char sep,
                  std::vector<std::string> *pieces) {
  pieces->clear();
  if (str.empty()) {
    return;
  }
  size_t pos = 0;
  size_t next = str.find(sep, pos);
  while (next != std::string::npos) {
    pieces->push_back(str.substr(pos, next - pos));
    pos = next + 1;
    next = str.find(sep, pos);
  }
  if (!str.substr(pos).empty()) {
    pieces->push_back(str.substr(pos));
  }
}
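
// Split `str` on `sep` and parse each piece as a float.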
static void split_to_float(const std::string &str, char sep,
                           std::vector<float> *fs) {
  std::vector<std::string> pieces;
  split(str, sep, &pieces);
  std::transform(pieces.begin(), pieces.end(), std::back_inserter(*fs),
                 [](const std::string &v) { return std::stof(v); });
}
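
// Split `str` on `sep` and parse each piece as an int64_t.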
static void split_to_int64(const std::string &str, char sep,
                           std::vector<int64_t> *is) {
  std::vector<std::string> pieces;
  split(str, sep, &pieces);
  std::transform(pieces.begin(), pieces.end(), std::back_inserter(*is),
                 [](const std::string &v) { return std::stoll(v); });
}
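
// Join the elements of a vector into a space-separated string.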
template <typename T>
std::string to_string(const std::vector<T> &vec) {
  std::stringstream ss;
  for (const auto &c : vec) {
    ss << c << " ";
  }
  return ss.str();
}
template <>
std::string to_string<std::vector<float>>(
    const std::vector<std::vector<float>> &vec);

template <>
std::string to_string<std::vector<std::vector<float>>>(
    const std::vector<std::vector<std::vector<float>>> &vec);

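// Flatten the rows of `data` into the tensor's buffer, which is resized
// according to the tensor's shape.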
template <typename T>
static void TensorAssignData(PaddleTensor *tensor,
                             const std::vector<std::vector<T>> &data) {
  // Assign buffer
  int dim = std::accumulate(tensor->shape.begin(), tensor->shape.end(), 1,
                            [](int a, int b) { return a * b; });
  tensor->data.Resize(sizeof(T) * dim);
  int c = 0;
  for (const auto &f : data) {
    for (T v : f) {
      static_cast<T *>(tensor->data.data())[c++] = v;
    }
  }
}

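// Return a human-readable summary of a tensor: name, dtype, shape, LoD and
// data.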
static std::string DescribeTensor(const PaddleTensor &tensor) {
  std::stringstream os;
  os << "Tensor [" << tensor.name << "]\n";
  os << " - type: ";
  switch (tensor.dtype) {
    case PaddleDType::FLOAT32:
      os << "float32";
      break;
    case PaddleDType::INT64:
      os << "int64";
      break;
    default:
      os << "unset";
  }
  os << '\n';

  os << " - shape: " << to_string(tensor.shape) << '\n';
  os << " - lod: ";
  for (auto &l : tensor.lod) {
    os << to_string(l) << "; ";
  }
  os << "\n";
  os << " - data: ";

  int dim = std::accumulate(tensor.shape.begin(), tensor.shape.end(), 1,
                            [](int a, int b) { return a * b; });
  for (int i = 0; i < dim; i++) {
    os << static_cast<float *>(tensor.data.data())[i] << " ";
  }
  os << '\n';
  return os.str();
}

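// Log timing statistics of a benchmark run; when epoch > 1, also log the
// average latency per sample.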
static void PrintTime(int batch_size, int repeat, int num_threads, int tid,
                      double latency, int epoch = 1) {
  LOG(INFO) << "====== batch_size: " << batch_size << ", repeat: " << repeat
            << ", threads: " << num_threads << ", thread id: " << tid
            << ", latency: " << latency << "ms ======";
  if (epoch > 1) {
    int samples = batch_size * epoch;
    LOG(INFO) << "====== sample number: " << samples
              << ", average latency of each sample: " << latency / samples
              << "ms ======";
  }
}

}  // namespace inference
}  // namespace paddle