// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <fstream>
#include <iostream>
#include <string>
#include <unordered_map>

// #include "lite/backends/fpga/lite_tensor.h"
#include "lite/core/tensor.h"

namespace paddle {
namespace lite {

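// When FPGA_PRINT_TENSOR is defined, FPGA kernels can register their output
// tensors with the Debugger singleton below so they get dumped to files.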
#define FPGA_PRINT_TENSOR

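// Singleton that saves the output tensors of selected op types (see
// op_config) to text files for debugging.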
class Debugger {
 public:
  static Debugger& get_instance() {
    static Debugger s_instance;
    return s_instance;
  }

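  // Saves `tensor` to a file named after `op_type` when dumping is enabled
  // for that op type in op_config.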
  void registerOutput(std::string op_type, zynqmp::Tensor* tensor) {
    // tensor->printScale();
    // if (op_type != "conv") {
    //   // tensor->saveToFile(op_type, true);
    // }
    if (op_config[op_type]) {
      tensor->saveToFile(op_type, true);
    }
  }

 private:
  std::unordered_map<std::string, bool> op_config;
  Debugger() {
    op_config["concat"] = true;
    op_config["conv"] = true;
    op_config["crop"] = true;
    op_config["fetch"] = true;
    op_config["fc"] = true;
    op_config["softmax"] = true;
  }
};
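
// Typical use inside an FPGA op kernel (a sketch; the exact call sites live
// in the kernels themselves, and `output_tensor` is a placeholder name):
//   Debugger::get_instance().registerOutput("conv", output_tensor);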

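// Converts the data of `t` from NCHW layout to NHWC, writing the result
// into the caller-provided buffer `dst`.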
inline void chw_to_hwc(Tensor* t, float* dst) {
  int num = t->dims()[0];
  int channel = t->dims()[1];

  int height = 1;
  int width = 1;
  if (t->dims().size() > 2) {
    height = t->dims()[2];
  }
  if (t->dims().size() > 3) {
    width = t->dims()[3];
  }
  // int width = t->dims()[3];
  const float* chw_data = t->data<float>();
  float* hwc_data = dst;

  int chw = channel * height * width;
  int wc = width * channel;
  int index = 0;
  for (int n = 0; n < num; n++) {
    for (int c = 0; c < channel; c++) {
      for (int h = 0; h < height; h++) {
        for (int w = 0; w < width; w++) {
          hwc_data[n * chw + h * wc + w * channel + c] = chw_data[index];
          index++;
        }
      }
    }
  }
}

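// Fills `t` with whitespace-separated float values read from the text file
// at `path`; the tensor is left untouched if the file cannot be opened.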
inline void read_from_file(lite::Tensor* t, const std::string& path) {
  std::ifstream file_stream;
  file_stream.open(path);
  if (!file_stream) {
    return;
  }
  float* data = t->mutable_data<float>();
  int num = t->numel();
  for (int i = 0; i < num; ++i) {
    float value = 0;
    file_stream >> value;
    data[i] = value;
  }
  // flush();
}

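// Writes `len` floats, one value per line, to a text file named
// "arm_<counter><name>", where <counter> is a zero-padded call counter.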
inline void save_float(float* data, const std::string& name, int len) {
  // return;
  static int counter = 0;
  std::string old_string = std::to_string(counter);
  // Zero-pad the counter to at least three digits (e.g. 7 -> "007").
  std::string new_string = old_string.length() < 3
      ? std::string(3 - old_string.length(), '0') + old_string
      : old_string;

  std::string file = "arm_" + new_string + name;
  counter++;

  std::cout
      << "-------------------------- saving file: --------------------------"
      << file << std::endl;
  std::ofstream ofs;
  ofs.open(file);
  // float* data = dst;
  for (int i = 0; i < len; i++) {
    float value = data[i];
    ofs << value << std::endl;
  }
  ofs.close();
}

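// Dumps `t` to a text file via save_float, converting from CHW to HWC
// layout first when `convert` is true.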
inline void save_tensor(lite::Tensor* t,
                        const std::string& name,
                        bool convert = true) {
  float* data = const_cast<float*>(t->data<float>());
  float* dst = new float[t->numel()];
  if (convert) {
    chw_to_hwc(t, dst);
    data = dst;
  }

  save_float(data, name, t->numel());
  delete[] dst;
}

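// Overload of save_tensor for const tensors.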
inline void save_tensor(const lite::Tensor* t,
                        const std::string& name,
                        bool convert = true) {
  // return;
  float* data = const_cast<float*>(t->data<float>());
  float* dst = new float[t->numel()];
  if (convert) {
    chw_to_hwc(const_cast<lite::Tensor*>(t), dst);
    data = dst;
  }

  save_float(data, name, t->numel());

  delete[] dst;
}
}  // namespace lite
}  // namespace paddle