/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <string>
#include <vector>
#include "LayerGradUtil.h"
#include "paddle/legacy/gserver/layers/MKLDNNBase.h"
#include "paddle/legacy/gserver/layers/MKLDNNLayer.h"

namespace paddle {

/**
 * @brief Test the functionality of MKLDNN layers and MKLDNN activations
 * by comparing their results against the original Paddle implementations.
 */
class MKLDNNTester {
  enum {
    DNN = 0,  // MKLDNN layer
    REF = 1,  // Reference layer
    NUM = 2,  // total number of layers under comparison
  };

  struct DataIn {
    std::vector<std::vector<Argument>> inArgs;
    std::vector<std::vector<MatrixPtr>> outGrads;
    std::vector<VectorPtr> paraValues;
  };

  struct DataOut {
    std::vector<MatrixPtr> outValues;
    std::vector<VectorPtr> paraValues;
  };

 protected:
  std::vector<TestConfig> configs_;
  vector<string> layerNames_;
  vector<vector<DataLayerPtr>> dataLayers_;
  vector<vector<Argument>> datas_;
  vector<LayerMap> layerMaps_;
  vector<vector<ParameterPtr>> parameters_;
  vector<LayerPtr> testLayers_;
  LayerPtr refLayer_, dnnLayer_;

  /// run several iterations; all the results should pass
  size_t iter_;
  /// whether to print out the details
  bool log_;
  /// epsilon
  float eps_;
  /// input image size, default 1
  size_t ih_, iw_;
  /// passType, PASS_TRAIN, PASS_TEST or PASS_GC (Gradient Check pass)
  PassType passType_;

 public:
  explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) {
    iter_ = iter;
    eps_ = epsilon;
    log_ = false;
    passType_ = PASS_TRAIN;
  }

  ~MKLDNNTester() {}

 public:
  void run(const TestConfig& dnn,
           const TestConfig& ref,
           size_t batchSize,
           size_t inputImgH = 1,
           size_t inputImgW = 1,
           PassType passType = PASS_TRAIN,
           bool printDetails = false,
           size_t iter = 3,
           float epsilon = 1e-4);
  static void runNetTest(const std::string& configPath,
                         size_t iter = 2,
                         float eps = 1e-4);
  static void initArgument(DataIn& data,
                           const std::string& configPath,
                           size_t iter = 2);
  static void getOutResult(const std::string& configPath,
                           DataIn& in,
                           DataOut& out,
                           bool use_mkldnn,
                           size_t iter = 2);
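
  // The static entry points above compose into a whole-network check; a
  // rough sketch of that flow (an assumption for illustration, not mandated
  // by this header) would be:
  //
  //   DataIn in;
  //   DataOut refOut, dnnOut;
  //   initArgument(in, configPath, iter);
  //   getOutResult(configPath, in, refOut, /*use_mkldnn=*/false, iter);
  //   getOutResult(configPath, in, dnnOut, /*use_mkldnn=*/true, iter);
  //   compareResult(refOut, dnnOut, eps);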

 private:
  void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize);
  void setInputImgSize();
  void runOnce();

  void randomWgtDatas();
  void randomBotDatas();
  void randomTopDiffs();

  void checkForward();
  void checkBackwardData();
  void checkBackwardWgts();

  // clear the given layer; clear all layers when id equals NUM
  void clearWgtDiffs(size_t id = NUM);
  void clearBotDiffs(size_t id = NUM);
  void clearTopDatas(size_t id = NUM);

  void printTopDatas();
  void printMatrix(const MatrixPtr& m);
  void printVector(const VectorPtr& v);

  void saveWgt(const vector<ParameterPtr>& from, vector<VectorPtr>& to);
  void restoreWgt(const vector<VectorPtr>& from, vector<ParameterPtr>& to);

  static double compareMatrix(const MatrixPtr& m1, const MatrixPtr& m2);
  static double compareVector(const VectorPtr& v1, const VectorPtr& v2);
  static void compareResult(DataOut& ref, DataOut& dnn, float eps = 1e-4);

  /**
   * Get the delta percent.
   * If more than failRate of the points are wrong, i.e.
   * abs(val - ref) / abs(ref) > thres, then return max(diff / ref);
   * otherwise return sum(abs(diff)) / sum(abs(ref)).
   * The return value should be smaller than eps when the check passes.
   */
  static double getDelta(const real* refer,
                         const real* value,
                         size_t len,
                         const float failRate = 1e-3,
                         const float thres = 0.1);
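
  // A minimal sketch of the comparison described above (illustrative only,
  // not necessarily the actual implementation; it ignores the corner case
  // where sum(abs(ref)) is zero):
  //
  //   size_t failCnt = 0;
  //   double maxRatio = 0, sumDiff = 0, sumRef = 0;
  //   for (size_t i = 0; i < len; ++i) {
  //     double ref = std::fabs(refer[i]);
  //     double diff = std::fabs(value[i] - refer[i]);
  //     double ratio = (ref == 0) ? diff : diff / ref;
  //     maxRatio = std::max(maxRatio, ratio);
  //     if (ratio > thres) ++failCnt;
  //     sumDiff += diff;
  //     sumRef += ref;
  //   }
  //   return failCnt > failRate * len ? maxRatio : sumDiff / sumRef;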
};
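
// Example of how a layer test might drive MKLDNNTester (a sketch only; the
// configs, sizes, and config path below are hypothetical and not defined in
// this header):
//
//   TestConfig dnnConfig;   // describes the MKLDNN layer under test
//   TestConfig refConfig;   // describes the CPU reference layer
//   MKLDNNTester tester(/*iter=*/3, /*epsilon=*/1e-4);
//   tester.run(dnnConfig, refConfig, /*batchSize=*/64,
//              /*inputImgH=*/28, /*inputImgW=*/28);
//
// Whole-network checks go through the static entry point instead:
//
//   MKLDNNTester::runNetTest("path/to/net_config.py", /*iter=*/2,
//                            /*eps=*/1e-4);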

}  //  namespace paddle