test_batchnorm_op.cpp

/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "../test_helper.h"
#include "../test_include.h"
#include "operators/batchnorm_op.h"

namespace paddle_mobile {
namespace framework {

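// Harness that pulls the batch_norm op whose input X is "conv2d_5.tmp_0"
// out of a loaded program and runs it in isolation.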
template <typename Dtype>
class TestBatchNormOp {
 public:
  explicit TestBatchNormOp(const Program<Dtype> &p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
    } else {
      to_predict_program_ = program_.originProgram;
    }

    const std::vector<std::shared_ptr<BlockDesc>> blocks =
        to_predict_program_->Blocks();
    //  DLOG << " **block size " << blocks.size();
    for (size_t i = 0; i < blocks.size(); ++i) {
      std::shared_ptr<BlockDesc> block_desc = blocks[i];
      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
      //    DLOG << " ops " << ops.size();
      for (size_t j = 0; j < ops.size(); ++j) {
        std::shared_ptr<OpDesc> op = ops[j];
        if (op->Type() == "batch_norm" &&
            op->Input("X")[0] == "conv2d_5.tmp_0") {
          DLOG << " batch_norm attr size: " << op->GetAttrMap().size();
          DLOG << " inputs size: " << op->GetInputs().size();
          DLOG << " outputs size: " << op->GetOutputs().size();
          DLOG << " Input X is : " << op->Input("X")[0];
          DLOG << " Input Mean is : " << op->Input("Mean")[0];
          DLOG << " Input Variance is : " << op->Input("Variance")[0];
          DLOG << " Input Scale is : " << op->Input("Scale")[0];
          DLOG << " Input Bias is : " << op->Input("Bias")[0];
          DLOG << " Output Y is : " << op->Output("Y")[0];
          DLOG << " epsilon : " << op->GetAttrMap().at("epsilon").Get<float>();
          std::shared_ptr<operators::BatchNormOp<Dtype, float>> bn =
              std::make_shared<operators::BatchNormOp<Dtype, float>>(
                  op->Type(), op->GetInputs(), op->GetOutputs(),
                  op->GetAttrMap(), program_.scope);
          ops_of_block_[*block_desc.get()].push_back(bn);
        }
      }
    }
  }

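  // Binds the five input tensors to the scope variables the extracted op
  // reads (x, mean, scale, variance, bias), runs it, and returns output Y.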
  std::shared_ptr<Tensor> predict_bn(const Tensor &t1, const Tensor &t2,
                                     const Tensor &t3, const Tensor &t4,
                                     const Tensor &t5) {
    // feed
    auto scope = program_.scope;
    Variable *x1_feed_value = scope->Var("conv2d_5.tmp_0");
    auto tensor_x1 = x1_feed_value->GetMutable<LoDTensor>();
    tensor_x1->ShareDataWith(t1);

    Variable *mean_feed_value = scope->Var("batch_norm_10.w_1");
    auto tensor_mean = mean_feed_value->GetMutable<LoDTensor>();
    tensor_mean->ShareDataWith(t2);

    Variable *scale_feed_value = scope->Var("batch_norm_10.w_0");
    auto tensor_scale = scale_feed_value->GetMutable<LoDTensor>();
    tensor_scale->ShareDataWith(t3);

    Variable *variance_feed_value = scope->Var("batch_norm_10.w_2");
    auto tensor_variance = variance_feed_value->GetMutable<LoDTensor>();
    tensor_variance->ShareDataWith(t4);

    Variable *bias_feed_value = scope->Var("batch_norm_10.b_0");
    auto tensor_bias = bias_feed_value->GetMutable<LoDTensor>();
    tensor_bias->ShareDataWith(t5);

    Variable *output = scope->Var("batch_norm_10.tmp_2");
    auto *output_tensor = output->GetMutable<LoDTensor>();
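    // Pre-allocate the output buffer; batch_norm_10.tmp_2 has shape
    // {1, 256, 38, 38} in this model.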
    output_tensor->mutable_data<float>({1, 256, 38, 38});
    //  DLOG << typeid(output_tensor).name();
    //  DLOG << "output_tensor dims: " << output_tensor->dims();

    // output_tensor is owned by the scope, so expose it through a
    // non-owning shared_ptr (no-op deleter) to avoid a double free.
    std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});

    predict_bn(t1, t2, t3, t4, t5, 0);
    return out_tensor;
  }

 private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
      ops_of_block_;
  bool use_optimize_ = false;

  // Runs, in order, every op collected for block `block_id`.
  void predict_bn(const Tensor &t1, const Tensor &t2, const Tensor &t3,
                  const Tensor &t4, const Tensor &t5, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (size_t j = 0; j < ops_of_block_[*to_predict_block.get()].size();
         ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
    }
  }
};

template class TestBatchNormOp<CPU>;
}  // namespace framework
}  // namespace paddle_mobile

int main() {
  DLOG << "----------**********----------";
  DLOG << "begin to run BatchNormOp Test";
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
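  // Load the MobileNet-SSD program; the harness targets its batch_norm op
  // fed by conv2d_5.tmp_0.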
  auto program = loader.Load(std::string(g_mobilenet_ssd));

  /// input x (1, 256, 38, 38)
  paddle_mobile::framework::Tensor inputx1;
  SetupTensor<float>(&inputx1, {1, 256, 38, 38}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *inputx1_ptr = inputx1.data<float>();

  // The remaining inputs are per-channel parameters, hence shape {256}.
  paddle_mobile::framework::Tensor mean;
  SetupTensor<float>(&mean, {256}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *mean_ptr = mean.data<float>();

  paddle_mobile::framework::Tensor scale;
  SetupTensor<float>(&scale, {256}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *scale_ptr = scale.data<float>();

  paddle_mobile::framework::Tensor variance;
  SetupTensor<float>(&variance, {256}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *variance_ptr = variance.data<float>();

  paddle_mobile::framework::Tensor bias;
  SetupTensor<float>(&bias, {256}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *bias_ptr = bias.data<float>();

  paddle_mobile::framework::TestBatchNormOp<paddle_mobile::CPU> testBatchNormOp(
      program);

  auto output_bn =
      testBatchNormOp.predict_bn(inputx1, mean, scale, variance, bias);
  auto *output_bn_ptr = output_bn->data<float>();

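  // Spot-check element 0 against
  // y = (x - mean) / sqrt(variance + epsilon) * scale + bias, epsilon = 1e-5.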
  DLOG << " (" << inputx1_ptr[0] << " - " << mean_ptr[0] << ") / (("
       << variance_ptr[0] << " + 0.00001)^0.5) * " << scale_ptr[0] << " + "
       << bias_ptr[0] << " = ";
  DLOG << output_bn_ptr[0];

  DLOG << "input_ptr 0 : " << inputx1_ptr[0];
  DLOG << "output_ptr 0 : " << output_bn_ptr[0];

  return 0;
}