/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "../test_include.h"
#include "operators/lrn_op.h"

namespace paddle_mobile {
namespace framework {

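// Test harness for the "lrn" operator: scans the loaded program for an lrn
// op, instantiates it, and exposes a feed/run/fetch helper around it.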
template <typename Dtype> class TestLrnOp {
public:
  explicit TestLrnOp(const Program<Dtype> &p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
    } else {
      to_predict_program_ = program_.originProgram;
    }

    const std::vector<std::shared_ptr<BlockDesc>> blocks =
        to_predict_program_->Blocks();
    //  DLOG << " **block size " << blocks.size();
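    // Walk every block and every op description, looking for the lrn op
    // that consumes the pooling output "pool2d_0.tmp_0".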
    for (size_t i = 0; i < blocks.size(); ++i) {
      std::shared_ptr<BlockDesc> block_desc = blocks[i];
      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
      //    DLOG << " ops " << ops.size();
      for (size_t j = 0; j < ops.size(); ++j) {
        std::shared_ptr<OpDesc> op = ops[j];
        if (op->Type() == "lrn" && op->Input("X")[0] == "pool2d_0.tmp_0") {
          DLOG << " lrn attr size: " << op->GetAttrMap().size();
          DLOG << " inputs size: " << op->GetInputs().size();
          DLOG << " outputs size: " << op->GetOutputs().size();
          DLOG << " Input X is : " << op->Input("X")[0];
          DLOG << " Output Out is : " << op->Output("Out")[0];
          DLOG << " n : " << op->GetAttrMap().at("n").Get<int>();
          DLOG << " alpha : " << op->GetAttrMap().at("alpha").Get<float>();
          DLOG << " beta : " << op->GetAttrMap().at("beta").Get<float>();
          DLOG << " k : " << op->GetAttrMap().at("k").Get<float>();
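          // Instantiate the operator from its parsed description and cache
          // it, keyed by its owning block, for later execution.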
          std::shared_ptr<operators::LrnOp<Dtype, float>> lrn =
              std::make_shared<operators::LrnOp<Dtype, float>>(
                  op->Type(), op->GetInputs(), op->GetOutputs(),
                  op->GetAttrMap(), program_.scope);
          ops_of_block_[*block_desc].push_back(lrn);
        }
      }
    }
  }

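  // Feeds t1 into the graph's input variable, runs the cached ops of block
  // 0, and returns a non-owning handle to the output tensor.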
  std::shared_ptr<Tensor> predict_lrn(Tensor &t1) {
    // feed
    auto scope = program_.scope;
    Variable *x1_feed_value = scope->Var("pool2d_0.tmp_0");
    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
    tensor_x1->ShareDataWith(t1);

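    // Pre-allocate the output variable the lrn op writes to; LRN preserves
    // shape, so the output is also {3, 4, 2, 2}.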
    Variable *con_output = scope->Var("pool1_norm1.tmp_1");
    auto *output_tensor = con_output->GetMutable<Tensor>();
    output_tensor->mutable_data<float>({3, 4, 2, 2});
    //  DLOG << typeid(output_tensor).name();
    //  DLOG << "output_tensor dims: " << output_tensor->dims();

    // output_tensor is owned by the scope; wrap it in a non-owning
    // shared_ptr (no-op deleter) so it is not destroyed twice.
    std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});

    predict_lrn(t1, 0);
    return out_tensor;
  }

private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
      ops_of_block_;
  bool use_optimize_ = false;

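  // Runs every cached op of the given block in the order they were found.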
  void predict_lrn(const Tensor &t1, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (size_t j = 0; j < ops_of_block_[*to_predict_block].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block][j];
      DLOG << "op -> run()";
      op->Run();
    }
  }
};

template class TestLrnOp<CPU>;
} // namespace framework
} // namespace paddle_mobile

int main() {
  DLOG << "----------**********----------";
  DLOG << "begin to run LrnOp Test";
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string("../../test/models/googlenet"));

  /// input x (3,4,2,2)
  paddle_mobile::framework::Tensor inputx1;
  SetupTensor<float>(&inputx1, {3, 4, 2, 2}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *inputx1_ptr = inputx1.data<float>();

  paddle_mobile::framework::TestLrnOp<paddle_mobile::CPU> testLrnOp(program);

  auto output_lrn = testLrnOp.predict_lrn(inputx1);
  auto *output_lrn_ptr = output_lrn->data<float>();

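  // Dump both {3,4,2,2} tensors element by element; strides in the flat
  // buffer are 16 per sample, 4 per channel, and 2 per row.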
  DLOG << " LrnOp input: ";
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 4; j++) {
      for (int c = 0; c < 2; c++) {
        for (int d = 0; d < 2; d++) {
          DLOGF("%f ", inputx1_ptr[i * 16 + j * 4 + c * 2 + d]);
        }
        DLOGF("\n");
      }
      DLOGF("\n");
    }
    DLOGF("\n");
  }
  DLOG << " LrnOp output: ";
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 4; j++) {
      for (int c = 0; c < 2; c++) {
        for (int d = 0; d < 2; d++) {
          DLOGF("%f ", output_lrn_ptr[i * 16 + j * 4 + c * 2 + d]);
        }
        DLOGF("\n");
      }
      DLOGF("\n");
    }
    DLOGF("\n");
  }
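  // Spot-check one element against the expected LRN formula. The printed
  // coefficient 0.00002 matches alpha / n for the GoogleNet attributes
  // (alpha = 0.0001, n = 5, beta = 0.75, k = 1), and offsets 0, 4, 8 pick
  // the same spatial position in channels 0, 1, 2 of the input.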
  DLOG << inputx1_ptr[0] << " / ((1 + 0.00002 * ( " << inputx1_ptr[0] << "^2 + "
       << inputx1_ptr[4] << "^2 + " << inputx1_ptr[8] << "^2 ))^0.75) = ";
  DLOG << output_lrn_ptr[0];
  return 0;
}