#include "../test_include.h"
#include "operators/lrn_op.h"

namespace paddle_mobile {
namespace framework {

template <typename Dtype>
class TestLrnOp {
 public:
  explicit TestLrnOp(const Program<Dtype> p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
    } else {
      to_predict_program_ = program_.originProgram;
    }

    const std::vector<std::shared_ptr<BlockDesc>> blocks =
        to_predict_program_->Blocks();
    // DLOG << " **block size " << blocks.size();
    for (int i = 0; i < blocks.size(); ++i) {
      std::shared_ptr<BlockDesc> block_desc = blocks[i];
      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
      // DLOG << " ops " << ops.size();
      for (int j = 0; j < ops.size(); ++j) {
        std::shared_ptr<OpDesc> op = ops[j];
        if (op->Type() == "lrn" && op->Input("X")[0] == "pool2d_0.tmp_0") {
          DLOG << " lrn attr size: " << op->GetAttrMap().size();
          DLOG << " inputs size: " << op->GetInputs().size();
          DLOG << " outputs size: " << op->GetOutputs().size();
          DLOG << " Input X is : " << op->Input("X")[0];
          DLOG << " Output Out is : " << op->Output("Out")[0];
          DLOG << " n : " << op->GetAttrMap().at("n").Get<int>();
          DLOG << " alpha : " << op->GetAttrMap().at("alpha").Get<float>();
          DLOG << " beta : " << op->GetAttrMap().at("beta").Get<float>();
          DLOG << " k : " << op->GetAttrMap().at("k").Get<float>();
          std::shared_ptr<operators::LrnOp<Dtype, float>> lrn =
              std::make_shared<operators::LrnOp<Dtype, float>>(
                  op->Type(), op->GetInputs(), op->GetOutputs(),
                  op->GetAttrMap(), program_.scope);
          ops_of_block_[*block_desc.get()].push_back(lrn);
        }
      }
    }
  }

  std::shared_ptr<Tensor> predict_lrn(Tensor &t1) {
    // Feed: bind the input tensor to the scope variable the op reads from.
    auto scope = program_.scope;
    Variable *x1_feed_value = scope->Var("pool2d_0.tmp_0");
    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
    tensor_x1->ShareDataWith(t1);

    Variable *con_output = scope->Var("pool1_norm1.tmp_1");
    auto *output_tensor = con_output->GetMutable<Tensor>();
    output_tensor->mutable_data<float>({3, 4, 2, 2});
    // DLOG << typeid(output_tensor).name();
    // DLOG << "output_tensor dims: " << output_tensor->dims();

    // Wrap the scope-owned output tensor without taking ownership; a no-op
    // deleter prevents a double free when the scope is destroyed.
    std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});

    predict_lrn(t1, 0);
    return out_tensor;
  }

 private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<operators::LrnOp<Dtype, float>>>>
      ops_of_block_;
  bool use_optimize_ = false;

  void predict_lrn(const Tensor &t1, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
    }
  }
};

template class TestLrnOp<CPU>;
}  // namespace framework
}  // namespace paddle_mobile

int main() {
  DLOG << "----------**********----------";
  DLOG << "begin to run LrnOp Test";
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string("../../test/models/googlenet"));

  /// input x (3,4,2,2)
  paddle_mobile::framework::Tensor inputx1;
  SetupTensor<float>(&inputx1, {3, 4, 2, 2}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *inputx1_ptr = inputx1.data<float>();

  paddle_mobile::framework::TestLrnOp<paddle_mobile::CPU> testLrnOp(program);

  auto output_lrn = testLrnOp.predict_lrn(inputx1);
  auto *output_lrn_ptr = output_lrn->data<float>();

  DLOG << " LrnOp input: ";
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 4; j++) {
      for (int c = 0; c < 2; c++) {
        for (int d = 0; d < 2; d++) {
          // NCHW layout: batch stride is 4 * 2 * 2 = 16, channel stride 4.
          DLOGF("%f ", inputx1_ptr[i * 16 + j * 4 + c * 2 + d]);
        }
        DLOGF("\n");
      }
      DLOGF("\n");
    }
    DLOGF("\n");
  }

  DLOG << " LrnOp output: ";
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 4; j++) {
      for (int c = 0; c < 2; c++) {
        for (int d = 0; d < 2; d++) {
          DLOGF("%f ", output_lrn_ptr[i * 16 + j * 4 + c * 2 + d]);
        }
        DLOGF("\n");
      }
      DLOGF("\n");
    }
    DLOGF("\n");
  }
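  // Sanity check on the first output element. LRN normalizes each value by
  // the squared activations in a cross-channel window:
  //
  //   out[c] = in[c] / (k + alpha * sum over window(c) of in[c']^2)^beta
  //
  // For element (0, 0, 0, 0), assuming GoogLeNet's usual LRN attributes
  // (n = 5, k = 1, beta = 0.75, effective alpha 0.00002 as printed below),
  // the window around channel 0 clips to channels 0..2; with a channel
  // stride of 4 those are inputx1_ptr[0], inputx1_ptr[4], inputx1_ptr[8].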
  DLOG << inputx1_ptr[0] << " / ((1 + 0.00002 * ( " << inputx1_ptr[0]
       << "^2 + " << inputx1_ptr[4] << "^2 + " << inputx1_ptr[8]
       << "^2 ))^0.75) = ";
  DLOG << output_lrn_ptr[0];
  return 0;
}