#pragma once

#include "../test_include.h"
#include "operators/batchnorm_op.h"

namespace paddle_mobile {
namespace framework {

template <typename Dtype>
class TestBatchNormOp {
 public:
  explicit TestBatchNormOp(const Program<Dtype> p) : program_(p) {
    if (use_optimize_) {
      to_predict_program_ = program_.optimizeProgram;
    } else {
      to_predict_program_ = program_.originProgram;
    }

    const std::vector<std::shared_ptr<BlockDesc>> blocks =
        to_predict_program_->Blocks();
    // DLOG << " **block size " << blocks.size();
    for (int i = 0; i < blocks.size(); ++i) {
      std::shared_ptr<BlockDesc> block_desc = blocks[i];
      std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
      // DLOG << " ops " << ops.size();
      for (int j = 0; j < ops.size(); ++j) {
        std::shared_ptr<OpDesc> op = ops[j];
        if (op->Type() == "batch_norm" &&
            op->Input("X")[0] == "conv2d_0.tmp_0") {
          DLOG << " batch_norm attr size: " << op->GetAttrMap().size();
          DLOG << " inputs size: " << op->GetInputs().size();
          DLOG << " outputs size: " << op->GetOutputs().size();
          DLOG << " Input X is : " << op->Input("X")[0];
          DLOG << " Input Mean is : " << op->Input("Mean")[0];
          DLOG << " Input Variance is : " << op->Input("Variance")[0];
          DLOG << " Input Scale is : " << op->Input("Scale")[0];
          DLOG << " Input Bias is : " << op->Input("Bias")[0];
          DLOG << " Output Y is : " << op->Output("Y")[0];
          DLOG << " epsilon : " << op->GetAttrMap().at("epsilon").Get<float>();
          std::shared_ptr<operators::BatchNormOp<Dtype, float>> bn_op =
              std::make_shared<operators::BatchNormOp<Dtype, float>>(
                  op->Type(), op->GetInputs(), op->GetOutputs(),
                  op->GetAttrMap(), program_.scope);
          ops_of_block_[*block_desc.get()].push_back(bn_op);
        }
      }
    }
  }

  std::shared_ptr<Tensor> predict_bn(Tensor &t1, Tensor &t2, Tensor &t3,
                                     Tensor &t4, Tensor &t5) {
    // feed the inputs into the scope under the names the op expects
    auto scope = program_.scope;
    Variable *x1_feed_value = scope->Var("conv2d_0.tmp_0");
    auto tensor_x1 = x1_feed_value->GetMutable<Tensor>();
    tensor_x1->ShareDataWith(t1);

    Variable *mean_feed_value = scope->Var("batch_norm_0.w_1");
    auto tensor_mean = mean_feed_value->GetMutable<Tensor>();
    tensor_mean->ShareDataWith(t2);

    Variable *scale_feed_value = scope->Var("batch_norm_0.w_0");
    auto tensor_scale = scale_feed_value->GetMutable<Tensor>();
    tensor_scale->ShareDataWith(t3);

    Variable *variance_feed_value = scope->Var("batch_norm_0.w_2");
    auto tensor_variance = variance_feed_value->GetMutable<Tensor>();
    tensor_variance->ShareDataWith(t4);

    Variable *bias_feed_value = scope->Var("batch_norm_0.b_0");
    auto tensor_bias = bias_feed_value->GetMutable<Tensor>();
    tensor_bias->ShareDataWith(t5);

    Variable *output = scope->Var("batch_norm_0.tmp_2");
    auto *output_tensor = output->GetMutable<Tensor>();
    output_tensor->mutable_data<float>({4, 10, 2, 2});
    // DLOG << typeid(output_tensor).name();
    // DLOG << "output_tensor dims: " << output_tensor->dims();

    // The output tensor is owned by the scope, so return it through a
    // non-owning shared_ptr to avoid a double free.
    std::shared_ptr<Tensor> out_tensor(output_tensor, [](Tensor *) {});

    predict_bn(t1, t2, t3, t4, t5, 0);
    return out_tensor;
  }

 private:
  const framework::Program<Dtype> program_;
  std::shared_ptr<ProgramDesc> to_predict_program_;
  std::map<framework::BlockDesc,
           std::vector<std::shared_ptr<OperatorBase<Dtype>>>>
      ops_of_block_;
  bool use_optimize_ = false;

  void predict_bn(const Tensor &t1, const Tensor &t2, const Tensor &t3,
                  const Tensor &t4, const Tensor &t5, int block_id) {
    std::shared_ptr<BlockDesc> to_predict_block =
        to_predict_program_->Block(block_id);
    for (int j = 0; j < ops_of_block_[*to_predict_block.get()].size(); ++j) {
      auto op = ops_of_block_[*to_predict_block.get()][j];
      DLOG << "op -> run()";
      op->Run();
    }
  }
};

template class TestBatchNormOp<CPU>;
}  // namespace framework
}  // namespace paddle_mobile

int main() {
  DLOG << "----------**********----------";
  DLOG << "begin to run BatchNormOp Test";
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  auto program = loader.Load(std::string(
      "../../test/models/image_classification_resnet.inference.model"));

  /// input x (4,10,2,2)
  paddle_mobile::framework::Tensor inputx1;
  SetupTensor<float>(&inputx1, {4, 10, 2, 2}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *inputx1_ptr = inputx1.data<float>();

  paddle_mobile::framework::Tensor mean;
  SetupTensor<float>(&mean, {10}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *mean_ptr = mean.data<float>();

  paddle_mobile::framework::Tensor scale;
  SetupTensor<float>(&scale, {10}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *scale_ptr = scale.data<float>();

  paddle_mobile::framework::Tensor variance;
  SetupTensor<float>(&variance, {10}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *variance_ptr = variance.data<float>();

  paddle_mobile::framework::Tensor bias;
  SetupTensor<float>(&bias, {10}, static_cast<float>(0),
                     static_cast<float>(1));
  auto *bias_ptr = bias.data<float>();

  paddle_mobile::framework::TestBatchNormOp<paddle_mobile::CPU>
      testBatchNormOp(program);

  auto output_bn =
      testBatchNormOp.predict_bn(inputx1, mean, scale, variance, bias);
  auto *output_bn_ptr = output_bn->data<float>();

  /// flat index 102 corresponds to element [2, 5, 1, 0] of the {4, 10, 2, 2}
  /// input (2*10*2*2 + 5*2*2 + 1*2 + 0 = 102), i.e. channel 5
  DLOG << " (" << inputx1_ptr[102] << " - " << mean_ptr[5] << ")/(("
       << variance_ptr[5] << " + 0.00001"
       << ")^0.5)* " << scale_ptr[5] << " + " << bias_ptr[5] << " = ";
  DLOG << output_bn_ptr[102];
  return 0;
}
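
// --- Reference sketch (not part of the original test) ----------------------
// Inference-mode batch norm computes, per channel,
//   y = (x - mean) / sqrt(variance + epsilon) * scale + bias,
// which is what the final DLOG above spells out for one element. The helper
// below is a minimal, framework-free illustration of that formula; it assumes
// <cmath> is reachable through test_include.h and is not called by the test.
inline float BatchNormReference(float x, float mean, float variance,
                                float scale, float bias,
                                float epsilon = 1e-5f) {
  return (x - mean) / std::sqrt(variance + epsilon) * scale + bias;
}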