From 3a6201af8d8116119bfe9bdc931d44320103e7c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E7=8E=8B=E6=98=8E=E5=86=AC?= <78149749+winter-wang@users.noreply.github.com> Date: Sat, 26 Mar 2022 12:20:45 +0800 Subject: [PATCH] [infrt] add resnet50 unit test. test=develop (#40950) --- paddle/infrt/host_context/paddle_mlir.cc | 74 ++- paddle/infrt/host_context/paddle_mlir.h | 4 +- .../phi/infershaped/phi_kernel_launcher.h | 3 +- paddle/infrt/tests/CMakeLists.txt | 3 +- .../infrt/tests/dialect/phi/resnet50.mlir.in | 458 ++++++++++++++++++ .../tests/{model => models}/abs_model.py | 0 .../infrt/tests/{model => models}/linear.py | 0 paddle/infrt/tests/models/resnet50_model.py | 25 + .../infrt/tests/{model => models}/test_abs.cc | 0 paddle/scripts/infrt_build.sh | 6 +- 10 files changed, 548 insertions(+), 25 deletions(-) create mode 100644 paddle/infrt/tests/dialect/phi/resnet50.mlir.in rename paddle/infrt/tests/{model => models}/abs_model.py (100%) rename paddle/infrt/tests/{model => models}/linear.py (100%) create mode 100644 paddle/infrt/tests/models/resnet50_model.py rename paddle/infrt/tests/{model => models}/test_abs.cc (100%) diff --git a/paddle/infrt/host_context/paddle_mlir.cc b/paddle/infrt/host_context/paddle_mlir.cc index ec12815e3ce..8b7bbe13260 100644 --- a/paddle/infrt/host_context/paddle_mlir.cc +++ b/paddle/infrt/host_context/paddle_mlir.cc @@ -13,6 +13,9 @@ // limitations under the License. #include "paddle/infrt/host_context/paddle_mlir.h" + +#include + #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/pd/common/pd_ops_info.h" @@ -95,13 +98,13 @@ llvm::SmallVector MLIRModelGenImpl::GetModelInputsType( std::vector dims = RepeatedToVector( var_desc.type().lod_tensor().tensor().dims()); infrt::PrecisionType precision_; - ConvertDataTypeToPhi( + ConvertDataTypeToInfrt( var_desc.type().lod_tensor().tensor().data_type(), &precision_); mlir::Type type_ = infrt::DenseTensorType::get(context_, infrt::TargetType::CPU, precision_, - infrt::LayoutType::ANY); + infrt::LayoutType::NCHW); operandTypes.push_back(type_); } @@ -125,13 +128,13 @@ llvm::SmallVector MLIRModelGenImpl::GetModelOutputsType( std::vector dims = RepeatedToVector( var_desc.type().lod_tensor().tensor().dims()); infrt::PrecisionType precision_; - ConvertDataTypeToPhi( + ConvertDataTypeToInfrt( var_desc.type().lod_tensor().tensor().data_type(), &precision_); mlir::Type type_ = infrt::DenseTensorType::get(context_, infrt::TargetType::CPU, precision_, - infrt::LayoutType::ANY); + infrt::LayoutType::NCHW); resultTypes.push_back(type_); } } @@ -179,10 +182,12 @@ void MLIRModelGenImpl::UpdateModelParams( std::vector dims = RepeatedToVector( var_desc.type().lod_tensor().tensor().dims()); infrt::PrecisionType precision_; - ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(), - &precision_); - mlir::Type type_ = infrt::DenseTensorType::get( - context_, infrt::TargetType::CPU, precision_, infrt::LayoutType::ANY); + ConvertDataTypeToInfrt(var_desc.type().lod_tensor().tensor().data_type(), + &precision_); + mlir::Type type_ = infrt::DenseTensorType::get(context_, + infrt::TargetType::CPU, + precision_, + infrt::LayoutType::NCHW); auto op = builder_.create<::infrt::phi::TensorMapGetTensorOp>( mlir::UnknownLoc::get(context_), type_, map, name); params_map_.insert(std::pair( @@ -222,12 +227,42 @@ void MLIRModelGenImpl::UpdateModelOutputs( void MLIRModelGenImpl::buildOperation( const infrt::paddle::framework_proto::OpDesc &op_) { - 
const std::string &op_name = "pd." + op_.type(); + const std::string op_name = "pd." + op_.type(); mlir::Location loc = mlir::UnknownLoc::get(context_); + mlir::OperationState result(loc, op_name); + + if (!result.name.isRegistered()) { + LOG(FATAL) << "Found unregistered operation: " << op_name; + return; + } + + if (result.name.hasTrait<mlir::OpTrait::AttrSizedOperandSegments>()) { + LOG(FATAL) << "Found operation: " << op_name + << " has trait AttrSizedOperandSegments, which is not supported yet!"; + return; + } + llvm::SmallVector operands = GetOpInputValue(op_); + int empty_operand_cnt = 0; + for (auto it = operands.begin(); it != operands.end();) { + if (*it) { + ++it; + } else { + operands.erase(it); + ++empty_operand_cnt; + } + } + if (empty_operand_cnt > 1) { + LOG(FATAL) + << "Found operation: " << op_name << " has " << empty_operand_cnt + << " empty operands. More than one empty operand is not supported yet!"; + return; + } + result.addOperands(operands); llvm::SmallVector resultTypes = GetOpOutputType(op_); llvm::SmallVector attrs = GetOpAttributes(op_); - mlir::OperationState result(loc, op_name, operands, resultTypes, attrs); + result.addTypes(resultTypes); + result.addAttributes(attrs); mlir::Operation *mlir_op_ = builder_.createOperation(result); RegisterOpOutputVars(op_, mlir_op_); } @@ -239,11 +274,13 @@ llvm::SmallVector MLIRModelGenImpl::GetOpInputValue( std::unordered_map inputs_info = {}; if (pd_dialect_inputs_info_map_.count(op_.type())) inputs_info = pd_dialect_inputs_info_map_.at(op_.type()); + operands.resize(inputs_info.size()); for (int var_idx = 0; var_idx < op_.inputs_size(); ++var_idx) { auto &var = op_.inputs(var_idx); if (!var.arguments().empty()) { if (!inputs_info.count(var.parameter())) continue; - operands.push_back((params_map_[var.arguments()[0]])); + operands[inputs_info.at(var.parameter())] = + params_map_[var.arguments()[0]]; } } return operands; } @@ -256,7 +293,7 @@ llvm::SmallVector MLIRModelGenImpl::GetOpOutputType( std::unordered_map pd_dialect_outputs_info = {}; if (pd_dialect_outputs_info_map_.count(op_.type())) pd_dialect_outputs_info = pd_dialect_outputs_info_map_.at(op_.type()); - + resultTypes.resize(pd_dialect_outputs_info.size()); // update op outputs info for (int var_idx = 0; var_idx < op_.outputs_size(); ++var_idx) { auto &var_name = op_.outputs(var_idx).arguments()[0]; @@ -269,13 +306,14 @@ llvm::SmallVector MLIRModelGenImpl::GetOpOutputType( std::vector dims = RepeatedToVector( var_desc.type().lod_tensor().tensor().dims()); infrt::PrecisionType precision_; - ConvertDataTypeToPhi(var_desc.type().lod_tensor().tensor().data_type(), - &precision_); + ConvertDataTypeToInfrt( + var_desc.type().lod_tensor().tensor().data_type(), &precision_); mlir::Type type_ = infrt::DenseTensorType::get(context_, infrt::TargetType::CPU, precision_, - infrt::LayoutType::ANY); - resultTypes.push_back(type_); + infrt::LayoutType::NCHW); + resultTypes[pd_dialect_outputs_info.at( + op_.outputs(var_idx).parameter())] = type_; } } } @@ -412,8 +450,8 @@ bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype, } } -bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype, - infrt::PrecisionType *type) { +bool ConvertDataTypeToInfrt(infrt::paddle::framework_proto::VarType::Type dtype, - infrt::PrecisionType *type) { switch (dtype) { case infrt::paddle::framework_proto::VarType::Type::VarType_Type_FP16: *type = infrt::PrecisionType::FLOAT16; diff --git a/paddle/infrt/host_context/paddle_mlir.h b/paddle/infrt/host_context/paddle_mlir.h index a351b5cf80e..3d79d608e70 100644 ---
a/paddle/infrt/host_context/paddle_mlir.h +++ b/paddle/infrt/host_context/paddle_mlir.h @@ -102,7 +102,7 @@ inline std::vector RepeatedToVector( bool ConvertDataType(infrt::paddle::framework_proto::VarType::Type dtype, mlir::Builder builder, mlir::Type *type); -bool ConvertDataTypeToPhi(infrt::paddle::framework_proto::VarType::Type dtype, - infrt::PrecisionType *type); +bool ConvertDataTypeToInfrt(infrt::paddle::framework_proto::VarType::Type dtype, + infrt::PrecisionType *type); #endif // PADDLE_INFRT_HOST_CONTEXT_PADDLE_MLIR_H_ diff --git a/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h b/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h index 2dab7f2324d..34ef4460fc6 100644 --- a/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h +++ b/paddle/infrt/kernel/phi/infershaped/phi_kernel_launcher.h @@ -37,8 +37,7 @@ template void KernelLauncherFunc(host_context::KernelFrame* frame) { - static InferShapedKernelLauncher launcher( - FuncArgStatics::arg_size); + InferShapedKernelLauncher launcher(FuncArgStatics::arg_size); static const uint16_t num_input_tensors{InferShapeHelper::count}; static const bool turn_on_infer_shape_cache{true}; diff --git a/paddle/infrt/tests/CMakeLists.txt b/paddle/infrt/tests/CMakeLists.txt index 3c4a2f1cbb8..a720ad82479 100644 --- a/paddle/infrt/tests/CMakeLists.txt +++ b/paddle/infrt/tests/CMakeLists.txt @@ -1,4 +1,4 @@ -cc_test_tiny(test_abs_model SRCS model/test_abs.cc DEPS infrt ${MLIR_IR_LIBS}) +cc_test_tiny(test_abs_model SRCS models/test_abs.cc DEPS infrt ${MLIR_IR_LIBS}) configure_file(lit.cfg.py.in "${CMAKE_SOURCE_DIR}/paddle/infrt/tests/lit.cfg.py") @@ -7,4 +7,5 @@ add_test(NAME test_infrt_by_lit COMMAND sh -c "lit -v ${CMAKE_SOURCE_DIR}/paddle configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensor/tensor_map.mlir) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/linear_cpu.mlir) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/resnet50.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/phi/resnet50.mlir) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensorrt/disabled_linear.mlir.in ${CMAKE_CURRENT_SOURCE_DIR}/dialect/tensorrt/disabled_linear.mlir) diff --git a/paddle/infrt/tests/dialect/phi/resnet50.mlir.in b/paddle/infrt/tests/dialect/phi/resnet50.mlir.in new file mode 100644 index 00000000000..2803ebb41cf --- /dev/null +++ b/paddle/infrt/tests/dialect/phi/resnet50.mlir.in @@ -0,0 +1,458 @@ +// RUN: infrtexec -i %s +module { + func @main_graph(%arg0: !phi.dense_tensor_map, %arg1: !infrt.dense_tensor) -> !infrt.dense_tensor { + %0 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_37.w_2"} -> !infrt.dense_tensor + %1 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_47.w_0"} -> !infrt.dense_tensor + %2 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_6.w_0"} -> !infrt.dense_tensor + %3 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_13.w_0"} -> !infrt.dense_tensor + %4 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_5.w_2"} -> !infrt.dense_tensor + %5 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_40.w_1"} -> !infrt.dense_tensor + %6 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_6.w_2"} -> !infrt.dense_tensor + %7 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_27.w_1"} -> !infrt.dense_tensor + %8 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_11.w_1"} -> !infrt.dense_tensor + %9 = 
phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_40.b_0"} -> !infrt.dense_tensor + %10 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_38.w_0"} -> !infrt.dense_tensor + %11 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_2.w_0"} -> !infrt.dense_tensor + %12 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_21.b_0"} -> !infrt.dense_tensor + %13 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_15.w_1"} -> !infrt.dense_tensor + %14 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_8.b_0"} -> !infrt.dense_tensor + %15 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_29.w_0"} -> !infrt.dense_tensor + %16 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_35.w_0"} -> !infrt.dense_tensor + %17 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_26.w_0"} -> !infrt.dense_tensor + %18 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_50.w_1"} -> !infrt.dense_tensor + %19 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_31.b_0"} -> !infrt.dense_tensor + %20 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_22.w_0"} -> !infrt.dense_tensor + %21 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_27.w_0"} -> !infrt.dense_tensor + %22 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_28.b_0"} -> !infrt.dense_tensor + %23 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_46.w_2"} -> !infrt.dense_tensor + %24 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_37.w_0"} -> !infrt.dense_tensor + %25 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_18.w_0"} -> !infrt.dense_tensor + %26 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_38.w_0"} -> !infrt.dense_tensor + %27 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_39.w_0"} -> !infrt.dense_tensor + %28 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_43.w_0"} -> !infrt.dense_tensor + %29 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_3.w_0"} -> !infrt.dense_tensor + %30 = phi_dt.tensor_map_get_tensor(%arg0) {name = "linear_0.b_0"} -> !infrt.dense_tensor + %31 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_34.w_1"} -> !infrt.dense_tensor + %32 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_49.w_0"} -> !infrt.dense_tensor + %33 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_52.w_1"} -> !infrt.dense_tensor + %34 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_8.w_0"} -> !infrt.dense_tensor + %35 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_45.w_2"} -> !infrt.dense_tensor + %36 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_43.b_0"} -> !infrt.dense_tensor + %37 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_5.w_0"} -> !infrt.dense_tensor + %38 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_29.w_2"} -> !infrt.dense_tensor + %39 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_33.w_1"} -> !infrt.dense_tensor + %40 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_10.w_0"} -> !infrt.dense_tensor + %41 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_43.w_1"} -> !infrt.dense_tensor + %42 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_9.w_1"} -> !infrt.dense_tensor + %43 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_7.w_1"} -> !infrt.dense_tensor + %44 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_7.w_0"} -> !infrt.dense_tensor + %45 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_50.w_2"} -> !infrt.dense_tensor + %46 = 
phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_40.w_0"} -> !infrt.dense_tensor + %47 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_42.w_0"} -> !infrt.dense_tensor + %48 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_42.w_1"} -> !infrt.dense_tensor + %49 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_31.w_1"} -> !infrt.dense_tensor + %50 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_7.b_0"} -> !infrt.dense_tensor + %51 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_12.w_1"} -> !infrt.dense_tensor + %52 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_39.w_0"} -> !infrt.dense_tensor + %53 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_30.w_1"} -> !infrt.dense_tensor + %54 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_13.b_0"} -> !infrt.dense_tensor + %55 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_46.b_0"} -> !infrt.dense_tensor + %56 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_36.w_1"} -> !infrt.dense_tensor + %57 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_29.w_1"} -> !infrt.dense_tensor + %58 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_36.w_2"} -> !infrt.dense_tensor + %59 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_49.b_0"} -> !infrt.dense_tensor + %60 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_29.w_0"} -> !infrt.dense_tensor + %61 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_28.w_1"} -> !infrt.dense_tensor + %62 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_51.w_2"} -> !infrt.dense_tensor + %63 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_27.b_0"} -> !infrt.dense_tensor + %64 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_47.w_0"} -> !infrt.dense_tensor + %65 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_30.w_0"} -> !infrt.dense_tensor + %66 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_33.w_0"} -> !infrt.dense_tensor + %67 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_24.w_2"} -> !infrt.dense_tensor + %68 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_22.w_1"} -> !infrt.dense_tensor + %69 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_1.w_1"} -> !infrt.dense_tensor + %70 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_32.w_0"} -> !infrt.dense_tensor + %71 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_20.w_2"} -> !infrt.dense_tensor + %72 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_16.w_0"} -> !infrt.dense_tensor + %73 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_23.w_1"} -> !infrt.dense_tensor + %74 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_11.w_0"} -> !infrt.dense_tensor + %75 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_30.w_0"} -> !infrt.dense_tensor + %76 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_37.w_0"} -> !infrt.dense_tensor + %77 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_16.b_0"} -> !infrt.dense_tensor + %78 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_36.b_0"} -> !infrt.dense_tensor + %79 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_1.w_0"} -> !infrt.dense_tensor + %80 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_31.w_0"} -> !infrt.dense_tensor + %81 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_0.w_2"} -> !infrt.dense_tensor + %82 = phi_dt.tensor_map_get_tensor(%arg0) {name = 
"batch_norm2d_10.w_2"} -> !infrt.dense_tensor + %83 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_1.w_2"} -> !infrt.dense_tensor + %84 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_13.w_0"} -> !infrt.dense_tensor + %85 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_12.b_0"} -> !infrt.dense_tensor + %86 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_0.w_0"} -> !infrt.dense_tensor + %87 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_30.b_0"} -> !infrt.dense_tensor + %88 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_13.w_1"} -> !infrt.dense_tensor + %89 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_3.w_1"} -> !infrt.dense_tensor + %90 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_52.b_0"} -> !infrt.dense_tensor + %91 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_26.b_0"} -> !infrt.dense_tensor + %92 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_48.w_2"} -> !infrt.dense_tensor + %93 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_25.b_0"} -> !infrt.dense_tensor + %94 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_33.b_0"} -> !infrt.dense_tensor + %95 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_30.w_2"} -> !infrt.dense_tensor + %96 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_35.w_1"} -> !infrt.dense_tensor + %97 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_8.w_0"} -> !infrt.dense_tensor + %98 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_18.w_0"} -> !infrt.dense_tensor + %99 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_4.w_0"} -> !infrt.dense_tensor + %100 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_15.b_0"} -> !infrt.dense_tensor + %101 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_16.w_2"} -> !infrt.dense_tensor + %102 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_32.w_1"} -> !infrt.dense_tensor + %103 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_50.b_0"} -> !infrt.dense_tensor + %104 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_44.w_0"} -> !infrt.dense_tensor + %105 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_24.w_0"} -> !infrt.dense_tensor + %106 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_11.w_2"} -> !infrt.dense_tensor + %107 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_2.b_0"} -> !infrt.dense_tensor + %108 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_20.w_0"} -> !infrt.dense_tensor + %109 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_15.w_2"} -> !infrt.dense_tensor + %110 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_44.w_1"} -> !infrt.dense_tensor + %111 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_23.w_0"} -> !infrt.dense_tensor + %112 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_17.w_2"} -> !infrt.dense_tensor + %113 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_25.w_0"} -> !infrt.dense_tensor + %114 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_3.w_2"} -> !infrt.dense_tensor + %115 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_0.w_1"} -> !infrt.dense_tensor + %116 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_38.w_1"} -> !infrt.dense_tensor + %117 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_20.w_0"} -> !infrt.dense_tensor + %118 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_47.w_1"} -> !infrt.dense_tensor + %119 = 
phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_50.w_0"} -> !infrt.dense_tensor + %120 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_48.b_0"} -> !infrt.dense_tensor + %121 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_14.b_0"} -> !infrt.dense_tensor + %122 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_47.b_0"} -> !infrt.dense_tensor + %123 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_46.w_1"} -> !infrt.dense_tensor + %124 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_34.w_0"} -> !infrt.dense_tensor + %125 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_45.b_0"} -> !infrt.dense_tensor + %126 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_25.w_1"} -> !infrt.dense_tensor + %127 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_22.w_2"} -> !infrt.dense_tensor + %128 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_21.w_2"} -> !infrt.dense_tensor + %129 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_17.w_0"} -> !infrt.dense_tensor + %130 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_19.b_0"} -> !infrt.dense_tensor + %131 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_1.b_0"} -> !infrt.dense_tensor + %132 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_52.w_0"} -> !infrt.dense_tensor + %133 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_21.w_1"} -> !infrt.dense_tensor + %134 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_9.w_0"} -> !infrt.dense_tensor + %135 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_4.b_0"} -> !infrt.dense_tensor + %136 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_9.w_2"} -> !infrt.dense_tensor + %137 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_45.w_0"} -> !infrt.dense_tensor + %138 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_8.w_2"} -> !infrt.dense_tensor + %139 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_35.w_0"} -> !infrt.dense_tensor + %140 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_39.w_1"} -> !infrt.dense_tensor + %141 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_44.w_2"} -> !infrt.dense_tensor + %142 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_19.w_2"} -> !infrt.dense_tensor + %143 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_27.w_0"} -> !infrt.dense_tensor + %144 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_2.w_1"} -> !infrt.dense_tensor + %145 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_19.w_0"} -> !infrt.dense_tensor + %146 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_23.w_2"} -> !infrt.dense_tensor + %147 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_32.w_2"} -> !infrt.dense_tensor + %148 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_51.w_0"} -> !infrt.dense_tensor + %149 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_17.b_0"} -> !infrt.dense_tensor + %150 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_2.w_0"} -> !infrt.dense_tensor + %151 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_15.w_0"} -> !infrt.dense_tensor + %152 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_23.w_0"} -> !infrt.dense_tensor + %153 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_18.w_1"} -> !infrt.dense_tensor + %154 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_1.w_0"} -> !infrt.dense_tensor + %155 = 
phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_21.w_0"} -> !infrt.dense_tensor + %156 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_37.b_0"} -> !infrt.dense_tensor + %157 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_28.w_0"} -> !infrt.dense_tensor + %158 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_31.w_0"} -> !infrt.dense_tensor + %159 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_3.b_0"} -> !infrt.dense_tensor + %160 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_19.w_0"} -> !infrt.dense_tensor + %161 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_38.b_0"} -> !infrt.dense_tensor + %162 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_7.w_0"} -> !infrt.dense_tensor + %163 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_33.w_2"} -> !infrt.dense_tensor + %164 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_44.w_0"} -> !infrt.dense_tensor + %165 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_25.w_2"} -> !infrt.dense_tensor + %166 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_32.b_0"} -> !infrt.dense_tensor + %167 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_26.w_2"} -> !infrt.dense_tensor + %168 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_4.w_0"} -> !infrt.dense_tensor + %169 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_40.w_2"} -> !infrt.dense_tensor + %170 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_17.w_1"} -> !infrt.dense_tensor + %171 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_5.w_1"} -> !infrt.dense_tensor + %172 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_28.w_2"} -> !infrt.dense_tensor + %173 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_27.w_2"} -> !infrt.dense_tensor + %174 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_20.w_1"} -> !infrt.dense_tensor + %175 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_15.w_0"} -> !infrt.dense_tensor + %176 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_18.b_0"} -> !infrt.dense_tensor + %177 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_41.w_0"} -> !infrt.dense_tensor + %178 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_42.w_0"} -> !infrt.dense_tensor + %179 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_25.w_0"} -> !infrt.dense_tensor + %180 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_22.b_0"} -> !infrt.dense_tensor + %181 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_35.w_2"} -> !infrt.dense_tensor + %182 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_24.w_0"} -> !infrt.dense_tensor + %183 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_49.w_1"} -> !infrt.dense_tensor + %184 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_22.w_0"} -> !infrt.dense_tensor + %185 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_26.w_1"} -> !infrt.dense_tensor + %186 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_12.w_2"} -> !infrt.dense_tensor + %187 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_43.w_0"} -> !infrt.dense_tensor + %188 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_38.w_2"} -> !infrt.dense_tensor + %189 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_0.b_0"} -> !infrt.dense_tensor + %190 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_50.w_0"} -> !infrt.dense_tensor + %191 = phi_dt.tensor_map_get_tensor(%arg0) 
{name = "batch_norm2d_10.w_1"} -> !infrt.dense_tensor + %192 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_19.w_1"} -> !infrt.dense_tensor + %193 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_41.w_2"} -> !infrt.dense_tensor + %194 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_10.b_0"} -> !infrt.dense_tensor + %195 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_14.w_2"} -> !infrt.dense_tensor + %196 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_14.w_0"} -> !infrt.dense_tensor + %197 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_12.w_0"} -> !infrt.dense_tensor + %198 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_9.b_0"} -> !infrt.dense_tensor + %199 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_16.w_0"} -> !infrt.dense_tensor + %200 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_29.b_0"} -> !infrt.dense_tensor + %201 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_42.b_0"} -> !infrt.dense_tensor + %202 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_2.w_2"} -> !infrt.dense_tensor + %203 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_48.w_0"} -> !infrt.dense_tensor + %204 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_14.w_1"} -> !infrt.dense_tensor + %205 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_3.w_0"} -> !infrt.dense_tensor + %206 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_6.w_0"} -> !infrt.dense_tensor + %207 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_20.b_0"} -> !infrt.dense_tensor + %208 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_39.b_0"} -> !infrt.dense_tensor + %209 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_34.w_2"} -> !infrt.dense_tensor + %210 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_16.w_1"} -> !infrt.dense_tensor + %211 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_36.w_0"} -> !infrt.dense_tensor + %212 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_48.w_1"} -> !infrt.dense_tensor + %213 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_7.w_2"} -> !infrt.dense_tensor + %214 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_32.w_0"} -> !infrt.dense_tensor + %215 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_52.w_0"} -> !infrt.dense_tensor + %216 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_18.w_2"} -> !infrt.dense_tensor + %217 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_44.b_0"} -> !infrt.dense_tensor + %218 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_6.b_0"} -> !infrt.dense_tensor + %219 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_10.w_0"} -> !infrt.dense_tensor + %220 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_47.w_2"} -> !infrt.dense_tensor + %221 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_51.w_1"} -> !infrt.dense_tensor + %222 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_9.w_0"} -> !infrt.dense_tensor + %223 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_52.w_2"} -> !infrt.dense_tensor + %224 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_45.w_1"} -> !infrt.dense_tensor + %225 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_8.w_1"} -> !infrt.dense_tensor + %226 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_13.w_2"} -> !infrt.dense_tensor + %227 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_46.w_0"} -> 
!infrt.dense_tensor + %228 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_49.w_2"} -> !infrt.dense_tensor + %229 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_12.w_0"} -> !infrt.dense_tensor + %230 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_4.w_2"} -> !infrt.dense_tensor + %231 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_5.w_0"} -> !infrt.dense_tensor + %232 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_51.b_0"} -> !infrt.dense_tensor + %233 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_33.w_0"} -> !infrt.dense_tensor + %234 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_46.w_0"} -> !infrt.dense_tensor + %235 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_45.w_0"} -> !infrt.dense_tensor + %236 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_6.w_1"} -> !infrt.dense_tensor + %237 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_48.w_0"} -> !infrt.dense_tensor + %238 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_37.w_1"} -> !infrt.dense_tensor + %239 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_14.w_0"} -> !infrt.dense_tensor + %240 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_21.w_0"} -> !infrt.dense_tensor + %241 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_28.w_0"} -> !infrt.dense_tensor + %242 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_26.w_0"} -> !infrt.dense_tensor + %243 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_23.b_0"} -> !infrt.dense_tensor + %244 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_49.w_0"} -> !infrt.dense_tensor + %245 = phi_dt.tensor_map_get_tensor(%arg0) {name = "linear_0.w_0"} -> !infrt.dense_tensor + %246 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_51.w_0"} -> !infrt.dense_tensor + %247 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_41.w_0"} -> !infrt.dense_tensor + %248 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_35.b_0"} -> !infrt.dense_tensor + %249 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_42.w_2"} -> !infrt.dense_tensor + %250 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_43.w_2"} -> !infrt.dense_tensor + %251 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_24.w_1"} -> !infrt.dense_tensor + %252 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_31.w_2"} -> !infrt.dense_tensor + %253 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_41.w_1"} -> !infrt.dense_tensor + %254 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_11.w_0"} -> !infrt.dense_tensor + %255 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_41.b_0"} -> !infrt.dense_tensor + %256 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_34.w_0"} -> !infrt.dense_tensor + %257 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_4.w_1"} -> !infrt.dense_tensor + %258 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_40.w_0"} -> !infrt.dense_tensor + %259 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_0.w_0"} -> !infrt.dense_tensor + %260 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_36.w_0"} -> !infrt.dense_tensor + %261 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_5.b_0"} -> !infrt.dense_tensor + %262 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_11.b_0"} -> !infrt.dense_tensor + %263 = phi_dt.tensor_map_get_tensor(%arg0) {name = "conv2d_17.w_0"} -> !infrt.dense_tensor + %264 = 
phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_39.w_2"} -> !infrt.dense_tensor + %265 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_34.b_0"} -> !infrt.dense_tensor + %266 = phi_dt.tensor_map_get_tensor(%arg0) {name = "batch_norm2d_24.b_0"} -> !infrt.dense_tensor + %267 = "pd.conv2d"(%arg1, %86) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [3 : i32, 3 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%267, %259, %189, %115, %81) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %268 = "pd.relu"(%Y) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %269 = "pd.pool2d"(%268) {adaptive = false, ceil_mode = false, data_format = "NCHW", exclusive = true, global_pooling = false, ksize = [3 : i32, 3 : i32], padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], pooling_type = "max", strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor) -> !infrt.dense_tensor + %270 = "pd.conv2d"(%269, %11) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_0, %MeanOut_1, %VarianceOut_2 = "pd.batch_norm"(%270, %150, %107, %144, %202) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %271 = "pd.relu"(%Y_0) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %272 = "pd.conv2d"(%271, %29) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_3, %MeanOut_4, %VarianceOut_5 = "pd.batch_norm"(%272, %205, %159, %89, %114) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %273 = "pd.relu"(%Y_3) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %274 = "pd.conv2d"(%273, %99) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_6, %MeanOut_7, %VarianceOut_8 = "pd.batch_norm"(%274, %168, %135, %257, %230) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %275 = "pd.conv2d"(%269, %154) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_9, %MeanOut_10, %VarianceOut_11 
= "pd.batch_norm"(%275, %79, %131, %69, %83) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %276 = "pd.elementwise_add"(%Y_6, %Y_9) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %277 = "pd.relu"(%276) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %278 = "pd.conv2d"(%277, %231) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_12, %MeanOut_13, %VarianceOut_14 = "pd.batch_norm"(%278, %37, %261, %171, %4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %279 = "pd.relu"(%Y_12) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %280 = "pd.conv2d"(%279, %2) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_15, %MeanOut_16, %VarianceOut_17 = "pd.batch_norm"(%280, %206, %218, %236, %6) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %281 = "pd.relu"(%Y_15) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %282 = "pd.conv2d"(%281, %44) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_18, %MeanOut_19, %VarianceOut_20 = "pd.batch_norm"(%282, %162, %50, %43, %213) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %283 = "pd.elementwise_add"(%Y_18, %277) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %284 = "pd.relu"(%283) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %285 = "pd.conv2d"(%284, %34) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_21, %MeanOut_22, %VarianceOut_23 = "pd.batch_norm"(%285, %97, %14, %225, %138) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %286 = "pd.relu"(%Y_21) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %287 = "pd.conv2d"(%286, %134) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 
1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_24, %MeanOut_25, %VarianceOut_26 = "pd.batch_norm"(%287, %222, %198, %42, %136) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %288 = "pd.relu"(%Y_24) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %289 = "pd.conv2d"(%288, %219) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_27, %MeanOut_28, %VarianceOut_29 = "pd.batch_norm"(%289, %40, %194, %191, %82) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %290 = "pd.elementwise_add"(%Y_27, %284) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %291 = "pd.relu"(%290) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %292 = "pd.conv2d"(%291, %197) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_30, %MeanOut_31, %VarianceOut_32 = "pd.batch_norm"(%292, %229, %85, %51, %186) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %293 = "pd.relu"(%Y_30) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %294 = "pd.conv2d"(%293, %84) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_33, %MeanOut_34, %VarianceOut_35 = "pd.batch_norm"(%294, %3, %54, %88, %226) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %295 = "pd.relu"(%Y_33) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %296 = "pd.conv2d"(%295, %239) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_36, %MeanOut_37, %VarianceOut_38 = "pd.batch_norm"(%296, %196, %121, %204, %195) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %297 = "pd.conv2d"(%291, %74) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + 
%Y_39, %MeanOut_40, %VarianceOut_41 = "pd.batch_norm"(%297, %254, %262, %8, %106) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %298 = "pd.elementwise_add"(%Y_36, %Y_39) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %299 = "pd.relu"(%298) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %300 = "pd.conv2d"(%299, %175) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_42, %MeanOut_43, %VarianceOut_44 = "pd.batch_norm"(%300, %151, %100, %13, %109) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %301 = "pd.relu"(%Y_42) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %302 = "pd.conv2d"(%301, %199) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_45, %MeanOut_46, %VarianceOut_47 = "pd.batch_norm"(%302, %72, %77, %210, %101) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %303 = "pd.relu"(%Y_45) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %304 = "pd.conv2d"(%303, %263) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_48, %MeanOut_49, %VarianceOut_50 = "pd.batch_norm"(%304, %129, %149, %170, %112) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %305 = "pd.elementwise_add"(%Y_48, %299) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %306 = "pd.relu"(%305) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %307 = "pd.conv2d"(%306, %25) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_51, %MeanOut_52, %VarianceOut_53 = "pd.batch_norm"(%307, %98, %176, %153, %216) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %308 = "pd.relu"(%Y_51) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %309 = "pd.conv2d"(%308, %160) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", 
paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_54, %MeanOut_55, %VarianceOut_56 = "pd.batch_norm"(%309, %145, %130, %192, %142) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %310 = "pd.relu"(%Y_54) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %311 = "pd.conv2d"(%310, %108) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_57, %MeanOut_58, %VarianceOut_59 = "pd.batch_norm"(%311, %117, %207, %174, %71) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %312 = "pd.elementwise_add"(%Y_57, %306) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %313 = "pd.relu"(%312) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %314 = "pd.conv2d"(%313, %155) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_60, %MeanOut_61, %VarianceOut_62 = "pd.batch_norm"(%314, %240, %12, %133, %128) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %315 = "pd.relu"(%Y_60) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %316 = "pd.conv2d"(%315, %20) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_63, %MeanOut_64, %VarianceOut_65 = "pd.batch_norm"(%316, %184, %180, %68, %127) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %317 = "pd.relu"(%Y_63) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %318 = "pd.conv2d"(%317, %111) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_66, %MeanOut_67, %VarianceOut_68 = "pd.batch_norm"(%318, %152, %243, %73, %146) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %319 = "pd.elementwise_add"(%Y_66, %313) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %320 = "pd.relu"(%319) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %321 = 
"pd.conv2d"(%320, %113) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_69, %MeanOut_70, %VarianceOut_71 = "pd.batch_norm"(%321, %179, %93, %126, %165) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %322 = "pd.relu"(%Y_69) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %323 = "pd.conv2d"(%322, %242) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_72, %MeanOut_73, %VarianceOut_74 = "pd.batch_norm"(%323, %17, %91, %185, %167) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %324 = "pd.relu"(%Y_72) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %325 = "pd.conv2d"(%324, %21) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_75, %MeanOut_76, %VarianceOut_77 = "pd.batch_norm"(%325, %143, %63, %7, %173) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %326 = "pd.conv2d"(%320, %105) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_78, %MeanOut_79, %VarianceOut_80 = "pd.batch_norm"(%326, %182, %266, %251, %67) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %327 = "pd.elementwise_add"(%Y_75, %Y_78) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %328 = "pd.relu"(%327) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %329 = "pd.conv2d"(%328, %157) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_81, %MeanOut_82, %VarianceOut_83 = "pd.batch_norm"(%329, %241, %22, %61, %172) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %330 = "pd.relu"(%Y_81) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %331 = "pd.conv2d"(%330, %15) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], 
groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_84, %MeanOut_85, %VarianceOut_86 = "pd.batch_norm"(%331, %60, %200, %57, %38) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %332 = "pd.relu"(%Y_84) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %333 = "pd.conv2d"(%332, %75) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_87, %MeanOut_88, %VarianceOut_89 = "pd.batch_norm"(%333, %65, %87, %53, %95) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %334 = "pd.elementwise_add"(%Y_87, %328) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %335 = "pd.relu"(%334) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %336 = "pd.conv2d"(%335, %158) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_90, %MeanOut_91, %VarianceOut_92 = "pd.batch_norm"(%336, %80, %19, %49, %252) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %337 = "pd.relu"(%Y_90) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %338 = "pd.conv2d"(%337, %214) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_93, %MeanOut_94, %VarianceOut_95 = "pd.batch_norm"(%338, %70, %166, %102, %147) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %339 = "pd.relu"(%Y_93) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %340 = "pd.conv2d"(%339, %233) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_96, %MeanOut_97, %VarianceOut_98 = "pd.batch_norm"(%340, %66, %94, %39, %163) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %341 = "pd.elementwise_add"(%Y_96, %335) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %342 = "pd.relu"(%341) : 
(!infrt.dense_tensor) -> !infrt.dense_tensor + %343 = "pd.conv2d"(%342, %124) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_99, %MeanOut_100, %VarianceOut_101 = "pd.batch_norm"(%343, %256, %265, %31, %209) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %344 = "pd.relu"(%Y_99) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %345 = "pd.conv2d"(%344, %16) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_102, %MeanOut_103, %VarianceOut_104 = "pd.batch_norm"(%345, %139, %248, %96, %181) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %346 = "pd.relu"(%Y_102) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %347 = "pd.conv2d"(%346, %211) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_105, %MeanOut_106, %VarianceOut_107 = "pd.batch_norm"(%347, %260, %78, %56, %58) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %348 = "pd.elementwise_add"(%Y_105, %342) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %349 = "pd.relu"(%348) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %350 = "pd.conv2d"(%349, %24) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_108, %MeanOut_109, %VarianceOut_110 = "pd.batch_norm"(%350, %76, %156, %238, %0) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %351 = "pd.relu"(%Y_108) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %352 = "pd.conv2d"(%351, %26) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_111, %MeanOut_112, %VarianceOut_113 = "pd.batch_norm"(%352, %10, %161, %116, %188) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %353 = 
"pd.relu"(%Y_111) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %354 = "pd.conv2d"(%353, %27) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_114, %MeanOut_115, %VarianceOut_116 = "pd.batch_norm"(%354, %52, %208, %140, %264) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %355 = "pd.elementwise_add"(%Y_114, %349) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %356 = "pd.relu"(%355) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %357 = "pd.conv2d"(%356, %46) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_117, %MeanOut_118, %VarianceOut_119 = "pd.batch_norm"(%357, %258, %9, %5, %169) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %358 = "pd.relu"(%Y_117) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %359 = "pd.conv2d"(%358, %247) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_120, %MeanOut_121, %VarianceOut_122 = "pd.batch_norm"(%359, %177, %255, %253, %193) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %360 = "pd.relu"(%Y_120) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %361 = "pd.conv2d"(%360, %178) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_123, %MeanOut_124, %VarianceOut_125 = "pd.batch_norm"(%361, %47, %201, %48, %249) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %362 = "pd.elementwise_add"(%Y_123, %356) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %363 = "pd.relu"(%362) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %364 = "pd.conv2d"(%363, %104) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_126, %MeanOut_127, %VarianceOut_128 = "pd.batch_norm"(%364, %164, %217, %110, %141) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, 
!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %365 = "pd.relu"(%Y_126) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %366 = "pd.conv2d"(%365, %235) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_129, %MeanOut_130, %VarianceOut_131 = "pd.batch_norm"(%366, %137, %125, %224, %35) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %367 = "pd.relu"(%Y_129) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %368 = "pd.conv2d"(%367, %234) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_132, %MeanOut_133, %VarianceOut_134 = "pd.batch_norm"(%368, %227, %55, %123, %23) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %369 = "pd.conv2d"(%363, %28) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_135, %MeanOut_136, %VarianceOut_137 = "pd.batch_norm"(%369, %187, %36, %41, %250) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %370 = "pd.elementwise_add"(%Y_132, %Y_135) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %371 = "pd.relu"(%370) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %372 = "pd.conv2d"(%371, %64) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_138, %MeanOut_139, %VarianceOut_140 = "pd.batch_norm"(%372, %1, %122, %118, %220) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %373 = "pd.relu"(%Y_138) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %374 = "pd.conv2d"(%373, %203) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_141, %MeanOut_142, %VarianceOut_143 = "pd.batch_norm"(%374, %237, %120, %212, %92) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, 
!infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %375 = "pd.relu"(%Y_141) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %376 = "pd.conv2d"(%375, %32) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_144, %MeanOut_145, %VarianceOut_146 = "pd.batch_norm"(%376, %244, %59, %183, %228) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %377 = "pd.elementwise_add"(%Y_144, %371) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %378 = "pd.relu"(%377) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %379 = "pd.conv2d"(%378, %190) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_147, %MeanOut_148, %VarianceOut_149 = "pd.batch_norm"(%379, %119, %103, %18, %45) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %380 = "pd.relu"(%Y_147) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %381 = "pd.conv2d"(%380, %246) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_150, %MeanOut_151, %VarianceOut_152 = "pd.batch_norm"(%381, %148, %232, %221, %62) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %382 = "pd.relu"(%Y_150) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %383 = "pd.conv2d"(%382, %132) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %Y_153, %MeanOut_154, %VarianceOut_155 = "pd.batch_norm"(%383, %215, %90, %33, %223) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) + %384 = "pd.elementwise_add"(%Y_153, %378) {axis = -1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %385 = "pd.relu"(%384) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %386 = "pd.pool2d"(%385) {adaptive = true, ceil_mode = false, data_format = "NCHW", exclusive = true, global_pooling = false, ksize = [1 : i32, 1 : i32], padding_algorithm = "EXPLICIT", paddings = [0 : i32, 0 : i32], pooling_type = "avg", strides = [1 : i32, 1 : i32]} : (!infrt.dense_tensor) -> !infrt.dense_tensor + %387 = 
"pd.flatten_contiguous_range"(%386) {start_axis = 1 : si32, stop_axis = 3 : si32} : (!infrt.dense_tensor) -> !infrt.dense_tensor + %388 = "pd.matmul_v2"(%387, %245) {trans_x = false, trans_y = false} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + %389 = "pd.elementwise_add"(%388, %30) {axis = 1 : si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor + infrt.return %270 : !infrt.dense_tensor + } + + func @main() { + %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context + %1 = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value = 12.0 : f32, layout=#infrt.layout, lod=[1:i64], dims=[1, 3, 256, 256]}: (!phi.context) -> (!infrt.dense_tensor) + %map = phi_dt.load_combined_params(){model_path="@CMAKE_BINARY_DIR@/models/resnet50/model.pdmodel",params_path="@CMAKE_BINARY_DIR@/models/resnet50/model.pdiparams"} + %2 = infrt.call@main_graph(%map, %1) : (!phi.dense_tensor_map, !infrt.dense_tensor) -> !infrt.dense_tensor + phi_dt.print_tensor (%2 : !infrt.dense_tensor) + infrt.return + } + } diff --git a/paddle/infrt/tests/model/abs_model.py b/paddle/infrt/tests/models/abs_model.py similarity index 100% rename from paddle/infrt/tests/model/abs_model.py rename to paddle/infrt/tests/models/abs_model.py diff --git a/paddle/infrt/tests/model/linear.py b/paddle/infrt/tests/models/linear.py similarity index 100% rename from paddle/infrt/tests/model/linear.py rename to paddle/infrt/tests/models/linear.py diff --git a/paddle/infrt/tests/models/resnet50_model.py b/paddle/infrt/tests/models/resnet50_model.py new file mode 100644 index 00000000000..6edd75116e8 --- /dev/null +++ b/paddle/infrt/tests/models/resnet50_model.py @@ -0,0 +1,25 @@ +# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import paddle
+from paddle.vision.models import resnet50
+from paddle.jit import to_static
+from paddle.static import InputSpec
+import sys
+
+model = resnet50(True)
+net = to_static(
+    model, input_spec=[InputSpec(
+        shape=[None, 3, 256, 256], name='x')])
+paddle.jit.save(net, sys.argv[1])
diff --git a/paddle/infrt/tests/model/test_abs.cc b/paddle/infrt/tests/models/test_abs.cc
similarity index 100%
rename from paddle/infrt/tests/model/test_abs.cc
rename to paddle/infrt/tests/models/test_abs.cc
diff --git a/paddle/scripts/infrt_build.sh b/paddle/scripts/infrt_build.sh
index 37e19b49f1c..6634f5396ac 100755
--- a/paddle/scripts/infrt_build.sh
+++ b/paddle/scripts/infrt_build.sh
@@ -46,7 +46,9 @@ function update_pd_ops() {
 python3 generate_phi_kernel_dialect.py
 # generate test model
 cd ${PADDLE_ROOT}
- python3 paddle/infrt/tests/model/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
+ mkdir -p ${PADDLE_ROOT}/build/models
+ python3 paddle/infrt/tests/models/abs_model.py ${PADDLE_ROOT}/build/paddle/infrt/tests/abs
+ python3 paddle/infrt/tests/models/resnet50_model.py ${PADDLE_ROOT}/build/models/resnet50/model
 }

 function init() {
@@ -114,7 +116,7 @@ function create_fake_models() {
 python3 -m pip install *whl
 cd ${PADDLE_ROOT}/build
 python3 ${PADDLE_ROOT}/tools/infrt/fake_models/multi_fc.py
- python3 ${PADDLE_ROOT}/paddle/infrt/tests/model/linear.py
+ python3 ${PADDLE_ROOT}/paddle/infrt/tests/models/linear.py
 }

 function test_infrt() {
--
GitLab
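Editor's note (not part of the patch): the new resnet50.mlir.in test feeds a constant tensor of value 12.0 and shape [1, 3, 256, 256] through the exported model and prints the result with phi_dt.print_tensor. A hedged way to obtain a reference value for that output is to reload the model saved by paddle/infrt/tests/models/resnet50_model.py with the regular Paddle dygraph API and run it on the same input. The sketch below is illustrative only; the model path assumes the build layout produced by update_pd_ops() in infrt_build.sh, and the file name sanity_check_resnet50.py is hypothetical.

    # sanity_check_resnet50.py -- illustrative sketch, not included in this patch
    import numpy as np
    import paddle

    # Reload the static graph that resnet50_model.py saved via paddle.jit.save.
    # The prefix below assumes the path used by update_pd_ops().
    model = paddle.jit.load("build/models/resnet50/model")
    model.eval()

    # Same constant input the MLIR @main function builds with
    # phi_dt.create_inited_dense_tensor.cpu.f32 (value = 12.0).
    x = paddle.to_tensor(np.full([1, 3, 256, 256], 12.0, dtype="float32"))
    out = model(x)
    print(out.shape)           # [1, 1000] for the ResNet50 classifier head
    print(out.numpy()[0, :5])  # values to compare against phi_dt.print_tensor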