From d5bebf0b3b1bb6f22332b4dd898080f76d39260d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=98=8E=E5=86=AC?= <78149749+winter-wang@users.noreply.github.com>
Date: Thu, 24 Mar 2022 11:10:28 +0800
Subject: [PATCH] [infrt] fix bug in emit si32 attribute. (#40860)

---
 .../dialect/phi/pass/phi_op_convert_pass.cc   |  5 ++--
 .../host_context/mlir_to_runtime_translate.cc |  6 ++---
 paddle/infrt/tests/dialect/phi/phi_test.mlir  | 23 +++++++++++--------
 paddle/phi/core/compat/op_utils.h             |  4 ++++
 4 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc b/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
index 18d40ce576..4abdb388dc 100644
--- a/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
+++ b/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
@@ -97,8 +97,9 @@ void PhiOpConvertPass::convertStage() {
     }
     auto loc = getFunction().getLoc();
     builder.setInsertionPoint(op);
-    op_name = phi::TransToPhiKernelName(op_name);
-    if (!::phi::OpUtilsMap::Instance().Contains(op_name)) {
+
+    if (!::phi::OpUtilsMap::Instance().HasArgumentMappingFn(op_name)) {
+      op_name = phi::TransToPhiKernelName(op_name);
       auto kernel_op = builder.create(loc,
                                       op->getResultTypes(),
                                       op->getOperands(),
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.cc b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
index 609524bead..007730151e 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.cc
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
@@ -130,7 +130,7 @@ boost::optional MlirToRuntimeTranslator::EmitAttribute(
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
-      return val.getInt();
+      return val.getValue().getSExtValue();
     }
   }
   return boost::none;
@@ -142,7 +142,7 @@ boost::optional MlirToRuntimeTranslator::EmitAttribute(
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
-      return val.getInt();
+      return val.getValue().getSExtValue();
     }
   }
   return boost::none;
@@ -233,7 +233,7 @@ boost::optional MlirToRuntimeTranslator::EmitAttribute(
                                                                            \
     std::vector res;                                                       \
     for (auto& v : array) {                                                \
-      res.push_back(v.cast<mlir::IntegerAttr>().getInt());                 \
+      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue()); \
     }                                                                      \
     return res;                                                            \
   }
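Background on the translator change above: `mlir::IntegerAttr::getInt()` asserts that the attribute type is signless (or index), so calling it on a signed attribute such as `groups = 1 : si32` aborts, which is the bug this patch fixes. Reading the underlying `llvm::APInt` via `getValue().getSExtValue()` works for signless and signed integer attributes alike. A minimal sketch of that pattern, assuming MLIR as of this patch; the helper name and the use of `llvm::Optional` are illustrative, not code from the repository:

    #include <cstdint>

    #include "llvm/ADT/Optional.h"
    #include "mlir/IR/BuiltinAttributes.h"

    // Illustrative helper: read a 32-bit integer attribute whether it is typed as
    // i32 (signless) or si32 (signed). IntegerAttr::getInt() would assert on si32,
    // so the value is taken from the raw APInt and sign-extended instead.
    static llvm::Optional<int32_t> ReadInt32Attr(mlir::Attribute attr) {
      auto int_attr = attr.dyn_cast<mlir::IntegerAttr>();
      if (!int_attr || !int_attr.getType().isInteger(32)) return llvm::None;
      return static_cast<int32_t>(int_attr.getValue().getSExtValue());
    }
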
diff --git a/paddle/infrt/tests/dialect/phi/phi_test.mlir b/paddle/infrt/tests/dialect/phi/phi_test.mlir
index 4dda2b7a79..d1e561cd5f 100644
--- a/paddle/infrt/tests/dialect/phi/phi_test.mlir
+++ b/paddle/infrt/tests/dialect/phi/phi_test.mlir
@@ -1,25 +1,30 @@
 // RUN: infrtexec -i %s
 module {
-  func @predict(%arg0: !infrt.dense_tensor, %arg1: !infrt.dense_tensor, %arg2: !infrt.dense_tensor, %arg3: !infrt.dense_tensor, %arg4: !infrt.dense_tensor) -> !infrt.dense_tensor {
+  func @predict(%arg0: !infrt.dense_tensor,%filter: !infrt.dense_tensor, %arg1: !infrt.dense_tensor, %arg2: !infrt.dense_tensor, %arg3: !infrt.dense_tensor, %arg4: !infrt.dense_tensor) -> !infrt.dense_tensor {
     %2 = "pd.abs"(%arg0) : (!infrt.dense_tensor) -> !infrt.dense_tensor
     %3 = "pd.matmul_v2"(%arg0, %2) {trans_x = false, trans_y = false} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor
-    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%3, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor)
-    infrt.return %Y : !infrt.dense_tensor
+    %4 = "pd.conv2d"(%3, %filter) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor
+    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%4, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor)
+    %out = "pd.relu"(%Y) : (!infrt.dense_tensor) -> !infrt.dense_tensor
+    %5 = "pd.elementwise_add"(%out, %out) {axis = -1:si32} : (!infrt.dense_tensor, !infrt.dense_tensor) -> !infrt.dense_tensor
+    infrt.return %5 : !infrt.dense_tensor
   }

   func @main() {
     %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context
-    %t = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[1:i64, 3:i64, 8:i64, 8:i64]}: (!phi.context) -> (!infrt.dense_tensor)
+    %t = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[1, 3, 8, 8]}: (!phi.context) -> (!infrt.dense_tensor)
     "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor) -> ()
-    %bias = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[3:i64]}: (!phi.context) -> (!infrt.dense_tensor)
+    %filter = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[3, 3, 8, 8]}: (!phi.context) -> (!infrt.dense_tensor)
+    "phi_dt.fill_dense_tensor.f32"(%filter) {value=[3.8:f32]} : (!infrt.dense_tensor) -> ()
+    %bias = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[3]}: (!phi.context) -> (!infrt.dense_tensor)
     "phi_dt.fill_dense_tensor.f32"(%bias) {value=[1.5:f32]} : (!infrt.dense_tensor) -> ()
-    %mean = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[3:i64]}: (!phi.context) -> (!infrt.dense_tensor)
+    %mean = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[3]}: (!phi.context) -> (!infrt.dense_tensor)
     "phi_dt.fill_dense_tensor.f32"(%mean) {value=[3.5:f32]} : (!infrt.dense_tensor) -> ()
-    %scale = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[3:i64]}: (!phi.context) -> (!infrt.dense_tensor)
+    %scale = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[3]}: (!phi.context) -> (!infrt.dense_tensor)
     "phi_dt.fill_dense_tensor.f32"(%scale) {value=[1.0:f32]} : (!infrt.dense_tensor) -> ()
-    %var = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[3:i64]}: (!phi.context) -> (!infrt.dense_tensor)
+    %var = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1], dims=[3]}: (!phi.context) -> (!infrt.dense_tensor)
     "phi_dt.fill_dense_tensor.f32"(%var) {value=[0.0:f32]} : (!infrt.dense_tensor) -> ()
-    %2 = infrt.call@predict(%t, %bias, %mean, %scale, %var) : (!infrt.dense_tensor, !infrt.dense_tensor,!infrt.dense_tensor,!infrt.dense_tensor,!infrt.dense_tensor) -> !infrt.dense_tensor
+    %2 = infrt.call@predict(%t, %filter, %bias, %mean, %scale, %var) : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor,!infrt.dense_tensor,!infrt.dense_tensor,!infrt.dense_tensor) -> !infrt.dense_tensor

     //phi_dt.print_tensor(%t : !infrt.dense_tensor)
     phi_dt.print_tensor(%2 : !infrt.dense_tensor)
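The convert-pass change at the top of this patch keys off whether an argument mapping function is registered for an op; the predicate backing that check is added to `OpUtilsMap` in the next hunk. A rough usage sketch of the new API follows (the surrounding function and the printed messages are illustrative assumptions, not part of the patch):

    #include <iostream>
    #include <string>

    #include "paddle/phi/core/compat/op_utils.h"

    // Illustrative only: mirror the decision made in PhiOpConvertPass::convertStage().
    void DescribeLowering(const std::string& op_name) {
      if (::phi::OpUtilsMap::Instance().HasArgumentMappingFn(op_name)) {
        // An argument mapping fn is registered, so the op can be converted
        // to a PHI kernel op directly.
        std::cout << op_name << ": convert to a PHI kernel op\n";
      } else {
        // No mapping fn: translate the name with phi::TransToPhiKernelName
        // and emit a fallback kernel op instead.
        std::cout << op_name << ": emit a fallback kernel op\n";
      }
    }
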
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 613a2f9960..d9cff03e89 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -124,6 +124,10 @@ class OpUtilsMap {
         {std::move(op_type), std::move(base_kernel_name)});
   }
 
+  bool HasArgumentMappingFn(const std::string& op_type) const {
+    return arg_mapping_fn_map_.count(op_type);
+  }
+
   void InsertArgumentMappingFn(std::string op_type, ArgumentMappingFn fn) {
     PADDLE_ENFORCE_EQ(
         arg_mapping_fn_map_.count(op_type),
-- 
GitLab
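A closing note on the test: array attributes such as `dilations`, `paddings`, and `strides` go through the array branch of `EmitAttribute` patched in mlir_to_runtime_translate.cc, which collects `IntegerAttr` elements into a `std::vector`. A self-contained sketch of that loop outside the macro, with an assumed helper name and element type:

    #include <cstdint>
    #include <vector>

    #include "mlir/IR/BuiltinAttributes.h"

    // Illustrative helper mirroring the array branch changed above: collect the
    // 32-bit integer elements of an ArrayAttr, going through the APInt so that
    // si32 elements are handled as well as signless i32.
    static std::vector<int32_t> EmitInt32Array(mlir::ArrayAttr array) {
      std::vector<int32_t> res;
      res.reserve(array.size());
      for (mlir::Attribute v : array) {
        auto int_attr = v.cast<mlir::IntegerAttr>();
        res.push_back(static_cast<int32_t>(int_attr.getValue().getSExtValue()));
      }
      return res;
    }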