diff --git a/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc b/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
index 18d40ce57649da79cbefa6c7c81cb54da1f226da..4abdb388dc23c4be1280e1b33097fe55d8655710 100644
--- a/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
+++ b/paddle/infrt/dialect/phi/pass/phi_op_convert_pass.cc
@@ -97,8 +97,9 @@ void PhiOpConvertPass::convertStage() {
     }
     auto loc = getFunction().getLoc();
     builder.setInsertionPoint(op);
-    op_name = phi::TransToPhiKernelName(op_name);
-    if (!::phi::OpUtilsMap::Instance().Contains(op_name)) {
+
+    if (!::phi::OpUtilsMap::Instance().HasArgumentMappingFn(op_name)) {
+      op_name = phi::TransToPhiKernelName(op_name);
       auto kernel_op = builder.create<infrt::KernelOp>(loc,
                                                        op->getResultTypes(),
                                                        op->getOperands(),
diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.cc b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
index 609524bead11e8f18d3af95bb9c3561c06ce9995..007730151e370da4f53da74b302c4ff43f4b2238 100644
--- a/paddle/infrt/host_context/mlir_to_runtime_translate.cc
+++ b/paddle/infrt/host_context/mlir_to_runtime_translate.cc
@@ -130,7 +130,7 @@ boost::optional<int32_t> MlirToRuntimeTranslator::EmitAttribute(
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(32)) {
-      return val.getInt();
+      return val.getValue().getSExtValue();
     }
   }
   return boost::none;
@@ -142,7 +142,7 @@ boost::optional<int64_t> MlirToRuntimeTranslator::EmitAttribute(
   if (attr.isa<mlir::IntegerAttr>()) {
     auto val = attr.cast<mlir::IntegerAttr>();
     if (val.getType().isInteger(64)) {
-      return val.getInt();
+      return val.getValue().getSExtValue();
     }
   }
   return boost::none;
@@ -233,7 +233,7 @@ boost::optional<std::vector<type__>> MlirToRuntimeTranslator::EmitAttribute(
                                                                                \
     std::vector<type__> res;                                                   \
     for (auto& v : array) {                                                    \
-      res.push_back(v.cast<mlir::IntegerAttr>().getInt());                     \
+      res.push_back(v.cast<mlir::IntegerAttr>().getValue().getSExtValue());    \
     }                                                                          \
     return res;                                                                \
   }
diff --git a/paddle/infrt/tests/dialect/phi/phi_test.mlir b/paddle/infrt/tests/dialect/phi/phi_test.mlir
index 4dda2b7a79d30513c8c80168990f50f71d83e2cc..d1e561cd5f995999b4400d6569bcd3fcad9aea0f 100644
--- a/paddle/infrt/tests/dialect/phi/phi_test.mlir
+++ b/paddle/infrt/tests/dialect/phi/phi_test.mlir
@@ -1,25 +1,30 @@
 // RUN: infrtexec -i %s
 module {
-  func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg1: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg2: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg3: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg4: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
+  func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>,%filter: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg1: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg2: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg3: !infrt.dense_tensor<CPU, FP32, NCHW>, %arg4: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
     %2 = "pd.abs"(%arg0) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
     %3 = "pd.matmul_v2"(%arg0, %2) {trans_x = false, trans_y = false} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
-    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%3, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
-    infrt.return %Y : !infrt.dense_tensor<CPU, FP32, NCHW>
+    %4 = "pd.conv2d"(%3, %filter) {data_format = "NCHW", dilations = [1 : i32, 1 : i32], groups = 1 : si32, padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%4, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
+    %out = "pd.relu"(%Y) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    %5 = "pd.elementwise_add"(%out, %out) {axis = -1:si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    infrt.return %5 : !infrt.dense_tensor<CPU, FP32, NCHW>
   }

   func @main() {
     %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
-    %t = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64, 3:i64, 8:i64, 8:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %t = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[1, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %bias = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[3:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %filter = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    "phi_dt.fill_dense_tensor.f32"(%filter) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+    %bias = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%bias) {value=[1.5:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %mean = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[3:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %mean = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%mean) {value=[3.5:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %scale = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[3:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %scale = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%scale) {value=[1.0:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %var = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[3:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %var = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     "phi_dt.fill_dense_tensor.f32"(%var) {value=[0.0:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %2 = infrt.call@predict(%t, %bias, %mean, %scale, %var) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    %2 = infrt.call@predict(%t, %filter, %bias, %mean, %scale, %var) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>,!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>

     //phi_dt.print_tensor(%t : !infrt.dense_tensor<CPU, FP32, NCHW>)
     phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
diff --git a/paddle/phi/core/compat/op_utils.h b/paddle/phi/core/compat/op_utils.h
index 613a2f9960a6ffd2ca4a02f20710018fcc00eaed..d9cff03e89ca212a4bdbde84dbc031ca68f8be6f 100644
--- a/paddle/phi/core/compat/op_utils.h
+++ b/paddle/phi/core/compat/op_utils.h
@@ -124,6 +124,10 @@ class OpUtilsMap {
         {std::move(op_type), std::move(base_kernel_name)});
   }

+  bool HasArgumentMappingFn(const std::string& op_type) const {
+    return arg_mapping_fn_map_.count(op_type);
+  }
+
   void InsertArgumentMappingFn(std::string op_type, ArgumentMappingFn fn) {
     PADDLE_ENFORCE_EQ(
         arg_mapping_fn_map_.count(op_type),
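
Note on the phi_op_convert_pass.cc / op_utils.h changes above: the fallback decision now keys off whether an ArgumentMappingFn is registered for the original op name, and TransToPhiKernelName is applied only inside the fallback branch (argument mapping functions are registered under the original op name, so the lookup has to happen before any renaming). A minimal sketch of that dispatch shape, with hypothetical stand-in names (OpUtilsMapSketch, ConvertOneOp), not the infrt pass itself:

    #include <functional>
    #include <string>
    #include <unordered_map>

    // Hypothetical stand-ins to illustrate the lookup-before-rename order; the
    // real code lives in phi::OpUtilsMap and PhiOpConvertPass and emits MLIR ops.
    using ArgumentMappingFn = std::function<void(const std::string&)>;

    struct OpUtilsMapSketch {
      std::unordered_map<std::string, ArgumentMappingFn> arg_mapping_fn_map_;
      bool HasArgumentMappingFn(const std::string& op_type) const {
        return arg_mapping_fn_map_.count(op_type) > 0;
      }
    };

    void ConvertOneOp(const OpUtilsMapSketch& utils, std::string op_name) {
      // Check the registry under the *original* op name first.
      if (!utils.HasArgumentMappingFn(op_name)) {
        // Fallback path: translate the name and emit a generic kernel op,
        // e.g. op_name = TransToPhiKernelName(op_name) in the real pass.
      } else {
        // Phi path: use the registered ArgumentMappingFn to derive the
        // kernel signature before creating the phi kernel op.
      }
    }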
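
Note on the getInt() -> getValue().getSExtValue() change in mlir_to_runtime_translate.cc: mlir::IntegerAttr::getInt() requires a signless integer type and asserts otherwise, while several pd-dialect attributes in the test above are signed (groups = 1 : si32, axis = -1 : si32). Reading the value through the underlying APInt sidesteps that restriction. A small self-contained sketch of the pattern (helper name is illustrative; the header path may differ by MLIR version):

    #include <cstdint>
    #include "mlir/IR/BuiltinAttributes.h"

    // Sketch: read a 32-bit integer attribute regardless of its signedness.
    // IntegerAttr::getInt() asserts on si32/ui32 types; going through the APInt
    // with getValue().getSExtValue() works for signless and signed types alike.
    static bool ReadInt32Attr(mlir::Attribute attr, int32_t* out) {
      auto int_attr = attr.dyn_cast<mlir::IntegerAttr>();
      if (!int_attr || !int_attr.getType().isInteger(32)) return false;
      *out = static_cast<int32_t>(int_attr.getValue().getSExtValue());
      return true;
    }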