From 4aed099d5659ecd81f770da2ed11cd47b2e8de0d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=8E=8B=E6=98=8E=E5=86=AC?= <78149749+winter-wang@users.noreply.github.com>
Date: Thu, 23 Dec 2021 14:06:25 +0800
Subject: [PATCH] [infrt] unify the paddle dialect operation name.
 test=develop (#38354)

---
 paddle/infrt/dialect/mlir_loader.cc           |  4 +--
 .../infrt/dialect/mlir_tests/paddle_ops.mlir  |  6 ++--
 paddle/infrt/dialect/mlir_tests/rewrite.mlir  | 32 +++++++++----------
 .../dialect/mlir_tests/rewrite_conv_bn.mlir   | 14 ++++----
 paddle/infrt/dialect/mlir_tests/trt_ops.mlir  | 24 ++++++++++++++
 paddle/infrt/dialect/pd_ops.td                | 22 ++++++-------
 paddle/infrt/dialect/print_ir.cc              |  2 +-
 7 files changed, 64 insertions(+), 40 deletions(-)
 create mode 100644 paddle/infrt/dialect/mlir_tests/trt_ops.mlir

diff --git a/paddle/infrt/dialect/mlir_loader.cc b/paddle/infrt/dialect/mlir_loader.cc
index 8df8727dbe2..5a6654b6c92 100644
--- a/paddle/infrt/dialect/mlir_loader.cc
+++ b/paddle/infrt/dialect/mlir_loader.cc
@@ -34,7 +34,7 @@ namespace infrt::dialect {
 
 mlir::OwningModuleRef LoadMlirSource(mlir::MLIRContext* context,
                                      const std::string& mlir_source) {
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
   RegisterCinnDialects(context->getDialectRegistry());
   context->getDialectRegistry().insert<mlir::StandardOpsDialect>();
 
@@ -54,7 +54,7 @@ mlir::OwningModuleRef LoadMlirSource(mlir::MLIRContext* context,
 
 mlir::OwningModuleRef LoadMlirFile(const std::string& file_name,
                                    mlir::MLIRContext* context) {
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
   RegisterCinnDialects(context->getDialectRegistry());
   context->getDialectRegistry().insert<mlir::StandardOpsDialect>();
 
diff --git a/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir b/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir
index 1855a68dd91..ee9fb1740a5 100644
--- a/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir
+++ b/paddle/infrt/dialect/mlir_tests/paddle_ops.mlir
@@ -1,8 +1,8 @@
 func @ops() {
-  %a = pd.Feed() : tensor<?xf32>
-  %b = pd.Feed() : tensor<?xf32>
+  %a = pd.feed() : tensor<?xf32>
+  %b = pd.feed() : tensor<?xf32>
 
-  %c = "pd.Matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
 
   infrt.return
 }
diff --git a/paddle/infrt/dialect/mlir_tests/rewrite.mlir b/paddle/infrt/dialect/mlir_tests/rewrite.mlir
index c984fda3e62..39f5e8f5957 100644
--- a/paddle/infrt/dialect/mlir_tests/rewrite.mlir
+++ b/paddle/infrt/dialect/mlir_tests/rewrite.mlir
@@ -1,24 +1,24 @@
 // CHECK-LABEL: @main
 func @main() -> tensor<?xf32> {
-  %a = "pd.Feed"() : () -> tensor<?xf32>
-  %b = "pd.Feed"() : () -> tensor<?xf32>
-  %bias = "pd.Feed"() : () -> tensor<?xf32>
+  %a = "pd.feed"() : () -> tensor<?xf32>
+  %b = "pd.feed"() : () -> tensor<?xf32>
+  %bias = "pd.feed"() : () -> tensor<?xf32>
 
-  %b1 = "pd.Feed"() : () -> tensor<?xf32>
-  %b2 = "pd.Feed"() : () -> tensor<?xf32>
-  %bias1 = "pd.Feed"() : () -> tensor<?xf32>
-  %bias2 = "pd.Feed"() : () -> tensor<?xf32>
+  %b1 = "pd.feed"() : () -> tensor<?xf32>
+  %b2 = "pd.feed"() : () -> tensor<?xf32>
+  %bias1 = "pd.feed"() : () -> tensor<?xf32>
+  %bias2 = "pd.feed"() : () -> tensor<?xf32>
 
-  %c = "pd.Matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d = "pd.ElementwiseAdd"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %e = "pd.Relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
 
-  %c1 = "pd.Matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d1 = "pd.ElementwiseAdd"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %e1 = "pd.Relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
+  %c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
 
-  %c2 = "pd.Matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d2 = "pd.ElementwiseAdd"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %e2 = "pd.Relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
+  %c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
   infrt.return %e2 : tensor<?xf32>
 }
\ No newline at end of file
diff --git a/paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir b/paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir
index d41d4b2f9f6..1360efe17b1 100644
--- a/paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir
+++ b/paddle/infrt/dialect/mlir_tests/rewrite_conv_bn.mlir
@@ -1,13 +1,13 @@
 // CHECK-LABEL: @main
 func @main() -> tensor<?xf32> {
-  %a = "pd.Feed"() : () -> tensor<?xf32>
-  %filter = "pd.Constant"(){value = dense<1.000000e+00> : tensor<3x64x3x3xf32>} : () -> tensor<3x64x3x3xf32>
-  %bias = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
+  %a = "pd.feed"() : () -> tensor<?xf32>
+  %filter = "pd.constant"(){value = dense<1.000000e+00> : tensor<3x64x3x3xf32>} : () -> tensor<3x64x3x3xf32>
+  %bias = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
 
-  %scale = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
-  %bias2 = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
-  %mean = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
-  %var = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
+  %scale = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
+  %bias2 = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
+  %mean = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
+  %var = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
 
   %c = "pd.conv2d"(%a, %filter, %bias) {} : (tensor<?xf32>, tensor<3x64x3x3xf32>, tensor<64xf32>) -> tensor<?xf32>
   %d = "pd.batch_norm"(%c, %scale, %bias2, %mean, %var) {} : (tensor<?xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>) -> tensor<?xf32>
diff --git a/paddle/infrt/dialect/mlir_tests/trt_ops.mlir b/paddle/infrt/dialect/mlir_tests/trt_ops.mlir
new file mode 100644
index 00000000000..539ad875f71
--- /dev/null
+++ b/paddle/infrt/dialect/mlir_tests/trt_ops.mlir
@@ -0,0 +1,24 @@
+// CHECK-LABEL: @main
+func @main() -> tensor<?xf32> {
+  %a = "pd.feed"() : () -> tensor<?xf32>
+  %b = "pd.feed"() : () -> tensor<?xf32>
+  %bias = "pd.feed"() : () -> tensor<?xf32>
+  %c = "pd.feed"() : () -> tensor<?xf32>
+  %b1 = "pd.feed"() : () -> tensor<?xf32>
+  %b2 = "pd.feed"() : () -> tensor<?xf32>
+  %bias1 = "pd.feed"() : () -> tensor<?xf32>
+  %bias2 = "pd.feed"() : () -> tensor<?xf32>
+
+  %d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
+
+  %c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
+
+  %c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
+
+  "pd.fetch"(%e2) :(tensor<?xf32>)->()
+}
diff --git a/paddle/infrt/dialect/pd_ops.td b/paddle/infrt/dialect/pd_ops.td
index 2aa7ab576af..ff049689ed3 100644
--- a/paddle/infrt/dialect/pd_ops.td
+++ b/paddle/infrt/dialect/pd_ops.td
@@ -41,7 +41,7 @@ def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"FetchOp">]> {
   let results = (outs Variadic<PD_Tensor>:$outputs);
 }
 
-def PD_ConstantOp : PD_Op<"Constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
+def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
   let summary = "constant Op";
   let description = [{}];
 
@@ -54,7 +54,7 @@ def PD_ConstantOp : PD_Op<"Constant", [NoSideEffect, ConstantLike, DeclareOpInte
   ];
 }
 
-def PD_AbsOp : PD_Op<"Abs", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_AbsOp : PD_Op<"abs", [NoSideEffect, SameOperandsAndResultType]> {
   let summary = "Computes the absolute value of a tensor";
 
   let description = [{
@@ -74,7 +74,7 @@ def PD_SqrtOp : PD_Op<"sqrt", [NoSideEffect, SameOperandsAndResultType]> {
   let results = (outs PD_Tensor:$y);
 }
 
-def PD_ReluOp : PD_Op<"Relu", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_ReluOp : PD_Op<"relu", [NoSideEffect, SameOperandsAndResultType]> {
   let summary = "Computes the Relu of a tensor";
 
   let description = [{
@@ -85,7 +85,7 @@ def PD_ReluOp : PD_Op<"Relu", [NoSideEffect, SameOperandsAndResultType]> {
   let results = (outs PD_Tensor:$y);
   let hasCanonicalizer = 1;
 }
 
-def PD_Relu6Op : PD_Op<"Relu6", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_Relu6Op : PD_Op<"relu6", [NoSideEffect, SameOperandsAndResultType]> {
   let summary = "Computes the Relu6 of a tensor";
   let description = [{
@@ -95,7 +95,7 @@ def PD_Relu6Op : PD_Op<"Relu6", [NoSideEffect, SameOperandsAndResultType]> {
   let results = (outs PD_Tensor:$y);
 }
 
-def PD_ElementwiseAdd : PD_Op<"ElementwiseAdd", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseAdd : PD_Op<"elementwise_add", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "ElementwiseAdd Op";
   let description = [{
   }];
@@ -106,7 +106,7 @@ def PD_ElementwiseAdd : PD_Op<"ElementwiseAdd", [NoSideEffect, Commutative, Decl
   let hasFolder = 1;
 }
 
-def PD_ElementwiseSub : PD_Op<"ElementwiseSub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseSub : PD_Op<"elementwise_sub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "ElementwiseSub Op";
   let description = [{
   }];
@@ -115,7 +115,7 @@ def PD_ElementwiseSub : PD_Op<"ElementwiseSub", [NoSideEffect, DeclareOpInterfac
   let results = (outs PD_Tensor:$out);
 }
 
-def PD_ElementwiseMul : PD_Op<"ElementwiseMul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseMul : PD_Op<"elementwise_mul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "ElementwiseMul Op";
   let description = [{
   }];
@@ -124,7 +124,7 @@ def PD_ElementwiseMul : PD_Op<"ElementwiseMul", [NoSideEffect, Commutative, Decl
   let results = (outs PD_Tensor:$out);
 }
 
-def PD_ElementwiseDiv : PD_Op<"ElementwiseDiv", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseDiv : PD_Op<"elementwise_div", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
   let summary = "ElementwiseDiv Op";
   let description = [{
   }];
@@ -133,7 +133,7 @@ def PD_ElementwiseDiv : PD_Op<"ElementwiseDiv", [NoSideEffect, DeclareOpInterfac
   let results = (outs PD_Tensor:$out);
 }
 
-def PD_MatmulOp : PD_Op<"Matmul", [NoSideEffect]> {
+def PD_MatmulOp : PD_Op<"matmul", [NoSideEffect]> {
   let summary = "Computes the matrix mulplication result of two tensors";
   let description = [{
   }];
@@ -181,7 +181,7 @@ def PD_BatchNormOp : PD_Op<"batch_norm", [NoSideEffect]> {
   let hasCanonicalizer = 1;
 }
 
-def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
+def PD_FusedFC : PD_Op<"fc", [NoSideEffect]> {
   let summary = "Computes the Fully Connected result of two tensors";
   let description = [{
   }];
@@ -190,7 +190,7 @@ def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
   let results = (outs PD_Tensor:$out);
 }
 
-def PD_FusedRepeatedFCRelu : PD_Op<"RepeatedFCRelu", [SameVariadicOperandSize, NoSideEffect]> {
+def PD_FusedRepeatedFCRelu : PD_Op<"fusion_repeated_fc_relu", [SameVariadicOperandSize, NoSideEffect]> {
   let summary = "";
   let description = [{
   }];
diff --git a/paddle/infrt/dialect/print_ir.cc b/paddle/infrt/dialect/print_ir.cc
index 3c5a2b6a7bf..43a3577b90f 100644
--- a/paddle/infrt/dialect/print_ir.cc
+++ b/paddle/infrt/dialect/print_ir.cc
@@ -115,7 +115,7 @@ int main(int argc, char **argv) {
   cl::ParseCommandLineOptions(argc, argv, "mlir demo");
 
   mlir::MLIRContext *context = infrt::Global::getMLIRContext();
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
   auto &registry = context->getDialectRegistry();
   infrt::RegisterCinnDialects(registry);
 
-- 
GitLab
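
Note on how the pieces fit together: the op renames in the .mlir tests only matter because the same patch stops calling allowUnregisteredDialects() in mlir_loader.cc and print_ir.cc. The C++ sketch below is not part of the patch; ParseStrict is a hypothetical helper, shown against the MLIR API of this era (the one mlir_loader.cc itself uses). It illustrates the context-level switch the patch flips: with unregistered dialects disallowed, op names must resolve against dialects registered with the context, so the tests have to spell names exactly as pd_ops.td registers them.

    #include "llvm/ADT/StringRef.h"
    #include "mlir/IR/MLIRContext.h"
    #include "mlir/Parser.h"

    // Hypothetical helper mirroring the loader's new behavior: parse MLIR
    // source while refusing ops from dialects the context does not know.
    mlir::OwningModuleRef ParseStrict(mlir::MLIRContext *context,
                                      llvm::StringRef source) {
      // The patch comments out context->allowUnregisteredDialects(); the
      // explicit call below spells out the strict default that leaves.
      context->allowUnregisteredDialects(false);
      // Dialects must be registered beforehand (RegisterCinnDialects in
      // mlir_loader.cc), so "pd.matmul" resolves to the pd_ops.td entry.
      return mlir::parseSourceString(source, context);
    }

Whether a stray legacy spelling such as "pd.Feed" is rejected at parse time additionally depends on the pd dialect not opting into unknown operations, but under this stricter loading mode the snake_case names registered in pd_ops.td are the ones the renamed tests and the new trt_ops.mlir are expected to parse against.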