Unverified commit 4aed099d, authored by 王明冬 and committed by GitHub

[infrt] unify the paddle dialect operation name. test=develop (#38354)

Parent 4e4d58b3
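This commit renames the Paddle-dialect ops from CamelCase to snake_case mnemonics, so the textual IR reads pd.feed, pd.matmul, and pd.elementwise_add instead of pd.Feed, pd.Matmul, and pd.ElementwiseAdd. Most renames in the hunks below follow a mechanical CamelCase-to-snake_case rule; the helper below is an illustrative sketch of that rule (it is not part of the patch), and the diff itself shows two hand-adjusted exceptions: "FC" becomes "fc" rather than "f_c", and "RepeatedFCRelu" becomes "fusion_repeated_fc_relu".

    // Illustrative sketch only -- this helper does not appear in the patch.
    #include <cctype>
    #include <string>

    std::string CamelToSnake(const std::string &camel) {
      std::string snake;
      for (size_t i = 0; i < camel.size(); ++i) {
        unsigned char c = static_cast<unsigned char>(camel[i]);
        if (std::isupper(c)) {
          if (i != 0) snake += '_';  // "ElementwiseAdd" -> "elementwise_add"
          snake += static_cast<char>(std::tolower(c));
        } else {
          snake += static_cast<char>(c);  // digits pass through: "Relu6" -> "relu6"
        }
      }
      return snake;
    }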
@@ -34,7 +34,7 @@ namespace infrt::dialect {
mlir::OwningModuleRef LoadMlirSource(mlir::MLIRContext* context,
const std::string& mlir_source) {
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
RegisterCinnDialects(context->getDialectRegistry());
context->getDialectRegistry().insert<mlir::StandardOpsDialect>();
@@ -54,7 +54,7 @@ mlir::OwningModuleRef LoadMlirSource(mlir::MLIRContext* context,
mlir::OwningModuleRef LoadMlirFile(const std::string& file_name,
mlir::MLIRContext* context) {
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
RegisterCinnDialects(context->getDialectRegistry());
context->getDialectRegistry().insert<mlir::StandardOpsDialect>();
......
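Both loader entry points also stop tolerating unknown operations: allowUnregisteredDialects() is commented out, so every dialect whose ops appear in the parsed IR must be registered up front, as the two registration lines kept below the change already do. A minimal sketch of the resulting setup, using only calls that appear in this file:

    // Sketch, not patch code: with unregistered dialects disallowed, parsing
    // an op outside the registered dialects now fails instead of silently
    // producing an unregistered operation.
    mlir::MLIRContext context;
    infrt::RegisterCinnDialects(context.getDialectRegistry());
    context.getDialectRegistry().insert<mlir::StandardOpsDialect>();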
func @ops() {
-  %a = pd.Feed() : tensor<?xf32>
-  %b = pd.Feed() : tensor<?xf32>
+  %a = pd.feed() : tensor<?xf32>
+  %b = pd.feed() : tensor<?xf32>
-  %c = "pd.Matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
infrt.return
}
// CHECK-LABEL: @main
func @main() -> tensor<?xf32> {
%a = "pd.Feed"() : () -> tensor<?xf32>
%b = "pd.Feed"() : () -> tensor<?xf32>
%bias = "pd.Feed"() : () -> tensor<?xf32>
%a = "pd.feed"() : () -> tensor<?xf32>
%b = "pd.feed"() : () -> tensor<?xf32>
%bias = "pd.feed"() : () -> tensor<?xf32>
%b1 = "pd.Feed"() : () -> tensor<?xf32>
%b2 = "pd.Feed"() : () -> tensor<?xf32>
%bias1 = "pd.Feed"() : () -> tensor<?xf32>
%bias2 = "pd.Feed"() : () -> tensor<?xf32>
%b1 = "pd.feed"() : () -> tensor<?xf32>
%b2 = "pd.feed"() : () -> tensor<?xf32>
%bias1 = "pd.feed"() : () -> tensor<?xf32>
%bias2 = "pd.feed"() : () -> tensor<?xf32>
%c = "pd.Matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d = "pd.ElementwiseAdd"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e = "pd.Relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
%c = "pd.matmul"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.Matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.ElementwiseAdd"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e1 = "pd.Relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.Matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.ElementwiseAdd"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.Relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
infrt.return %e2 : tensor<?xf32>
}
\ No newline at end of file
// CHECK-LABEL: @main
func @main() -> tensor<?xf32> {
%a = "pd.Feed"() : () -> tensor<?x3x256x256xf32>
%filter = "pd.Constant"(){value = dense<1.000000e+00> : tensor<3x64x3x3xf32>} : () -> tensor<3x64x3x3xf32>
%bias = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%a = "pd.feed"() : () -> tensor<?x3x256x256xf32>
%filter = "pd.constant"(){value = dense<1.000000e+00> : tensor<3x64x3x3xf32>} : () -> tensor<3x64x3x3xf32>
%bias = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%scale = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%bias2 = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%mean = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%var = "pd.Constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%scale = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%bias2 = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%mean = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%var = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
%c = "pd.conv2d"(%a, %filter, %bias) {} : (tensor<?x3x256x256xf32>, tensor<3x64x3x3xf32>, tensor<64xf32>) -> tensor<?x3x256x256xf32>
%d = "pd.batch_norm"(%c, %scale, %bias2, %mean, %var) {} : (tensor<?x3x256x256xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>) -> tensor<?x3x256x256xf32>
......
// CHECK-LABEL: @main
func @main() -> tensor<?xf32> {
%a = "pd.feed"() : () -> tensor<?xf32>
%b = "pd.feed"() : () -> tensor<?xf32>
%bias = "pd.feed"() : () -> tensor<?xf32>
%c = "pd.feed"() : () -> tensor<?xf32>
%b1 = "pd.feed"() : () -> tensor<?xf32>
%b2 = "pd.feed"() : () -> tensor<?xf32>
%bias1 = "pd.feed"() : () -> tensor<?xf32>
%bias2 = "pd.feed"() : () -> tensor<?xf32>
%d = "pd.elementwise_add"(%c, %bias) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
%c1 = "pd.matmul"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
%c2 = "pd.matmul"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:i32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
"pd.fetch"(%e2) :(tensor<?xf32>)->()
}
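The updated FileCheck tests reference only the new snake_case names, so anything that matches Paddle ops by string must move to the unified mnemonics as well. A hypothetical example of such a name-based check with the MLIR C++ API (the helper is an assumption for illustration, not code from the patch):

    #include "mlir/IR/Operation.h"

    // Hypothetical helper: name-based matching must use the new mnemonic.
    bool IsPaddleMatmul(mlir::Operation *op) {
      return op->getName().getStringRef() == "pd.matmul";  // was "pd.Matmul"
    }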
@@ -41,7 +41,7 @@ def PD_GraphOp : PD_Op<"graph", [SingleBlockImplicitTerminator<"FetchOp">]> {
let results = (outs Variadic<PD_Tensor>:$outputs);
}
-def PD_ConstantOp : PD_Op<"Constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
+def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
let summary = "constant Op";
let description = [{}];
@@ -54,7 +54,7 @@ def PD_ConstantOp : PD_Op<"Constant", [NoSideEffect, ConstantLike, DeclareOpInte
];
}
-def PD_AbsOp : PD_Op<"Abs", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_AbsOp : PD_Op<"abs", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the absolute value of a tensor";
let description = [{
@@ -74,7 +74,7 @@ def PD_SqrtOp : PD_Op<"sqrt", [NoSideEffect, SameOperandsAndResultType]> {
let results = (outs PD_Tensor:$y);
}
-def PD_ReluOp : PD_Op<"Relu", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_ReluOp : PD_Op<"relu", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the Relu of a tensor";
let description = [{
@@ -85,7 +85,7 @@ def PD_ReluOp : PD_Op<"Relu", [NoSideEffect, SameOperandsAndResultType]> {
let hasCanonicalizer = 1;
}
-def PD_Relu6Op : PD_Op<"Relu6", [NoSideEffect, SameOperandsAndResultType]> {
+def PD_Relu6Op : PD_Op<"relu6", [NoSideEffect, SameOperandsAndResultType]> {
let summary = "Computes the Relu6 of a tensor";
let description = [{
@@ -95,7 +95,7 @@ def PD_Relu6Op : PD_Op<"Relu6", [NoSideEffect, SameOperandsAndResultType]> {
let results = (outs PD_Tensor:$y);
}
-def PD_ElementwiseAdd : PD_Op<"ElementwiseAdd", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseAdd : PD_Op<"elementwise_add", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseAdd Op";
let description = [{
}];
@@ -106,7 +106,7 @@ def PD_ElementwiseAdd : PD_Op<"ElementwiseAdd", [NoSideEffect, Commutative, Decl
let hasFolder = 1;
}
-def PD_ElementwiseSub : PD_Op<"ElementwiseSub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseSub : PD_Op<"elementwise_sub", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseSub Op";
let description = [{
}];
@@ -115,7 +115,7 @@ def PD_ElementwiseSub : PD_Op<"ElementwiseSub", [NoSideEffect, DeclareOpInterfac
let results = (outs PD_Tensor:$out);
}
-def PD_ElementwiseMul : PD_Op<"ElementwiseMul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseMul : PD_Op<"elementwise_mul", [NoSideEffect, Commutative, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseMul Op";
let description = [{
}];
@@ -124,7 +124,7 @@ def PD_ElementwiseMul : PD_Op<"ElementwiseMul", [NoSideEffect, Commutative, Decl
let results = (outs PD_Tensor:$out);
}
-def PD_ElementwiseDiv : PD_Op<"ElementwiseDiv", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
+def PD_ElementwiseDiv : PD_Op<"elementwise_div", [NoSideEffect, DeclareOpInterfaceMethods<InferTypeOpInterface>]> {
let summary = "ElementwiseDiv Op";
let description = [{
}];
@@ -133,7 +133,7 @@ def PD_ElementwiseDiv : PD_Op<"ElementwiseDiv", [NoSideEffect, DeclareOpInterfac
let results = (outs PD_Tensor:$out);
}
-def PD_MatmulOp : PD_Op<"Matmul", [NoSideEffect]> {
+def PD_MatmulOp : PD_Op<"matmul", [NoSideEffect]> {
let summary = "Computes the matrix mulplication result of two tensors";
let description = [{
}];
@@ -181,7 +181,7 @@ def PD_BatchNormOp : PD_Op<"batch_norm", [NoSideEffect]> {
let hasCanonicalizer = 1;
}
-def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
+def PD_FusedFC : PD_Op<"fc", [NoSideEffect]> {
let summary = "Computes the Fully Connected result of two tensors";
let description = [{
}];
@@ -190,7 +190,7 @@ def PD_FusedFC : PD_Op<"FC", [NoSideEffect]> {
let results = (outs PD_Tensor:$out);
}
-def PD_FusedRepeatedFCRelu : PD_Op<"RepeatedFCRelu", [SameVariadicOperandSize, NoSideEffect]> {
+def PD_FusedRepeatedFCRelu : PD_Op<"fusion_repeated_fc_relu", [SameVariadicOperandSize, NoSideEffect]> {
let summary = "";
let description = [{ }];
......
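In these ODS definitions, the first template argument of PD_Op is the op mnemonic, and the dialect prefix "pd." is attached automatically, so each def above directly determines the textual form exercised by the .mlir tests. A sketch of how the rename surfaces in the generated C++ (the class name and the generated getOperationName() accessor follow ODS conventions and are assumptions here; the enclosing namespace is not shown in this diff):

    // Assumption: MatmulOp is the class ODS generates for PD_MatmulOp.
    llvm::StringRef name = MatmulOp::getOperationName();
    // name is now "pd.matmul"; before this patch it was "pd.Matmul".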
@@ -115,7 +115,7 @@ int main(int argc, char **argv) {
cl::ParseCommandLineOptions(argc, argv, "mlir demo");
mlir::MLIRContext *context = infrt::Global::getMLIRContext();
-  context->allowUnregisteredDialects();
+  // context->allowUnregisteredDialects();
auto &registry = context->getDialectRegistry();
infrt::RegisterCinnDialects(registry);
......