Unverified commit 9bb3744f, authored by 王明冬, committed by GitHub

[Infrt] delete custom_pdop.td and move op to infrt dialect. (#41021)

Parent aeade538
@@ -10,6 +10,7 @@ def Infrt_Dialect : Dialect {
   let name = "infrt";
   let cppNamespace = "::infrt";
+  let hasConstantMaterializer = 1;
   let useDefaultAttributePrinterParser = 1;
 }
......
@@ -183,16 +183,41 @@ void InfrtDialect::printType(::mlir::Type type,
   llvm_unreachable("unknown infrt type.");
 }

-// /// Parse an attribute registered to this dialect.
-// ::mlir::Attribute InfrtDialect::parseAttribute(::mlir::DialectAsmParser
-// &parser,
-//                                                ::mlir::Type type) const {
-//   return mlir::Attribute();
-// }
-
-// /// Print an attribute registered to this dialect.
-// void InfrtDialect::printAttribute(::mlir::Attribute attr,
-//                                   ::mlir::DialectAsmPrinter &os) const {
-// }
+mlir::Operation *InfrtDialect::materializeConstant(mlir::OpBuilder &builder,
+                                                   mlir::Attribute value,
+                                                   mlir::Type type,
+                                                   mlir::Location loc) {
+  return builder.create<ConstantOp>(loc, value);
+}
+
+void ConstantOp::build(mlir::OpBuilder &builder,
+                       mlir::OperationState &state,
+                       mlir::Attribute value) {
+  if (auto elem_attr = value.dyn_cast<mlir::ElementsAttr>()) {
+    return ConstantOp::build(builder, state, elem_attr);
+  } else if (value.isa<mlir::BoolAttr, mlir::FloatAttr, mlir::IntegerAttr>()) {
+    mlir::ShapedType type =
+        mlir::RankedTensorType::get(/*shape=*/{}, value.getType());
+    state.addAttribute("value", mlir::DenseElementsAttr::get(type, value));
+    state.addTypes(type);
+    return;
+  }
+  llvm_unreachable("unsupported attribute type for building pd.constant");
+}
+
+mlir::LogicalResult ConstantOp::inferReturnTypes(
+    mlir::MLIRContext *context,
+    mlir::Optional<mlir::Location> location,
+    mlir::ValueRange operands,
+    mlir::DictionaryAttr attributes,
+    mlir::RegionRange regions,
+    llvm::SmallVectorImpl<mlir::Type> &inferredReturnTypes) {
+  inferredReturnTypes.push_back(attributes.get("value").getType());
+  return mlir::success();
+}
+
+mlir::OpFoldResult ConstantOp::fold(
+    ::llvm::ArrayRef<mlir::Attribute> operands) {
+  return value();
+}
 }  // namespace infrt
@@ -21,6 +21,7 @@
 #include <mlir/IR/BuiltinTypes.h>
 #include <mlir/IR/Dialect.h>
 #include <mlir/IR/OpDefinition.h>
+#include <mlir/Interfaces/InferTypeOpInterface.h>
 #include <mlir/Interfaces/SideEffectInterfaces.h>

 #include "paddle/infrt/dialect/infrt/common/types.h"
......
include "mlir/Interfaces/InferTypeOpInterface.td"
include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
// Op definition // Op definition
...@@ -9,7 +10,7 @@ class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, m ...@@ -9,7 +10,7 @@ class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, m
// let parser = [{ return infrt::parse$cppClass(parser, result); }]; // let parser = [{ return infrt::parse$cppClass(parser, result); }];
} }
def PD_GraphOp : Infrt_Op<"graph", [SingleBlockImplicitTerminator<"::infrt::ReturnOp">]> { def Infrt_GraphOp : Infrt_Op<"graph", [SingleBlockImplicitTerminator<"::infrt::ReturnOp">]> {
let summary = "paddle graph Op"; let summary = "paddle graph Op";
let description = [{ let description = [{
Describe a paddle graph or subgraph. Describe a paddle graph or subgraph.
...@@ -69,3 +70,16 @@ def Infrt_TensorCastOp : Infrt_Op<"tensor_cast", [NoSideEffect]> { ...@@ -69,3 +70,16 @@ def Infrt_TensorCastOp : Infrt_Op<"tensor_cast", [NoSideEffect]> {
let arguments = (ins AnyType:$input); let arguments = (ins AnyType:$input);
let results = (outs AnyType:$output); let results = (outs AnyType:$output);
} }
def Infrt_ConstantOp : Infrt_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
let summary = "constant Op";
let description = [{}];
let arguments = (ins ElementsAttr:$value);
let results = (outs AnyType:$output);
let hasFolder = 1;
let builders = [
OpBuilder<(ins "mlir::Attribute":$value)>,
];
}
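
For illustration only (a sketch, not part of the diff): in the generic op syntax that this patch's test files already use, the new op would read as below. The result type is tied to the value attribute's type by AllTypesMatch and recovered through InferTypeOpInterface.

%c = "infrt.constant"() {value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>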
@@ -9,10 +9,6 @@ def FuseTensorCastPattern : Pat<
   (Infrt_TensorCastOp (Infrt_TensorCastOp $arg)),
   (Infrt_TensorCastOp $arg)>;

-def FuseFeedTensorCastPattern : Pat<
-  (Infrt_TensorCastOp (PD_FeedOp $name)),
-  (PD_FeedOp $name)>;
-
 def TypesAreIdentical : Constraint<CPred<"$0.getType() == $1.getType()">>;
 def RedundantTensorCastOptPattern : Pat<
   (Infrt_TensorCastOp:$res $arg), (replaceWithValue $arg),
......
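
To illustrate the two remaining patterns (a sketch in generic op form; the pretty-printed syntax of infrt.tensor_cast may differ):

// FuseTensorCastPattern: a cast of a cast collapses into one cast.
%1 = "infrt.tensor_cast"(%0) : (tensor<?xf32>) -> tensor<16xf32>
%2 = "infrt.tensor_cast"(%1) : (tensor<16xf32>) -> tensor<?xf32>
// is rewritten to:
%2 = "infrt.tensor_cast"(%0) : (tensor<?xf32>) -> tensor<?xf32>
// RedundantTensorCastOptPattern: when TypesAreIdentical holds between
// operand and result, the cast is replaced with its operand outright.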
@@ -38,7 +38,7 @@ void InfrtOpFusePass::runOnFunction() {
   ::mlir::RewritePatternSet patterns(&getContext());
   populateWithGenerated(patterns);
   (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
-  // Fuse pd.return Operation
+  // Fuse infrt.return Operation
   auto terminator_op = getFunction().front().getTerminator();
   if (nullptr == terminator_op) return;
   for (auto operand : terminator_op->getOperands()) {
......
@@ -16,7 +16,6 @@ def Paddle_Dialect : Dialect {
     This dialect contains the PaddlePaddle operators.
   }];
-  let hasConstantMaterializer = 1;
   let cppNamespace = "infrt::pd";
 }
......
@@ -35,42 +35,5 @@ void PaddleDialect::initialize() {
 #include "paddle/infrt/dialect/pd/ir/pd_extra_ops.cpp.inc"  // NOLINT
       >();
 }

-mlir::Operation *PaddleDialect::materializeConstant(mlir::OpBuilder &builder,
-                                                    mlir::Attribute value,
-                                                    mlir::Type type,
-                                                    mlir::Location loc) {
-  return builder.create<ConstantOp>(loc, value);
-}
-
-void ConstantOp::build(mlir::OpBuilder &builder,
-                       mlir::OperationState &state,
-                       mlir::Attribute value) {
-  if (auto elem_attr = value.dyn_cast<mlir::ElementsAttr>()) {
-    return ConstantOp::build(builder, state, elem_attr);
-  } else if (value.isa<mlir::BoolAttr, mlir::FloatAttr, mlir::IntegerAttr>()) {
-    mlir::ShapedType type =
-        mlir::RankedTensorType::get(/*shape=*/{}, value.getType());
-    state.addAttribute("value", mlir::DenseElementsAttr::get(type, value));
-    state.addTypes(type);
-    return;
-  }
-  llvm_unreachable("unsupported attribute type for building pd.constant");
-}
-
-mlir::LogicalResult ConstantOp::inferReturnTypes(
-    mlir::MLIRContext *context,
-    mlir::Optional<mlir::Location> location,
-    mlir::ValueRange operands,
-    mlir::DictionaryAttr attributes,
-    mlir::RegionRange regions,
-    llvm::SmallVectorImpl<mlir::Type> &inferredReturnTypes) {
-  inferredReturnTypes.push_back(attributes.get("value").getType());
-  return mlir::success();
-}
-
-mlir::OpFoldResult ConstantOp::fold(
-    ::llvm::ArrayRef<mlir::Attribute> operands) {
-  return value();
-}
 }  // namespace pd
 }  // namespace infrt
@@ -39,13 +39,6 @@ void TRTOpTellerPass::runOnFunction() {
     worklist.pop_back();
     if (op == nullptr) continue;
    if (op->getName().getStringRef().substr(0, 3) != "pd.") continue;
-    if (::llvm::dyn_cast_or_null<infrt::pd::FeedOp>(op)) continue;
-    if (::llvm::dyn_cast_or_null<infrt::pd::FetchOp>(op)) continue;
-    if (::llvm::dyn_cast_or_null<::infrt::GraphOp>(op)) continue;
-    if (::llvm::dyn_cast_or_null<::infrt::ReturnOp>(op)) continue;
-    if (::llvm::dyn_cast_or_null<::infrt::phi::TensorMapGetTensorOp>(op))
-      continue;
     builder.setInsertionPoint(op);
     auto loc = getFunction().getLoc();
     auto graph_op = builder.create<::infrt::GraphOp>(
......
 // CHECK-LABEL: @main
-func @main() -> tensor<?xf32> {
-  %a = "pd.feed"() {name="input0"} : () -> tensor<?x3x256x256xf32>
+func @main(%a:tensor<?x3x256x256xf32>) -> tensor<?xf32> {
   %filter = "pd.constant"(){value = dense<1.000000e+00> : tensor<3x64x3x3xf32>} : () -> tensor<3x64x3x3xf32>
   %bias = "pd.constant"(){value = dense<1.000000e+00> : tensor<64xf32>} : () -> tensor<64xf32>
@@ -11,5 +10,5 @@ func @main() -> tensor<?xf32> {
   %c = "pd.conv2d"(%a, %filter, %bias) {} : (tensor<?x3x256x256xf32>, tensor<3x64x3x3xf32>, tensor<64xf32>) -> tensor<?x3x256x256xf32>
   %d = "pd.batch_norm"(%c, %scale, %bias2, %mean, %var) {} : (tensor<?x3x256x256xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>, tensor<64xf32>) -> tensor<?x3x256x256xf32>
-  "pd.fetch"(%d) {name="output"} :(tensor<?x3x256x256xf32>)->()
+  infrt.return %d:tensor<?x3x256x256xf32>
 }
\ No newline at end of file
-// RUN: infrtopt %s | FileCheck %s
-// CHECK-LABEL: @ops
-func @ops() {
-  %a = pd.feed() {name="input0"} : tensor<?xf32>
-  %b = pd.feed() {name="input1"}: tensor<?xf32>
-  %d = pd.feed() {name="input3"}: !infrt.lod_tensor<3x4x9xf32, 0>
-  %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  infrt.return
-}
 // RUN: infrtopt --pd-op-fuse %s | FileCheck %s
 // CHECK-LABEL: @main
-func @main() -> tensor<?xf32> {
-  %a = "pd.feed"() {name="input0"} : () -> tensor<?xf32>
-  %b = "pd.feed"() {name="input1"} : () -> tensor<?xf32>
-  %bias = "pd.feed"() {name="input2"} : () -> tensor<?xf32>
-  %b1 = "pd.feed"() {name="input3"} : () -> tensor<?xf32>
-  %b2 = "pd.feed"() {name="input4"} : () -> tensor<?xf32>
-  %bias1 = "pd.feed"() {name="input5"} : () -> tensor<?xf32>
-  %bias2 = "pd.feed"() {name="input6"} : () -> tensor<?xf32>
-  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %c = "pd.matmul_v2"(%a, %b) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d = "pd.elementwise_add"(%c, %bias) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+func @main(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>, %arg2:tensor<?xf32>, %arg3:tensor<?xf32>, %arg4:tensor<?xf32>, %arg5:tensor<?xf32>, %arg6:tensor<?xf32>) -> tensor<?xf32> {
+  // CHECK: %0 = "pd.FC"(%arg0, %arg1, %arg4) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c = "pd.matmul_v2"(%arg0, %arg1) {transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d = "pd.elementwise_add"(%c, %arg4) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e = "pd.relu6"(%d) {} : (tensor<?xf32>) -> tensor<?xf32>
-  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %c1 = "pd.matmul_v2"(%e, %b1) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d1 = "pd.elementwise_add"(%c1, %bias1) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %2 = "pd.FC"(%1, %arg2, %arg5) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c1 = "pd.matmul_v2"(%e, %arg2) {transpose_x=false, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d1 = "pd.elementwise_add"(%c1, %arg5) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e1 = "pd.relu"(%d1) {} : (tensor<?xf32>) -> tensor<?xf32>
-  // CHECK: %{{[0-9]+}} = "pd.FC"(%{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %c2 = "pd.matmul_v2"(%e1, %b2) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
-  %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  // CHECK: %4 = "pd.FC"(%3, %arg3, %arg6) {in_num_col_dims = 1 : i32} : (tensor<?xf32>, tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %c2 = "pd.matmul_v2"(%e1, %arg3) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
+  %d2 = "pd.elementwise_add"(%c2, %arg6) {axis=1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
-  "pd.fetch"(%e2) {name="output"} :(tensor<?xf32>)->()
+  infrt.return %e2:tensor<?xf32>
 }
 // RUN: infrtopt -phi-op-convert -infrt-op-fuse %s
 // CHECK-LABEL: @ops
-func @ops() {
-  %a = pd.feed() {name="input0"} : !infrt.lod_tensor<?xf32,0>
-  %b = pd.feed() {name="input1"} : !infrt.lod_tensor<?xf32,0>
-  %d = pd.feed() {name="input3"} : !infrt.lod_tensor<3x4x9xf32, 0>
+func @ops(%a:!infrt.lod_tensor<?xf32,0>, %b:!infrt.lod_tensor<?xf32,0>) {
   %g = "pd.elementwise_add"(%a, %b) {axis=1:si32} : (!infrt.lod_tensor<?xf32,0>, !infrt.lod_tensor<?xf32>) -> tensor<?xf32>
   %h = "pd.abs"(%g):(tensor<?xf32>) -> tensor<?xf32>
-  "pd.fetch"(%h) {name="output"} :(tensor<?xf32>)->()
+  infrt.return %h:tensor<?xf32>
 }
 // CHECK-LABEL: @op_execute
 func @op_execute(%a:!infrt.lod_tensor<?xf32,0>, %b:!infrt.lod_tensor<?xf32,0>, %c:!infrt.lod_tensor<?xf32,0>) -> !infrt.lod_tensor<?xf32,0> {
   %g = "pd.elementwise_add"(%a, %b) {axis=1:si32} : (!infrt.lod_tensor<?xf32,0>, !infrt.lod_tensor<?xf32>) -> tensor<?xf32>
   %h = "pd.abs"(%g):(tensor<?xf32>) -> tensor<?xf32>
-  "pd.fetch"(%h) {name="output"} :(tensor<?xf32>)->()
+  infrt.return %h:tensor<?xf32>
 }
def PD_FeedOp : PD_Op<"feed", [NoSideEffect]> {
let summary = "Feed Op";
let description = [{
Feed a tensor into the model.
}];
let arguments = (ins StrAttr:$name);
let results = (outs PD_Tensor:$out);
let assemblyFormat = [{
`(` `)` attr-dict `:` type($out)
}];
}
def PD_FetchOp : PD_Op<"fetch", [Terminator]> {
let summary = "fetch Op";
let description = [{
Return the output tensor from the subgraph.
}];
let arguments = (ins PD_Tensor :$inputs, StrAttr:$name);
}
def PD_ConstantOp : PD_Op<"constant", [NoSideEffect, ConstantLike, DeclareOpInterfaceMethods<InferTypeOpInterface>, AllTypesMatch<["value", "output"]>]> {
let summary = "constant Op";
let description = [{}];
let arguments = (ins ElementsAttr:$value);
let results = (outs PD_Tensor:$output);
let hasFolder = 1;
let builders = [
OpBuilder<(ins "mlir::Attribute":$value)>,
];
}
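
With pd.feed and pd.fetch removed, the updated tests above express graph inputs as function arguments and outputs through infrt.return. Schematically (a sketch mirroring the test changes, not an excerpt from the patch):

func @main(%a: tensor<?xf32>) -> tensor<?xf32> {
  %out = "pd.abs"(%a) : (tensor<?xf32>) -> tensor<?xf32>
  infrt.return %out : tensor<?xf32>
}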
@@ -209,7 +209,6 @@ def get_constraint(op_type, op_proto):

 # funtion to generate paddle op dialect file
 def convert_op_proto_into_mlir(op_descs):
     dst_dialect_file = "../../paddle/infrt/dialect/pd/ir/pd_ops.td"
-    custom_dialect_file = "custom_pdop.td"

     # 1. Head files
     comment_ = "/*===- TableGen'source file -----------------------------------------------===*\\\n\
@@ -372,19 +371,13 @@ def convert_op_proto_into_mlir(op_descs):
         ops_mlir_file.write(RESULTS)
         ops_mlir_file.write("}\n")

+    with open(dst_dialect_file, 'a') as ops_mlir_file:
+        ops_mlir_file.write("\n#endif  // PD_OPS")
+
     print("Skipped ops num: " + str(len(skipped_op_list)))
     print("Automatically generated op dialects num: " + str(
         len(automatically_generated_op_dialect)))

-    # 3. custom op dialect and end of file
-    with open(dst_dialect_file, 'a') as ops_mlir_file:
-        with open(custom_dialect_file, 'r') as custom_ops_file:
-            custom_ops = custom_ops_file.readlines()
-            ops_mlir_file.writelines(custom_ops)
-
-        end_ = "\n#endif // PD_OPS"
-        ops_mlir_file.write(end_)

 if __name__ == "__main__":
     all_op_protos_dict = get_all_ops_desc()
......