Unverified  Commit 2c7f6e6d authored by 王明冬, committed by GitHub

[infrt] add infrt dialect ir. test=develop (#39455)

Parent c6478270
@@ -107,6 +107,6 @@ endif()
 cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto infrt_naive)
 cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
-add_dependencies(infrt ${infrt_mlir_incs})
+add_dependencies(infrt ${infrt_mlir_incs} mlir-headers)
 add_custom_target(test_infrt_exec DEPENDS ${INFRT_TEST_TARGETS})
@@ -31,9 +31,9 @@ target_link_libraries(infrtopt infrt)
 add_executable(print-ir print_ir.cc)
 target_link_libraries(print-ir infrt ${mlir_libs})
 add_dependencies(print-ir pd_ops_inc)
 cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
+add_subdirectory(infrt)
 add_subdirectory(tensorrt)
 if (INFRT_WITH_PTEN)
...
core_gather_headers()
gather_srcs(infrt_src SRCS
infrt_dialect.cc
)
add_mlir_dialect(infrt_ops Infrt)
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
#include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h>
#include <mlir/IR/DialectImplementation.h>
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_opsDialect.cpp.inc"
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"
namespace infrt {
void InfrtDialect::initialize() {
  addTypes<
#define GET_TYPEDEF_LIST
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc"  // NOLINT
      >();
  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc"  // NOLINT
      >();
}
/// Parse a type registered to this dialect.
mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
  llvm::StringRef keyword;
  if (parser.parseKeyword(&keyword)) return nullptr;
  // Parse LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32, 5>,
  // where 5 is the lod_level.
  if (keyword == "lod_tensor") {
    // Parse "<", the dimension list and the element type.
    llvm::SmallVector<int64_t, 4> shape;
    mlir::Type elementType;
    int32_t lod_level = 0;
    if (parser.parseLess()) return nullptr;
    if (parser.parseDimensionList(shape)) return nullptr;
    if (parser.parseType(elementType)) return nullptr;
    // Parse "," followed by the lod_level.
    if (parser.parseComma()) return nullptr;
    if (parser.parseInteger(lod_level)) return nullptr;
    // Parse ">".
    if (parser.parseGreater()) return nullptr;
    return LoDTensorType::get(
        parser.getContext(), shape, elementType, lod_level);
  }
  // TODO: parse the other types registered to this dialect.
  return mlir::Type();
}
void InfrtDialect::printType(::mlir::Type type,
                             ::mlir::DialectAsmPrinter &os) const {
  // Print LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32, 5>
  if (type.isa<infrt::LoDTensorType>()) {
    auto lodTensorType = type.cast<infrt::LoDTensorType>();
    os << "lod_tensor<";
    // Print the shape as "d0xd1x...x" followed by the element type and the
    // lod_level; assumes a non-empty shape.
    auto shape = lodTensorType.getShape();
    for (auto dim = shape.begin(), e = shape.end() - 1; dim != e; ++dim)
      os << *dim << 'x';
    os << shape.back() << 'x' << lodTensorType.getElementType() << ", "
       << lodTensorType.getLod_level() << ">";
    return;
  }
  llvm_unreachable("unknown infrt type.");
}
} // namespace infrt
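
For reference, a minimal sketch of how the new type can be built and printed from C++. The standalone main driver below is illustrative and not part of this commit; the LoDTensorType::get call mirrors the one in parseType above, and the chosen shape is arbitrary.

// Hypothetical example (not in this commit): construct and dump
// !Infrt.lod_tensor<3x64x3x3xf32, 5> using the dialect defined above.
#include <mlir/IR/BuiltinTypes.h>
#include <mlir/IR/MLIRContext.h>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"

int main() {
  mlir::MLIRContext context;
  // Register the dialect so its types can be created and printed.
  context.getOrLoadDialect<infrt::InfrtDialect>();

  // Same parameter order as in parseType: shape, element type, lod_level.
  auto f32 = mlir::Float32Type::get(&context);
  auto lod_tensor = infrt::LoDTensorType::get(
      &context, {3, 64, 3, 3}, f32, /*lod_level=*/5);

  // printType above renders the body as "lod_tensor<3x64x3x3xf32, 5>".
  lod_tensor.dump();
  return 0;
}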
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
//===----------------------------------------------------------------------===//
// Dialect
//===----------------------------------------------------------------------===//
#include <mlir/IR/BuiltinTypes.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/infrt/infrt_opsDialect.h.inc"
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.h.inc"
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_ops.h.inc"
#ifndef Infrt_OpS
#define Infrt_OpS
include "mlir/IR/OpBase.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
def Infrt_Dialect : Dialect {
let summary =
"A dialect containing the Infrt Attributes, Operations, and Types";
let name = "Infrt";
let cppNamespace = "::infrt";
}
// Type definitions
// Base class for Infrt dialect types.
class Infrt_Type<string name, list<Trait> traits = [],
string baseCppClass = "::mlir::Type">
: TypeDef<Infrt_Dialect, name, traits, baseCppClass> {
}
def LoDTensor : Infrt_Type<"LoDTensor"> {
let summary = "infrt lod tensor";
let description = [{lod_tensor<3x64x3x3xf32, 3>}];
let parameters = (ins
ArrayRefParameter<"int64_t">:$shape,
"mlir::Type":$elementType,
"int32_t":$lod_level
);
}
// Op definition
class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, traits> {
// Each registered op needs to provide all of a printer, parser and verifier.
// let printer = [{ return infrt::print(p, *this); }];
// let verifier = [{ return infrt::verify(*this); }];
// let parser = [{ return infrt::parse$cppClass(parser, result); }];
}
// def InfRT_KernelOp : Infrt_Op<"kernel", [NoSideEffect]> {
// let summary = "kernel op";
// let description = [{
// kernel op!
// }];
// let arguments = (ins StrAttr:$name, PD_Tensor:$X, PD_Tensor:$Y, DefaultValuedAttr<F32Attr, "1.0">:$Alpha, DefaultValuedAttr<F32Attr, "1.0">:$Beta);
//
// let results = (outs PD_Tensor:$Out);
// }
#endif // Infrt_OpS
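
The parameters list on the LoDTensor TypeDef is what drives the accessors used by printType in infrt_dialect.cc. As a rough illustration, a hypothetical helper (not part of this commit) that reads those parameters back could look like the sketch below; the accessor names match the calls in infrt_dialect.cc, though the exact generated signatures can vary between MLIR versions.

// Hypothetical helper (not in this commit): inspect a LoDTensorType through
// the accessors generated from the TypeDef parameters in infrt_ops.td.
#include <llvm/Support/raw_ostream.h>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h"

void DumpLoDTensorInfo(infrt::LoDTensorType type) {
  // One accessor per TypeDef parameter: shape, elementType, lod_level.
  llvm::errs() << "rank: " << type.getShape().size()
               << ", lod_level: " << type.getLod_level() << ", element type: ";
  type.getElementType().print(llvm::errs());
  llvm::errs() << "\n";
}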
@@ -18,6 +18,7 @@
 #include "paddle/infrt/dialect/basic_kernels.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
+#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
 #include "paddle/infrt/dialect/infrt_base.h"
 #include "paddle/infrt/dialect/pd_ops.h"
 #include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
@@ -28,6 +29,7 @@ namespace infrt {
 void registerCinnDialects(mlir::DialectRegistry &registry) {  // NOLINT
   registry.insert<ts::TensorShapeDialect,
                   dialect::INFRTDialect,
+                  infrt::InfrtDialect,
                   dt::DTDialect,
                   mlir::pd::PaddleDialect,
 #ifdef INFRT_WITH_PTEN
...
@@ -6,6 +6,7 @@
 include "mlir/IR/OpBase.td"
 include "mlir/Interfaces/SideEffectInterfaces.td"
+include "paddle/infrt/dialect/infrt/infrt_ops.td"
 def PD_Dialect : Dialect {
   let name = "pd";
@@ -71,7 +72,10 @@ def PD_ElementType : Type<Or<[PD_Float.predicate,
                               PD_Int.predicate]>,
                           "pd.dtype">;
-def PD_Tensor : TensorOf<[PD_ElementType]>;
+// def PD_Tensor : TensorOf<[PD_ElementType]>;
+def PD_Tensor1 : TensorOf<[PD_ElementType]>;
+def PD_Tensor : AnyTypeOf<[PD_Tensor1, LoDTensor], "pd.ttype">;
 def PD_Tensor_Array : VectorOf<[PD_Tensor]>;
...
@@ -16,6 +16,7 @@
 #include <mlir/IR/Matchers.h>
 #include <mlir/IR/PatternMatch.h>
+#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
 #include "paddle/infrt/dialect/infrt_base.h"
 #define GET_OP_CLASSES
...
@@ -3,7 +3,7 @@
 func @ops() {
   %a = pd.feed() {name="input0"} : tensor<?xf32>
   %b = pd.feed() {name="input1"}: tensor<?xf32>
+  %d = pd.feed() {name="input3"}: !Infrt.lod_tensor<3x4x9xf32, 0>
   %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
   infrt.return
...