diff --git a/cmake/external/llvm.cmake b/cmake/external/llvm.cmake
index 9f6fd32ad986c4a5911b1d00dfb548fa3320c34d..5c48afa2806aab10bb08317679c0a00c8f177f7b 100644
--- a/cmake/external/llvm.cmake
+++ b/cmake/external/llvm.cmake
@@ -99,7 +99,8 @@
 endfunction()
 function(mlir_add_rewriter td_base)
   set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
-  mlir_tablegen(${td_base}.cpp.inc -gen-rewriters "-I${CMAKE_SOURCE_DIR}/infrt/dialect/pass")
+  set(LLVM_TARGET_DEPENDS ${LLVM_TARGET_DEPENDS} ${CMAKE_SOURCE_DIR}/paddle/infrt/dialect/infrt/ir/infrt_base.td)
+  mlir_tablegen(${td_base}.cpp.inc -gen-rewriters)
   add_public_tablegen_target(MLIR${td_base}IncGen)
   add_dependencies(mlir-headers MLIR${td_base}IncGen)
 endfunction()
diff --git a/paddle/infrt/CMakeLists.txt b/paddle/infrt/CMakeLists.txt
index ed29b5b44c7791d356ec1283a0027cacf1fd5e7a..4e273f6d551edd74ec979e6ec34aedabdb58bd10 100644
--- a/paddle/infrt/CMakeLists.txt
+++ b/paddle/infrt/CMakeLists.txt
@@ -90,7 +90,6 @@ add_subdirectory(tests)
 set(infrt_mlir_incs
     basic_kernels_inc
     test_kernels_inc
-    infrt_base_inc
     tensor_shape_inc
     dense_tensor_inc
     pd_ops_inc
diff --git a/paddle/infrt/api/infrt_api.cc b/paddle/infrt/api/infrt_api.cc
index e0488117783d5657aa97c301d9d12ce1c77017e7..0500a8123044cd05695c5167b1afaa48a6027b57 100644
--- a/paddle/infrt/api/infrt_api.cc
+++ b/paddle/infrt/api/infrt_api.cc
@@ -24,7 +24,7 @@
 #include "paddle/infrt/common/global.h"
 #include "paddle/infrt/dialect/dense_tensor.h"
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/mlir_loader.h"
 #include "paddle/infrt/host_context/core_runtime.h"
 #include "paddle/infrt/host_context/kernel_registry.h"
@@ -144,7 +144,7 @@ class PredictExecutor : public MlirToRuntimeTranslator {
   // process results
   auto& last_op = predict_func.front().back();
-  if (last_op.getName().getStringRef() == "Infrt.return") {
+  if (last_op.getName().getStringRef() == "infrt.return") {
     for (size_t i = 0; i < last_op.getNumOperands(); ++i) {
       auto* value = AddValue(mlir::Value(last_op.getOperand(i)));
       results_.push_back(ValueRef(value));
diff --git a/paddle/infrt/dialect/CMakeLists.txt b/paddle/infrt/dialect/CMakeLists.txt
index e35989da2085b21f4dbfaadea05793fc9dcb8753..a3f2d0afafc417cc7a4cbba8a3d6bfa92c9bef00 100644
--- a/paddle/infrt/dialect/CMakeLists.txt
+++ b/paddle/infrt/dialect/CMakeLists.txt
@@ -2,26 +2,20 @@ core_gather_headers()
 gather_srcs(infrt_src SRCS
     dialect.cc
-    basic_kernels.cc
-    test_kernels.cc
-    infrt_base.cc
-    init_infrt_dialects.cc
+    init_dialects.cc
     tensor_shape.cc
     dense_tensor.cc
     mlir_loader.cc
     diagnostic_utils.cc
-    pd_types.cc
     pd_ops.cc
     )
-mlir_tablegen_on(basic_kernels)
-mlir_tablegen_on(test_kernels)
-mlir_tablegen_on(infrt_base DIALECT Infrt)
 mlir_tablegen_on(tensor_shape DIALECT ts)
 mlir_tablegen_on(dense_tensor DIALECT dt)
 mlir_tablegen_on(pd_op_base DIALECT pd)
 mlir_tablegen_on(pd_ops)
 mlir_tablegen_on(pd_extra_ops)
+
 mlir_add_rewriter(rewrite)

 # TODO(Superjomn) add a cmake function cc_executable to encapsulate the following code
diff --git a/paddle/infrt/dialect/dense_tensor.h b/paddle/infrt/dialect/dense_tensor.h
index 27febffe8156379c63a0b6b3fb048f7441255f0e..7fbd1e8a4efe1e9dc1d022beb7673ee8a59c7e36 100644
--- a/paddle/infrt/dialect/dense_tensor.h
+++ b/paddle/infrt/dialect/dense_tensor.h
@@ -19,7 +19,7 @@
 #include
-#include "paddle/infrt/dialect/infrt/infrt_dialect.h"
+#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
 #include "paddle/infrt/dialect/dense_tensor_dialect.hpp.inc"
diff --git
a/paddle/infrt/dialect/dense_tensor.td b/paddle/infrt/dialect/dense_tensor.td index f5db90648eec9933eadf897a8090260bdbfe575b..666c7b300af33db0c27e5b3ab8a74aa4b1591c9b 100644 --- a/paddle/infrt/dialect/dense_tensor.td +++ b/paddle/infrt/dialect/dense_tensor.td @@ -2,7 +2,7 @@ #else #define DT_OPS -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "paddle/infrt/dialect/tensor_shape_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" diff --git a/paddle/infrt/dialect/infrt/CMakeLists.txt b/paddle/infrt/dialect/infrt/CMakeLists.txt index 08ce2d4707bfdc8498610793437675ae8238475e..5f65336453fbdf82f30948aeea8dc52b0367159b 100644 --- a/paddle/infrt/dialect/infrt/CMakeLists.txt +++ b/paddle/infrt/dialect/infrt/CMakeLists.txt @@ -1,17 +1,3 @@ -core_gather_headers() - -gather_srcs(infrt_src SRCS - common_type.cc - infrt_dialect.cc - ) - - -add_mlir_dialect(infrt_ops infrt) - -set(LLVM_TARGET_DEFINITIONS infrt_ops.td) -mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt) -mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt) -add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen) -add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen) - +add_subdirectory(common) +add_subdirectory(ir) add_subdirectory(pass) diff --git a/paddle/infrt/dialect/infrt/common/CMakeLists.txt b/paddle/infrt/dialect/infrt/common/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..f693c82b5060ef35eecbc1ef9ad5053d6b93e4ad --- /dev/null +++ b/paddle/infrt/dialect/infrt/common/CMakeLists.txt @@ -0,0 +1,6 @@ +core_gather_headers() + +gather_srcs(infrt_src SRCS + types.cc + utils.cc + ) diff --git a/paddle/infrt/dialect/infrt/common_type.cc b/paddle/infrt/dialect/infrt/common/types.cc similarity index 97% rename from paddle/infrt/dialect/infrt/common_type.cc rename to paddle/infrt/dialect/infrt/common/types.cc index 00684c505268c09e97d262a3526c946d1bc3095c..62419a196288bb052a9f240ecc25f34c102a5b35 100644 --- a/paddle/infrt/dialect/infrt/common_type.cc +++ b/paddle/infrt/dialect/infrt/common/types.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" namespace infrt { diff --git a/paddle/infrt/dialect/infrt/common_type.h b/paddle/infrt/dialect/infrt/common/types.h similarity index 100% rename from paddle/infrt/dialect/infrt/common_type.h rename to paddle/infrt/dialect/infrt/common/types.h diff --git a/paddle/infrt/dialect/infrt/common/utils.cc b/paddle/infrt/dialect/infrt/common/utils.cc new file mode 100644 index 0000000000000000000000000000000000000000..0ffb23c490f8f52044d35d20508f42f3f9a89413 --- /dev/null +++ b/paddle/infrt/dialect/infrt/common/utils.cc @@ -0,0 +1,28 @@ +// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/infrt/dialect/infrt/common/utils.h" + +mlir::SmallVector infrt::cvtValueToValueRange( + const mlir::Value &operand) { + return mlir::SmallVector(1, operand); +} + +mlir::SmallVector infrt::concatTwoValueRange( + mlir::ValueRange operand_0, mlir::ValueRange operand_1) { + mlir::SmallVector operands; + operands.append(operand_0.begin(), operand_0.end()); + operands.append(operand_1.begin(), operand_1.end()); + return operands; +} diff --git a/paddle/infrt/dialect/pd_types.cc b/paddle/infrt/dialect/infrt/common/utils.h similarity index 57% rename from paddle/infrt/dialect/pd_types.cc rename to paddle/infrt/dialect/infrt/common/utils.h index 94856e362d301978970279846907f41dfbc00b56..886407b56649a296046d570826cf2b1b0e8aade8 100644 --- a/paddle/infrt/dialect/pd_types.cc +++ b/paddle/infrt/dialect/infrt/common/utils.h @@ -12,4 +12,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/infrt/dialect/pd_types.h" +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace infrt { + +mlir::SmallVector cvtValueToValueRange( + const mlir::Value &operand); + +mlir::SmallVector concatTwoValueRange( + mlir::ValueRange operand_0, mlir::ValueRange operand_1); +} // namespace infrt diff --git a/paddle/infrt/dialect/infrt/ir/CMakeLists.txt b/paddle/infrt/dialect/infrt/ir/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..7c009bdb267e6ea1dd5a5fb392f64dddb7a05f06 --- /dev/null +++ b/paddle/infrt/dialect/infrt/ir/CMakeLists.txt @@ -0,0 +1,18 @@ +core_gather_headers() + +gather_srcs(infrt_src SRCS + infrt_dialect.cc + basic_kernels.cc + test_kernels.cc + ) + +add_mlir_dialect(infrt_ops infrt) + +set(LLVM_TARGET_DEFINITIONS infrt_ops.td) +mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt) +mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt) +add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen) +add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen) + +mlir_tablegen_on(basic_kernels) +mlir_tablegen_on(test_kernels) diff --git a/paddle/infrt/dialect/basic_kernels.cc b/paddle/infrt/dialect/infrt/ir/basic_kernels.cc similarity index 63% rename from paddle/infrt/dialect/basic_kernels.cc rename to paddle/infrt/dialect/infrt/ir/basic_kernels.cc index c1aa75fb24650b99ea8371c0ecbe7e572df2f0ce..ba83f3e36c94a173accad9fb6e746eaec0ec8e6c 100644 --- a/paddle/infrt/dialect/basic_kernels.cc +++ b/paddle/infrt/dialect/infrt/ir/basic_kernels.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
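// Illustrative sketch (not part of this patch): the two helpers above are the
// C++ side of the INFRT_cvtValueToValueRange / INFRT_concatTwoValueRange
// NativeCodeCall wrappers declared in infrt_base.td later in this patch.
// Assumes the template arguments stripped from mlir::SmallVector here are
// <mlir::Value, 4>.
#include "llvm/ADT/SmallVector.h"
#include "mlir/IR/Value.h"
#include "paddle/infrt/dialect/infrt/common/utils.h"

// Build a combined operand list from one leading value plus a tail range,
// the way a generated rewrite pattern would.
mlir::SmallVector<mlir::Value, 4> BuildOperands(mlir::Value lead,
                                                mlir::ValueRange tail) {
  auto head = infrt::cvtValueToValueRange(lead);  // single value -> range
  return infrt::concatTwoValueRange(head, tail);  // head ++ tail
}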
-#include "paddle/infrt/dialect/basic_kernels.h" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" #include #include @@ -30,23 +30,6 @@ namespace infrt { namespace dialect { using namespace mlir; // NOLINT -static ParseResult parseCallOp(OpAsmParser &parser, // NOLINT - OperationState &result) { // NOLINT - SymbolRefAttr callee_attr; - FunctionType callee_type; - SmallVector operands; - auto callee_loc = parser.getNameLoc(); - if (parser.parseAttribute(callee_attr, "callee", result.attributes) || - parser.parseOperandList(operands, OpAsmParser::Delimiter::Paren) || - parser.parseOptionalAttrDict(result.attributes) || - parser.parseColonType(callee_type) || - parser.addTypesToList(callee_type.getResults(), result.types) || - parser.resolveOperands( - operands, callee_type.getInputs(), callee_loc, result.operands)) - return failure(); - return success(); -} - static ParseResult parseConstantOp(Type attrType, OpAsmParser &parser, // NOLINT OperationState &result) { // NOLINT @@ -79,24 +62,6 @@ static ParseResult parseConstantI64Op(OpAsmParser &parser, // NOLINT IntegerType::get(result.getContext(), 64), parser, result); } -static ParseResult parseReturnOp(OpAsmParser &parser, // NOLINT - OperationState &result) { // NOLINT - SmallVector opInfo; - SmallVector types; - llvm::SMLoc loc = parser.getCurrentLocation(); - return failure(parser.parseOperandList(opInfo) || - (!opInfo.empty() && parser.parseColonTypeList(types)) || - parser.resolveOperands(opInfo, types, loc, result.operands)); -} - -static void print(OpAsmPrinter &p, CallOp op) { // NOLINT - p << op->getAttr("callee") << "("; - p.printOperands(op.getOperands()); - p << ")"; - p.printOptionalAttrDict(op->getAttrs(), {"callee"}); - p << " : "; -} - static void printConstant(OpAsmPrinter &p, mlir::Operation *op) { // NOLINT p << " "; p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"value"}); @@ -127,37 +92,13 @@ static void print(OpAsmPrinter &p, ConstantI64Op op) { // NOLINT printConstant(p, op); } -static void print(OpAsmPrinter &p, ReturnOp op) { // NOLINT - if (op.getNumOperands() > 0) { - p << ' '; - p.printOperands(op.getOperands()); - p << " : "; - llvm::interleaveComma(op.getOperands(), p); - } -} - -static LogicalResult verify(CallOp op) { return success(); } - static LogicalResult verify(ConstantF32Op op) { return success(); } static LogicalResult verify(ConstantI32Op op) { return success(); } static LogicalResult verify(ConstantF64Op op) { return success(); } static LogicalResult verify(ConstantI64Op op) { return success(); } -static LogicalResult verify(ReturnOp op) { - auto function = dyn_cast(op->getParentOp()); - - if (!function) return success(); - - auto results = function.getType().getResults(); - if (op.getNumOperands() != results.size()) - return op.emitOpError("has ") - << op.getNumOperands() - << " operands, but enclosing function returns " << results.size(); - - return success(); -} } // namespace dialect } // namespace infrt #define GET_OP_CLASSES -#include "paddle/infrt/dialect/basic_kernels.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc" diff --git a/paddle/infrt/dialect/basic_kernels.h b/paddle/infrt/dialect/infrt/ir/basic_kernels.h similarity index 92% rename from paddle/infrt/dialect/basic_kernels.h rename to paddle/infrt/dialect/infrt/ir/basic_kernels.h index b82abcd52d28f45b18824d9ea6f9e12c2ec1c574..a36f55691b716dda51120e8c4be7c956df9b9f25 100644 --- a/paddle/infrt/dialect/basic_kernels.h +++ b/paddle/infrt/dialect/infrt/ir/basic_kernels.h @@ -18,4 +18,4 @@ #include 
#define GET_OP_CLASSES -#include "paddle/infrt/dialect/basic_kernels.hpp.inc" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.hpp.inc" diff --git a/paddle/infrt/dialect/basic_kernels.td b/paddle/infrt/dialect/infrt/ir/basic_kernels.td similarity index 69% rename from paddle/infrt/dialect/basic_kernels.td rename to paddle/infrt/dialect/infrt/ir/basic_kernels.td index 89d8cd65b85cd39c9eb50edca1aa1bfaf47073a4..60315b45dd0dfaee8437c1dd312691445fdede56 100644 --- a/paddle/infrt/dialect/basic_kernels.td +++ b/paddle/infrt/dialect/infrt/ir/basic_kernels.td @@ -4,10 +4,10 @@ #else #define BASIC_OPS -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" -class INFRT_Op traits = []> : Op { +class INFRT_Op traits = []> : Op { // Each registered op needs to provide all of a printer, parser and verifier. let printer = [{ return infrt::dialect::print(p, *this); }]; @@ -15,23 +15,6 @@ class INFRT_Op traits = []> : Op { - let summary = "call a host operation"; - let description = [{ - The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type. - - %2 = infrt.call @add(%0, %1) : (f32, f32) -> f32 - }]; - - let arguments = (ins FlatSymbolRefAttr:$callee, Variadic:$operands); - let results = (outs Variadic); - - let extraClassDeclaration = [{ - mlir::StringRef getCallee() { return callee(); } - mlir::FunctionType getCalleeType(); - }]; -} - class ConstantOp : INFRT_Op<"constant." # suffix, [NoSideEffect]> { let summary = "constant value constructor in host"; @@ -45,22 +28,6 @@ def ConstantI64Op : ConstantOp<"i64", I64, I64Attr>; def ConstantF32Op : ConstantOp<"f32", F32, F32Attr>; def ConstantF64Op : ConstantOp<"f64", F64, F64Attr>; -def ReturnOp : INFRT_Op<"return", [Terminator]> { - let summary = "host executor return operation"; - let description = [{ - The "Infrt.return" operation represents a return operation within a function. - - func @foo() : (i32, f8) { - Infrt.return %0, %1 : i32, f8 - } - }]; - - let arguments = (ins Variadic:$operands); - - let builders = [OpBuilder<(ins), - [{ build($_builder, $_state, llvm::None); }]>]; -} - class AddOp : INFRT_Op<"add." # suffix, [NoSideEffect]> { let summary = "infrt.add operation"; let description = [{ @@ -112,7 +79,7 @@ def PrintF32Op : PrintOp<"f32", F32>; def PrintF64Op : PrintOp<"f64", F64>; def PrintStringOp : INFRT_Op<"print_string"> { - let summary = "Infrt.print_string"; + let summary = "infrt.print_string"; let description = [{ An operation that prints a string. }]; diff --git a/paddle/infrt/dialect/infrt/infrt_ops_base.td b/paddle/infrt/dialect/infrt/ir/infrt_base.td similarity index 85% rename from paddle/infrt/dialect/infrt/infrt_ops_base.td rename to paddle/infrt/dialect/infrt/ir/infrt_base.td index 3190c1c84b8c04ceb7e91d829865c65503f5d708..c5130e89bb13a58a0aa0cf3aeae1b00e269eb259 100644 --- a/paddle/infrt/dialect/infrt/infrt_ops_base.td +++ b/paddle/infrt/dialect/infrt/ir/infrt_base.td @@ -101,4 +101,21 @@ class Infrt_Attr traits = [], : AttrDef { let mnemonic = ?; } + +// tools function. 
used for pattern rewriter +class INFRT_createI32Attr : NativeCodeCall< + "$_builder.getI32IntegerAttr(" # value # ")">; + +class INFRT_createSI32Attr : NativeCodeCall< + "$_builder.getSI32IntegerAttr(" # value # ")">; + +class INFRT_createF32Attr : NativeCodeCall< + "$_builder.getF32FloatAttr(" # value # ")">; + +def INFRT_cvtValueToValueRange : NativeCodeCall< + "infrt::cvtValueToValueRange($0)">; + +def INFRT_concatTwoValueRange : NativeCodeCall< + "infrt::concatTwoValueRange($0, $1)">; + #endif // INFRT_OPS_BASE diff --git a/paddle/infrt/dialect/infrt/infrt_dialect.cc b/paddle/infrt/dialect/infrt/ir/infrt_dialect.cc similarity index 84% rename from paddle/infrt/dialect/infrt/infrt_dialect.cc rename to paddle/infrt/dialect/infrt/ir/infrt_dialect.cc index 400e4921c944491e0ce8cded38fec9435f4ad0bd..42de08ebc41938c40675435d4af10f758c52052b 100644 --- a/paddle/infrt/dialect/infrt/infrt_dialect.cc +++ b/paddle/infrt/dialect/infrt/ir/infrt_dialect.cc @@ -12,40 +12,52 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include #include #include #include #include "paddle/infrt/dialect/dense_tensor.h" -#include "paddle/infrt/dialect/infrt/infrt_opsDialect.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.cpp.inc" #define GET_TYPEDEF_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc" #define GET_ATTRDEF_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc" #define GET_OP_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc" + +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" + +#include "paddle/infrt/dialect/infrt/ir/test_kernels.h" namespace infrt { void InfrtDialect::initialize() { addTypes< #define GET_TYPEDEF_LIST -#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc" // NOLINT +#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc" // NOLINT >(); addAttributes< #define GET_ATTRDEF_LIST -#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc" // NOLINT +#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc" // NOLINT >(); addOperations< #define GET_OP_LIST -#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc" // NOLINT +#include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc" // NOLINT + >(); + addOperations< +#define GET_OP_LIST +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc" + >(); + addOperations< +#define GET_OP_LIST +#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc" >(); } @@ -128,7 +140,7 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const { void InfrtDialect::printType(::mlir::Type type, ::mlir::DialectAsmPrinter &os) const { - // print LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32,5> + // print LoDTensorType, for example: !infrt.lod_tensor<3x64x3x3xf32,5> if (type.isa()) { auto lod_tensor_type = type.cast(); os << "lod_tensor<"; diff --git a/paddle/infrt/dialect/infrt/infrt_dialect.h b/paddle/infrt/dialect/infrt/ir/infrt_dialect.h similarity index 77% rename from paddle/infrt/dialect/infrt/infrt_dialect.h rename to paddle/infrt/dialect/infrt/ir/infrt_dialect.h index 
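// Illustrative sketch (not part of this patch): the INFRT_create*Attr helpers
// above expand straight to mlir::Builder attribute getters, replacing the
// createI32Attr/createSI32Attr/createF32Attr free functions deleted from
// infrt_base.h elsewhere in this patch. The literal values are examples only.
#include "mlir/IR/Builders.h"

void BuildExampleAttrs(mlir::OpBuilder &builder) {
  // INFRT_createI32Attr<"0">   ==> builder.getI32IntegerAttr(0)
  // INFRT_createSI32Attr<"-1"> ==> builder.getSI32IntegerAttr(-1)
  // INFRT_createF32Attr<"1.0"> ==> builder.getF32FloatAttr(1.0f)
  mlir::IntegerAttr i32_attr = builder.getI32IntegerAttr(0);
  mlir::IntegerAttr si32_attr = builder.getSI32IntegerAttr(-1);
  mlir::FloatAttr f32_attr = builder.getF32FloatAttr(1.0f);
  (void)i32_attr;
  (void)si32_attr;
  (void)f32_attr;
}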
ed5b36e556149dbc3026e732cf953c5562841921..3e6ea2a74c79d43015a62f166928e10adb48698a 100644 --- a/paddle/infrt/dialect/infrt/infrt_dialect.h +++ b/paddle/infrt/dialect/infrt/ir/infrt_dialect.h @@ -22,14 +22,14 @@ #include #include #include -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" -#include "paddle/infrt/dialect/infrt/infrt_opsDialect.h.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.h.inc" #define GET_TYPEDEF_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_opsTypes.h.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.h.inc" #define GET_ATTRDEF_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.h.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.h.inc" #define GET_OP_CLASSES -#include "paddle/infrt/dialect/infrt/infrt_ops.h.inc" +#include "paddle/infrt/dialect/infrt/ir/infrt_ops.h.inc" diff --git a/paddle/infrt/dialect/infrt/infrt_ops.td b/paddle/infrt/dialect/infrt/ir/infrt_ops.td similarity index 64% rename from paddle/infrt/dialect/infrt/infrt_ops.td rename to paddle/infrt/dialect/infrt/ir/infrt_ops.td index 16ade66d47b8ee538a6e7c4f19bf571a25c3e416..f5430b03d0d75cfa8ba91f03ebc90ee0f73c25d7 100644 --- a/paddle/infrt/dialect/infrt/infrt_ops.td +++ b/paddle/infrt/dialect/infrt/ir/infrt_ops.td @@ -1,4 +1,4 @@ -include "paddle/infrt/dialect/infrt/infrt_ops_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" // Op definition class Infrt_Op traits = []> : Op { @@ -33,6 +33,26 @@ def Infrt_ReturnOp : Infrt_Op<"return", [Terminator]> { let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?"; } +def Infrt_CallOp : Infrt_Op<"call"> { + let summary = "call a host operation"; + let description = [{ + The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type. + + %2 = infrt.call @add(%0, %1) : (f32, f32) -> f32 + }]; + + let arguments = (ins FlatSymbolRefAttr:$callee, Variadic:$operands); + let results = (outs Variadic); + + //let extraClassDeclaration = [{ + // mlir::StringRef getCallee() { return callee(); } + // mlir::FunctionType getCalleeType(); + // }]; + let assemblyFormat = [{ + $callee `(` $operands `)` attr-dict `:` functional-type($operands, results) + }]; +} + def Infrt_CvtTensorOp : Infrt_Op<"cvt_tensor", [NoSideEffect]> { let summary = "convert tensor type op"; let description = [{convert tensor type op!}]; diff --git a/paddle/infrt/dialect/test_kernels.cc b/paddle/infrt/dialect/infrt/ir/test_kernels.cc similarity index 96% rename from paddle/infrt/dialect/test_kernels.cc rename to paddle/infrt/dialect/infrt/ir/test_kernels.cc index f0c4723b49a7906cf5327771e26eb87e8b1248c0..5f7f83a9dfa8011b3043e20da7d9f21f3afe5cf6 100644 --- a/paddle/infrt/dialect/test_kernels.cc +++ b/paddle/infrt/dialect/infrt/ir/test_kernels.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/infrt/dialect/test_kernels.h" +#include "paddle/infrt/dialect/infrt/ir/test_kernels.h" #include #include @@ -147,7 +147,7 @@ static mlir::LogicalResult verify(BenchmarkOp op) { // Verify that the target benchmark region has exactly one return value. 
auto ®ion = op.region(); auto &last_op = region.front().back(); - if (last_op.getName().getStringRef() != "Infrt.return") { + if (last_op.getName().getStringRef() != "infrt.return") { return op.emitOpError("missing return statement"); } if (last_op.getNumOperands() != 1) { @@ -161,4 +161,4 @@ static mlir::LogicalResult verify(BenchmarkOp op) { } // namespace infrt #define GET_OP_CLASSES -#include "paddle/infrt/dialect/test_kernels.cpp.inc" +#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc" diff --git a/paddle/infrt/dialect/test_kernels.h b/paddle/infrt/dialect/infrt/ir/test_kernels.h similarity index 92% rename from paddle/infrt/dialect/test_kernels.h rename to paddle/infrt/dialect/infrt/ir/test_kernels.h index 73c8a6fb387bca6ebc7ae393e4bba32ab94aa951..1fe5020b240046f71571e3a4c999b1eae07741a1 100644 --- a/paddle/infrt/dialect/test_kernels.h +++ b/paddle/infrt/dialect/infrt/ir/test_kernels.h @@ -17,4 +17,4 @@ #include #define GET_OP_CLASSES -#include "paddle/infrt/dialect/test_kernels.hpp.inc" +#include "paddle/infrt/dialect/infrt/ir/test_kernels.hpp.inc" diff --git a/paddle/infrt/dialect/test_kernels.td b/paddle/infrt/dialect/infrt/ir/test_kernels.td similarity index 93% rename from paddle/infrt/dialect/test_kernels.td rename to paddle/infrt/dialect/infrt/ir/test_kernels.td index 6e4bc26aa1496dcb4caed83f98fc42dab9e3cce0..0ce1f3f65e8f7f46cf32794b3191e66ae71e3eae 100644 --- a/paddle/infrt/dialect/test_kernels.td +++ b/paddle/infrt/dialect/infrt/ir/test_kernels.td @@ -4,12 +4,12 @@ #else #define TEST_OPS -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" // Base class for Test dialect ops. class Test_Op traits = []> : - Op { + Op { // Each registered op in the Test namespace needs to provide all of a printer, // parser and verifier. @@ -45,7 +45,7 @@ def BenchmarkOp : Test_Op<"benchmark"> { // The following code benchmarks the infrt.add.i32 kernel. %x = infrt.add.i32 %c, %c // The benchmarked function needs to return exactly one value. 
- Infrt.return %x : i32 + infrt.return %x : i32 } }]; diff --git a/paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td b/paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td index ef702650b6f1bbd3615ca7a70880d3c2c04e254b..51addb4deb43824965806962613d1ab4bd1c1e3d 100644 --- a/paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td +++ b/paddle/infrt/dialect/infrt/pass/infrt_op_fuse.td @@ -2,7 +2,7 @@ #define INFRT_OP_FUSE include "mlir/Interfaces/SideEffectInterfaces.td" -include "paddle/infrt/dialect/infrt/infrt_ops.td" +include "paddle/infrt/dialect/infrt/ir/infrt_ops.td" include "paddle/infrt/dialect/pd_ops.td" def FuseCvtTensorPattern : Pat< diff --git a/paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc b/paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc index cb16e054418b3b2c6ff843fdaf464d24a42249c2..25ecf2ae99dc3613944fcedaee427b540f0faae4 100644 --- a/paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc +++ b/paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.cc @@ -15,7 +15,7 @@ #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h" #include -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/pd_ops.h" namespace { #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse.cpp.inc" // NOLINT diff --git a/paddle/infrt/dialect/infrt_base.cc b/paddle/infrt/dialect/infrt_base.cc deleted file mode 100644 index e951762abb20c232232af66d6bf1f2e7568a763b..0000000000000000000000000000000000000000 --- a/paddle/infrt/dialect/infrt_base.cc +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "paddle/infrt/dialect/infrt_base.h" - -#include "paddle/infrt/dialect/basic_kernels.h" -#include "paddle/infrt/dialect/dense_tensor.h" -#include "paddle/infrt/dialect/test_kernels.h" - -namespace infrt { -namespace dialect { - -// ----INFRTDialect definition begin---- -void INFRTDialect::initialize() { - allowUnknownTypes(); - allowUnknownOperations(); - addOperations< -#define GET_OP_LIST -#include "paddle/infrt/dialect/basic_kernels.cpp.inc" - >(); - addOperations< -#define GET_OP_LIST -#include "paddle/infrt/dialect/test_kernels.cpp.inc" - >(); -} - -mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const { - llvm::StringRef keyword; - if (parser.parseKeyword(&keyword)) return mlir::Type(); - // parse TensorMapType, for example: !infrt.tensor_map - parser.emitError(parser.getCurrentLocation(), "unknown infrt type: ") - << keyword; - return mlir::Type(); -} - -void INFRTDialect::printType(mlir::Type type, - mlir::DialectAsmPrinter &printer) const { - // print TensorMapType, for example: !infrt.tensor_map - llvm_unreachable("unknown infrt type."); -} - -// ----INFRTDialect definition end---- - -} // namespace dialect -} // namespace infrt diff --git a/paddle/infrt/dialect/infrt_base.h b/paddle/infrt/dialect/infrt_base.h deleted file mode 100644 index 3ef73171dcdea4e0367837f4b3893405c29a1580..0000000000000000000000000000000000000000 --- a/paddle/infrt/dialect/infrt_base.h +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include -#include -#include -#include - -#include "paddle/infrt/dialect/infrt_base.hpp.inc" - -namespace infrt { -namespace dialect { - -class INFRTDialect : public mlir::Dialect { - explicit INFRTDialect(mlir::MLIRContext *context) - : mlir::Dialect( - getDialectNamespace(), context, mlir::TypeID::get()) { - initialize(); - } - - // parse types registered to the dialect. - mlir::Type parseType(mlir::DialectAsmParser &parser) const override; - // print types registered to the dialect. 
- void printType(mlir::Type type, - mlir::DialectAsmPrinter &printer) const override; - - void initialize(); - friend class mlir::MLIRContext; - - public: - static ::llvm::StringRef getDialectNamespace() { return "Infrt"; } -}; -} // namespace dialect - -template -static mlir::IntegerAttr createI32Attr(mlir::OpBuilder &b, // NOLINT - mlir::Location loc, - T constant) { - return b.getIntegerAttr(b.getI32Type(), constant); -} - -template -static mlir::IntegerAttr createSI32Attr(mlir::OpBuilder &b, // NOLINT - mlir::Location loc, - T constant) { - return b.getSI32IntegerAttr(constant); -} - -template -static mlir::FloatAttr createF32Attr(mlir::OpBuilder &b, // NOLINT - mlir::Location loc, - T constant) { - return b.getF32FloatAttr(constant); -} - -static mlir::SmallVector cvtValueToValueRange( - const mlir::Value &operand) { - return mlir::SmallVector(1, operand); -} - -static mlir::SmallVector concatTwoValueRange( - mlir::ValueRange operand_0, mlir::ValueRange operand_1) { - mlir::SmallVector operands; - operands.append(operand_0.begin(), operand_0.end()); - operands.append(operand_1.begin(), operand_1.end()); - return operands; -} -} // namespace infrt diff --git a/paddle/infrt/dialect/infrt_base.td b/paddle/infrt/dialect/infrt_base.td deleted file mode 100644 index 45e6b116f489709b1d854727870010c7545d92e7..0000000000000000000000000000000000000000 --- a/paddle/infrt/dialect/infrt_base.td +++ /dev/null @@ -1,33 +0,0 @@ -#ifndef INFRT_BASE -#define INFRT_BASE - -include "mlir/IR/OpBase.td" -include "paddle/infrt/dialect/infrt/infrt_ops_base.td" - -def INFRT_Dialect : Dialect { - let name = "Infrt"; - - let description = [{ - The INFRT host dialect. - }]; - - let cppNamespace = "::infrt::dialect"; -} - -def BufferType : OpaqueType<"b", "buffer", "buffer">; - -class INFRT_createI32Attr : NativeCodeCall< - "infrt::createI32Attr($_builder, $_loc, " # value # ")">; - -class INFRT_createSI32Attr : NativeCodeCall< - "infrt::createSI32Attr($_builder, $_loc, " # value # ")">; - -class INFRT_createF32Attr : NativeCodeCall< - "infrt::createF32Attr($_builder, $_loc, " # value # ")">; - -def INFRT_cvtValueToValueRange : NativeCodeCall< - "infrt::cvtValueToValueRange($0)">; - -def INFRT_concatTwoValueRange : NativeCodeCall< - "infrt::concatTwoValueRange($0, $1)">; -#endif // INFRT_BASE diff --git a/paddle/infrt/dialect/init_infrt_dialects.cc b/paddle/infrt/dialect/init_dialects.cc similarity index 83% rename from paddle/infrt/dialect/init_infrt_dialects.cc rename to paddle/infrt/dialect/init_dialects.cc index 5eae01719361dd5bc21c139b54cbcf16f226b4cc..0c5944ebf84750be8cf789552219157da3170c39 100644 --- a/paddle/infrt/dialect/init_infrt_dialects.cc +++ b/paddle/infrt/dialect/init_dialects.cc @@ -12,14 +12,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
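// Illustrative sketch (not part of this patch): with infrt_base.{h,cc,td}
// removed, the hand-written INFRTDialect above is gone and every dialect is
// registered through registerCinnDialects() from the renamed init_dialects.h,
// mirroring the opt.cc / print_ir.cc call sites elsewhere in this patch.
#include "mlir/IR/Dialect.h"
#include "mlir/IR/MLIRContext.h"
#include "paddle/infrt/dialect/init_dialects.h"

void LoadInfrtDialects(mlir::MLIRContext &context) {
  mlir::DialectRegistry registry;
  infrt::registerCinnDialects(registry);  // infrt, ts, dt, pd, phi, ...
  context.appendDialectRegistry(registry);
  context.loadAllAvailableDialects();
}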
-#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include "paddle/infrt/dialect/init_dialects.h" #include -#include "paddle/infrt/dialect/basic_kernels.h" #include "paddle/infrt/dialect/dense_tensor.h" -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" -#include "paddle/infrt/dialect/infrt_base.h" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" + #include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h" #include "paddle/infrt/dialect/phi/ir/phi_base.h" @@ -30,8 +30,7 @@ namespace infrt { void registerCinnDialects(mlir::DialectRegistry ®istry) { // NOLINT registry.insert #include "paddle/infrt/dialect/diagnostic_utils.h" -#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include "paddle/infrt/dialect/init_dialects.h" namespace infrt { namespace dialect { diff --git a/paddle/infrt/dialect/mlir_loader_test.cc b/paddle/infrt/dialect/mlir_loader_test.cc index 2f721e49a63096d1c3168805d373cbc8809542da..8ccb07161d364e968ead568f20c4b98b18a7e04e 100644 --- a/paddle/infrt/dialect/mlir_loader_test.cc +++ b/paddle/infrt/dialect/mlir_loader_test.cc @@ -22,7 +22,7 @@ #include -#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include "paddle/infrt/dialect/init_dialects.h" namespace infrt { namespace dialect { @@ -32,13 +32,13 @@ TEST(MlirLoader, basic) { auto source = R"ROC( func @main() -> f32 { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 - %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 + %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 - "Infrt.print.f32"(%v0) : (f32) -> () + "infrt.print.f32"(%v0) : (f32) -> () - Infrt.return %value : f32 + infrt.return %value : f32 } )ROC"; diff --git a/paddle/infrt/dialect/opt.cc b/paddle/infrt/dialect/opt.cc index 5bcf5a23f4c532b1056ceaa54c80902b32e4061a..2006530958f0b5223edfcee87a5895e101f0e240 100644 --- a/paddle/infrt/dialect/opt.cc +++ b/paddle/infrt/dialect/opt.cc @@ -14,7 +14,7 @@ #include #include -#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include "paddle/infrt/dialect/init_dialects.h" int main(int argc, char **argv) { mlir::DialectRegistry registry; diff --git a/paddle/infrt/dialect/pd_op_base.td b/paddle/infrt/dialect/pd_op_base.td index 26425e3945caa2f85547b7b8e8be7dbeaf10e630..f6af4c83aed8bd0b7ce04c172169b036e674777b 100644 --- a/paddle/infrt/dialect/pd_op_base.td +++ b/paddle/infrt/dialect/pd_op_base.td @@ -6,7 +6,7 @@ include "mlir/IR/OpBase.td" include "mlir/Interfaces/SideEffectInterfaces.td" -include "paddle/infrt/dialect/infrt/infrt_ops_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" def PD_Dialect : Dialect { let name = "pd"; diff --git a/paddle/infrt/dialect/pd_ops.cc b/paddle/infrt/dialect/pd_ops.cc index 55ab174fcaf059d81f83e54e8f1e5864ef25b7e3..96e9e307f2fd3f33be3d2273a7aa66c363e4beb1 100644 --- a/paddle/infrt/dialect/pd_ops.cc +++ b/paddle/infrt/dialect/pd_ops.cc @@ -16,7 +16,6 @@ #include #include -#include "paddle/infrt/dialect/infrt_base.h" #define GET_OP_CLASSES #include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT diff --git a/paddle/infrt/dialect/pd_ops.h b/paddle/infrt/dialect/pd_ops.h index 41dd2ddd94eb161735568170a9a8bdc2ec259cdf..e6b0f30c059054189fe3a86bb112da923ad76423 100644 --- a/paddle/infrt/dialect/pd_ops.h +++ b/paddle/infrt/dialect/pd_ops.h @@ -28,7 +28,7 @@ #include #include #include -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include 
"paddle/infrt/dialect/infrt/ir/infrt_dialect.h" namespace mlir { namespace pd { diff --git a/paddle/infrt/dialect/pd_types.h b/paddle/infrt/dialect/pd_types.h deleted file mode 100644 index 0da888a9c076922fc21d5cce004dc839bd705762..0000000000000000000000000000000000000000 --- a/paddle/infrt/dialect/pd_types.h +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// This file defines the types used in PaddlePaddle MLIR dialect. -// We borrowed much ideas from tensorflow mlir dialect (tf_types.h in -// tensorflow). - -#pragma once - -#include -#include -#include -#include -#include - -namespace mlir { -namespace PD { - -class PaddleType : public Type { - public: - using Type::Type; - - static bool classof(Type type); -}; - -namespace detail { - -template -class PaddleTypeImpl : public Type::TypeBase { - public: - using Base = typename Type::TypeBase; - using PDBase = PaddleTypeImpl; - using Base::Base; -}; - -} // namespace detail - -#define HANDLE_PD_TYPE(pdtype, enumerant, name) \ - class pdtype##Type : public detail::PaddleTypeImpl { \ - public: \ - using PDBase::PDBase; \ - }; - -} // namespace PD -} // namespace mlir diff --git a/paddle/infrt/dialect/phi/data_type.h b/paddle/infrt/dialect/phi/data_type.h index b618ef3861303334b697382f11bfa4fdb4a35c7a..f2a76507b850d956d893cef2a06005100c819975 100644 --- a/paddle/infrt/dialect/phi/data_type.h +++ b/paddle/infrt/dialect/phi/data_type.h @@ -14,7 +14,7 @@ #pragma once -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" #include "paddle/phi/common/backend.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/common/layout.h" diff --git a/paddle/infrt/dialect/phi/ir/infrt_phi_base.td b/paddle/infrt/dialect/phi/ir/infrt_phi_base.td index 671646b9259ccfd2399862d71d6860db93608eb8..5d7338ec4292ed49112c3cce45a30816e686886d 100644 --- a/paddle/infrt/dialect/phi/ir/infrt_phi_base.td +++ b/paddle/infrt/dialect/phi/ir/infrt_phi_base.td @@ -2,7 +2,7 @@ #define PHI_BASE include "mlir/IR/OpBase.td" -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "mlir/Interfaces/InferTypeOpInterface.td" def PHI_Dialect : Dialect { diff --git a/paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td b/paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td index ee23470fc754a56ef323c167613f7f32982eedd8..d2ff7acfba8b26f5c0ca1ec459d3b5e2f7fb3d93 100644 --- a/paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td +++ b/paddle/infrt/dialect/phi/ir/infrt_phi_kernel.td @@ -3,7 +3,7 @@ include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/OpBase.td" -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td" def PHI_CPUKernelDialect : Dialect { diff --git a/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td b/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td index 
21c4669b645fb6c7622fb01ae1c7bacaee0f5ca2..8c3a79498d74d3b80e1590bbc2c0530c7af6411e 100644 --- a/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td +++ b/paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td @@ -5,7 +5,7 @@ include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/IR/OpBase.td" -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" def PHI_DenseTensorDialect : Dialect { let name = "phi_dt"; diff --git a/paddle/infrt/dialect/phi/ir/phi_base.h b/paddle/infrt/dialect/phi/ir/phi_base.h index 0ea1973a7331b8a34bf2a286cb55e19a4d09118b..64cd08cc05ed42fe8d53b8c5b8a5bc994bae8824 100644 --- a/paddle/infrt/dialect/phi/ir/phi_base.h +++ b/paddle/infrt/dialect/phi/ir/phi_base.h @@ -18,7 +18,7 @@ #include #include -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" #include "paddle/infrt/dialect/phi/ir/infrt_phi_baseDialect.h.inc" diff --git a/paddle/infrt/dialect/phi/ir/phi_kernels.h b/paddle/infrt/dialect/phi/ir/phi_kernels.h index b84d1b2b7294baf789fe4e1f3911edede8172cf7..4f8b41852cc67e32c510c247e907092046731452 100644 --- a/paddle/infrt/dialect/phi/ir/phi_kernels.h +++ b/paddle/infrt/dialect/phi/ir/phi_kernels.h @@ -30,7 +30,7 @@ #include #include "paddle/infrt/dialect/dense_tensor.h" -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/phi/ir/phi_base.h" #include "paddle/infrt/dialect/phi/ir/phi_cpu_kernelsDialect.h.inc" diff --git a/paddle/infrt/dialect/phi/pass/kernel_op_desc.h b/paddle/infrt/dialect/phi/pass/kernel_op_desc.h index 34fd2f0f62dcd9b793f9157003bfd3772d0e1307..b1f7c6c0811def9141e8012518fff5f504934149 100644 --- a/paddle/infrt/dialect/phi/pass/kernel_op_desc.h +++ b/paddle/infrt/dialect/phi/pass/kernel_op_desc.h @@ -16,7 +16,7 @@ #include #include -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" namespace infrt { diff --git a/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc b/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc index fb00a3de3fc0c82dce2489c0f412c64118e3101e..485bf2a75d890aa0df5e888e7284ae3451aa514c 100644 --- a/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc +++ b/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.cc @@ -24,13 +24,29 @@ #include #include -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h" #include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h" #include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h" #include "paddle/phi/core/compat/op_utils.h" #include "paddle/phi/ops/compat/signatures.h" -namespace infrt { + +namespace { +class phiOpCvtPass + : public mlir::PassWrapper { + public: + ::llvm::StringRef getName() const override { return "phiOpCvtPass"; } + void runOnFunction() override; + explicit phiOpCvtPass( + std::vector valid_places = std::vector()) + : valid_places_(valid_places) {} + + private: + void convertStage(); + void diapatchStage(); + std::vector valid_places_; +}; + // Implementation of the phiOpCvtPass. 
void phiOpCvtPass::runOnFunction() { convertStage(); @@ -63,7 +79,7 @@ void phiOpCvtPass::convertStage() { ::phi::KernelSignature kernel_sign = ::phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)( - ProtoArgumentMappingContext(op)); + infrt::ProtoArgumentMappingContext(op)); // resort input&output according to kernel_sign ::llvm::SmallVector inputs, ori_output; ::llvm::SmallVector output_types; @@ -109,10 +125,10 @@ void phiOpCvtPass::diapatchStage() { } mlir::OpBuilder builder(&block, block.begin()); - std::map phi_context; + std::map phi_context; for (infrt::KernelOp kernel_op : worklist) { std::string kernel_name = kernel_op.name().str(); - std::vector candidates = + std::vector candidates = getCandidateKernels(kernel_name, valid_places_); if (candidates.empty()) { LOG(FATAL) << "No candidate kernels for op:" << kernel_name; @@ -121,12 +137,13 @@ void phiOpCvtPass::diapatchStage() { builder.setInsertionPoint(kernel_op); // Todo: Implimentation the concrete pass pick strategy - const PhiKernelDesc &phi_kernel_desc = candidates.front(); + const infrt::PhiKernelDesc &phi_kernel_desc = candidates.front(); - kernel_name = getPhiTargetPrefix(phi_kernel_desc.kernelType.target) + - kernel_name + - getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) + - getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout); + kernel_name = + infrt::getPhiTargetPrefix(phi_kernel_desc.kernelType.target) + + kernel_name + + infrt::getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) + + infrt::getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout); mlir::OperationName operation_name(kernel_name, kernel_op.getContext()); mlir::OperationState operation_state(kernel_op.getLoc(), operation_name); @@ -134,18 +151,18 @@ void phiOpCvtPass::diapatchStage() { if (phi_context.find(phi_kernel_desc.kernelType.target) == phi_context.end()) { switch (phi_kernel_desc.kernelType.target) { - case TargetType::CPU: { + case infrt::TargetType::CPU: { auto context_value = builder .create( kernel_op.getLoc(), - phi::ContextType::get(kernel_op.getContext(), - TargetType::CPU)) + infrt::phi::ContextType::get(kernel_op.getContext(), + infrt::TargetType::CPU)) .output(); - phi_context[TargetType::CPU] = context_value; + phi_context[infrt::TargetType::CPU] = context_value; } break; - case TargetType::GPU: - case TargetType::UNK: + case infrt::TargetType::GPU: + case infrt::TargetType::UNK: default: LOG(FATAL) << "Unsupported TargetType"; break; @@ -155,29 +172,30 @@ void phiOpCvtPass::diapatchStage() { phi_context.at(phi_kernel_desc.kernelType.target)); for (size_t index = 0; index < phi_kernel_desc.inputsType.size(); ++index) { mlir::Value input = kernel_op.getOperand(index); - auto cvt_tensor_type_op = builder.create( + auto cvt_tensor_type_op = builder.create( kernel_op.getLoc(), - DenseTensorType::get(kernel_op.getContext(), - phi_kernel_desc.inputsType[index].target, - phi_kernel_desc.inputsType[index].precision, - phi_kernel_desc.inputsType[index].layout), + infrt::DenseTensorType::get( + kernel_op.getContext(), + phi_kernel_desc.inputsType[index].target, + phi_kernel_desc.inputsType[index].precision, + phi_kernel_desc.inputsType[index].layout), input); operation_state.addOperands(cvt_tensor_type_op.output()); } for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); ++index) { - operation_state.addTypes( - DenseTensorType::get(kernel_op.getContext(), - phi_kernel_desc.outputsType[index].target, - phi_kernel_desc.outputsType[index].precision, - phi_kernel_desc.outputsType[index].layout)); + 
operation_state.addTypes(infrt::DenseTensorType::get( + kernel_op.getContext(), + phi_kernel_desc.outputsType[index].target, + phi_kernel_desc.outputsType[index].precision, + phi_kernel_desc.outputsType[index].layout)); } operation_state.addAttributes(kernel_op.attrsAttr().getValue()); mlir::Operation *phi_operation = builder.createOperation(operation_state); for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); ++index) { mlir::Value input = phi_operation->getResult(index); - auto cvt_tensor_type_op = builder.create( + auto cvt_tensor_type_op = builder.create( kernel_op.getLoc(), kernel_op.getResultTypes()[index], input); kernel_op.getResult(index).replaceAllUsesWith( cvt_tensor_type_op.output()); @@ -185,4 +203,10 @@ void phiOpCvtPass::diapatchStage() { kernel_op.erase(); } } -} // namespace infrt + +} // namespace + +std::unique_ptr infrt::createPhiOpCvtPass( + std::vector valid_places) { + return std::make_unique(valid_places); +} diff --git a/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h b/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h index 051fee9b61a24772ff2295280fa1b0a1588d7bae..8b1944042aa7c42fef87786af0d0fa131c6f0535 100644 --- a/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h +++ b/paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h @@ -14,44 +14,14 @@ #pragma once #include -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" namespace infrt { /* * phiOpCvtPass. - * - * Convert the general operators in pd Dialect to a infrt.kernelOp. - * - * source func: - * - * func @main() -> tensor { - * %a = "pd.feed"()... - * %c = "pd.conv2d"(%a) ... - * %d = "pd.conv3d"(%c) ... - * %f = "pd.conv2d"(%a) ... - * "pd.fetch" (%d, %f) - * } - * - * destination func: - * func @main() -> tensor { - * %a = "pd.feed"()... - * %c = "infrt.kernel"(%a){name = "conv2d"} ... - * %d = "infrt.kernel"(%c){name = "conv3d"}... - * %f = "infrt.kernel"(%a){name = "conv2d"}... - * "pd.fetch" (%d, %f) - * } + * Convert the general operators from pd Dialect to phi dialect. 
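// Illustrative sketch (not part of this patch): the pass class is now local
// to phi_op_cvt_pass.cc and reachable only through infrt::createPhiOpCvtPass(),
// as the phi_ir_exec.cc change below shows. Assumes the element type stripped
// from std::vector here is infrt::Place and that createInfrtOpFusePass() is
// declared in infrt_op_fuse_pass.h.
#include <vector>

#include "mlir/Pass/PassManager.h"
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include "paddle/infrt/dialect/phi/pass/phi_op_cvt_pass.h"

// Schedule the pd->phi conversion followed by the infrt op-fusion cleanup.
void AddPhiLoweringPasses(mlir::OpPassManager &phi_pass_manager) {
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
}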
*/ -class phiOpCvtPass - : public mlir::PassWrapper { - public: - ::llvm::StringRef getName() const override { return "phiOpCvtPass"; } - void runOnFunction() override; - explicit phiOpCvtPass(std::vector valid_places = std::vector()) - : valid_places_(valid_places) {} +std::unique_ptr createPhiOpCvtPass( + std::vector valid_places = std::vector()); - private: - void convertStage(); - void diapatchStage(); - std::vector valid_places_; -}; } // namespace infrt diff --git a/paddle/infrt/dialect/phi/phi_ir_exec.cc b/paddle/infrt/dialect/phi/phi_ir_exec.cc index 559fb90a64a7868c9c150e12e881d73df7a4aaf2..de61dba8e744c88f279761520ac1815bb265d875 100644 --- a/paddle/infrt/dialect/phi/phi_ir_exec.cc +++ b/paddle/infrt/dialect/phi/phi_ir_exec.cc @@ -38,7 +38,7 @@ int main(int argc, char** argv) { std::vector valid_places = {{infrt::TargetType::CPU, infrt::PrecisionType::FLOAT32, infrt::LayoutType::NCHW}}; - phi_pass_manager.addPass(std::make_unique(valid_places)); + phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places)); phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); if (mlir::failed(pm.run(*module))) { std::cout << "\npass failed!\n" << std::endl; diff --git a/paddle/infrt/dialect/print_ir.cc b/paddle/infrt/dialect/print_ir.cc index a37df265955e70cdf735f251bc8853c7ad4fe831..b118a5f7a9caf42f4aa63dd0222e7a2647addac5 100644 --- a/paddle/infrt/dialect/print_ir.cc +++ b/paddle/infrt/dialect/print_ir.cc @@ -31,7 +31,7 @@ #include #include "paddle/infrt/common/global.h" -#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include "paddle/infrt/dialect/init_dialects.h" namespace cl = llvm::cl; diff --git a/paddle/infrt/dialect/rewrite.td b/paddle/infrt/dialect/rewrite.td index 5e228fed4d57eb283705c725797c42c5da133c3f..62e7471a390dfeee1a9ddfc15033e85db0adca2e 100644 --- a/paddle/infrt/dialect/rewrite.td +++ b/paddle/infrt/dialect/rewrite.td @@ -1,7 +1,7 @@ #ifndef INFRT_REWRITE #define INFRT_REWRITE -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" include "paddle/infrt/dialect/pd_ops.td" include "paddle/infrt/dialect/pd_extra_ops.td" diff --git a/paddle/infrt/dialect/tensor_shape.td b/paddle/infrt/dialect/tensor_shape.td index d3714c8ed14d3f1aea50ec4c55a9c4c2fb85e958..2be21d6aa772020519e3d909c9bdf7232f7ff985 100644 --- a/paddle/infrt/dialect/tensor_shape.td +++ b/paddle/infrt/dialect/tensor_shape.td @@ -2,7 +2,7 @@ #else #define INFRT_OPS -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "paddle/infrt/dialect/tensor_shape_base.td" include "mlir/Interfaces/SideEffectInterfaces.td" diff --git a/paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td b/paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td index 68ca1559acee03580eea0842bfbac3593d418c02..46c250b05492cefe61d8e677a352a217718189b8 100644 --- a/paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td +++ b/paddle/infrt/dialect/tensorrt/pd_lower_to_trt.td @@ -2,7 +2,7 @@ #define PD_LOWER_TO_TRT include "mlir/Interfaces/SideEffectInterfaces.td" -include "paddle/infrt/dialect/infrt_base.td" +include "paddle/infrt/dialect/infrt/ir/infrt_base.td" include "paddle/infrt/dialect/pd_ops.td" include "paddle/infrt/dialect/tensorrt/trt_ops.td" diff --git a/paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h b/paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h index 803e53e3244f92134928e1105a8248e9f49e5432..18afba19e06189294078bcfc1a0b2bb341eb7126 100644 --- 
a/paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h +++ b/paddle/infrt/dialect/tensorrt/trt_graph_fuse_pass.h @@ -14,7 +14,6 @@ #pragma once #include -#include "paddle/infrt/dialect/infrt_base.h" namespace infrt { namespace trt { @@ -28,17 +27,17 @@ namespace trt { * func @main(%a : tensor) -> tensor { * %c = "pd.graph"(%a) { * %m = "pd.conv2d"(%a)... - * "infrt.return" (%m) + * infrt.return %m... * } ... * %d = "pd.graph"(%c) { * %m = "pd.conv3d"(%c)... - * "infrt.return" (%m) + * infrt.return %m... * } ... * %f = "pd.graph"(%a) { * %m = "pd.conv2d"(%a)... - * "infrt.return" (%m) + * infrt.return %m... * } ... - * "infrt.return" (%d, %f).. + * infrt.return %d, %f :... * } * * destination func: @@ -47,9 +46,9 @@ namespace trt { * %m = "pd.conv2d"(%a)... * %n = "pd.conv3d"(%m)... * %s = "pd.conv2d"(%a)... - * "infrt.return" (%n, %s) + * infrt.return %n, %s:... * } ... - * "infrt.return" (%d, %f) + * infrt.return %d, %f:... * } */ class TRTGraphFusePass diff --git a/paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h b/paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h index 1c44a13cf9dfb65a1747a596dc1012e7f54d792e..a5dd4f14b2946fe232b7b725f6ace7caf74ff4d4 100644 --- a/paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h +++ b/paddle/infrt/dialect/tensorrt/trt_graph_split_pass.h @@ -14,7 +14,6 @@ #pragma once #include -#include "paddle/infrt/dialect/infrt_base.h" namespace infrt { namespace trt { @@ -31,9 +30,9 @@ namespace trt { * %m = "pd.conv2d"(%a)... * %n = "pd.conv3d"(%m)... * %s = "pd.conv2d"(%a)... - * "infrt.return" (%n, %s)... + * infrt.return %n, %s : ... * } ... - * "infrt.return" (%d, %f)... + * infrt.return %d, %f : ... * } * * destination func: @@ -41,7 +40,7 @@ namespace trt { * %c = "pd.conv2d"(%a) ... * %d = "pd.conv3d"(%c) ... * %f = "pd.conv2d"(%a) ... - * "infrt.return" (%d, %f)... + * infrt.return %d, %f:... 
* } */ class TRTGraphSplitPass diff --git a/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc b/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc index 1be5f4dbc39d7699b6d8a36cfb3e164694e908c1..83bebdb6bf19bdf8f75d11d693813b8169e297a0 100644 --- a/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc +++ b/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.cc @@ -14,7 +14,6 @@ #include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h" #include #include -#include "paddle/infrt/dialect/infrt_base.h" #include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/tensorrt/trt_dialect_types.h" @@ -24,7 +23,7 @@ namespace trt { #include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc" // NOLINT struct PD2TRT_GraphLower : public ::mlir::RewritePattern { - PD2TRT_GraphLower(::mlir::MLIRContext *context) + explicit PD2TRT_GraphLower(::mlir::MLIRContext *context) : ::mlir::RewritePattern("pd.graph", 1, context, {"trt.create_engine"}) {} ::mlir::LogicalResult matchAndRewrite( ::mlir::Operation *op, ::mlir::PatternRewriter &rewriter) const override { diff --git a/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h b/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h index 7550d8c84e19504fc0f41067c1194703a55410ba..ede64f8bcd556a73b779fc3b772bf3fa8f74eaf9 100644 --- a/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h +++ b/paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h @@ -15,7 +15,7 @@ #pragma once #include "mlir/IR/Dialect.h" #include "mlir/Pass/Pass.h" -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/tensorrt/trt_ops.h" namespace infrt { @@ -29,9 +29,9 @@ namespace trt { * %m = "pd.conv2d"(%a)... * %n = "pd.conv3d"(%m)... * %s = "pd.conv2d"(%a)... - * "infrt.return" (%n, %s)... + * infrt.return %n, %s:... * } ... - * "infrt.return" (%d, %f)... + * infrt.return %d, %f:... * } * * destination ir: @@ -40,10 +40,10 @@ namespace trt { * %m = "trt.Convolution"(%a)... * %n = "trt.Convolution"(%m)... * %s = "trt.Convolution"(%a)... - * "infrt.return" (%n, %s)... + * infrt.return %n, %s :... * }){run_once = true} ... * %d, %f = "trt.execute"(%engine, %a)... - * "infrt.return" (%d, %f)... + * infrt.return %d, %f :... * } */ struct TRTOpConverterPass diff --git a/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc b/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc index 13b7f1aee55d2a2d30822a878bbd50d385411f43..9f348b4122fc74033703c92459e6cfa5b3a1f3a2 100644 --- a/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc +++ b/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.cc @@ -15,8 +15,8 @@ #include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h" #include -#include "paddle/infrt/dialect/basic_kernels.h" -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/pd_ops.h" namespace infrt { diff --git a/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h b/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h index b9e461c8633d906fd46e9f7d6799e8a157915048..1cb08dc0a2161eeb5720191bada52f9b54e94893 100644 --- a/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h +++ b/paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h @@ -14,7 +14,6 @@ #pragma once #include -#include "paddle/infrt/dialect/infrt_base.h" namespace infrt { namespace trt { @@ -29,24 +28,24 @@ namespace trt { * %c = "pd.conv2d"(%a) ... * %d = "pd.conv3d"(%c) ... 
* %f = "pd.conv2d"(%a) ... - * "infrt.return"(%d, %f) ... + * infrt.return %d, %f: ... * } * * destination func: * func @main(%a : tensor) -> tensor { * %c = "pd.graph"(%a) { * %m = "pd.conv2d"(%a)... - * "infrt.return" (%m) + * infrt.return %m:... * } ... * %d = "pd.graph"(%c) { * %m = "pd.conv3d"(%c)... - * "infrt.return" (%m) + * infrt.return %m:... * } ... * %f = "pd.graph"(%a) { * %m = "pd.conv2d"(%a)... - * "infrt.return" (%m) + * infrt.return %m:... * } ... - * "infrt.return" (%d, %f) + * infrt.return %d, %f:... * } * TODO(winter-wang): Supplementary how to judge the operators can be supported * by tensorrt. diff --git a/paddle/infrt/dialect/tensorrt/trt_ops.h b/paddle/infrt/dialect/tensorrt/trt_ops.h index 44444232915bad7d25b0ecedfa8e8427f4567e49..78d960b5120454bdd01b779abedbe2f7ec0d5853 100644 --- a/paddle/infrt/dialect/tensorrt/trt_ops.h +++ b/paddle/infrt/dialect/tensorrt/trt_ops.h @@ -28,8 +28,8 @@ #include #include #include -#include "paddle/infrt/dialect/basic_kernels.h" -#include "paddle/infrt/dialect/infrt/infrt_dialect.h" +#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h" +#include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h" #include "paddle/infrt/dialect/pd_ops.h" namespace infrt { diff --git a/paddle/infrt/external_kernels/basic.mlir b/paddle/infrt/external_kernels/basic.mlir index 1a7ea854c9ce469ee5719743287b4ee1b5de9286..843b12ced21a982b18b5a63f7bbef1d4d24eea16 100644 --- a/paddle/infrt/external_kernels/basic.mlir +++ b/paddle/infrt/external_kernels/basic.mlir @@ -1,7 +1,7 @@ // CHECK: basic func @basic() -> f32 { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 %v2 = "external.add.f32"(%v0, %v1) : (f32, f32) -> f32 // CHECK: 1 @@ -17,5 +17,5 @@ func @basic() -> f32 { // CHECK: 6 "external.print.f32"(%v3) : (f32) -> () - Infrt.return %v3 : f32 + infrt.return %v3 : f32 } diff --git a/paddle/infrt/external_kernels/fc.mlir b/paddle/infrt/external_kernels/fc.mlir index b0cabddc3ebc4a9ede73d506ac58acaa140f03d5..26b2d24cace70455d4a0e21dddf23c9bd628ae81 100644 --- a/paddle/infrt/external_kernels/fc.mlir +++ b/paddle/infrt/external_kernels/fc.mlir @@ -1,43 +1,43 @@ // CHECK-LABEL: @fc -func @fc(%input : !Infrt.tensor, - %w : !Infrt.tensor, - %bias : !Infrt.tensor) -> !Infrt.tensor +func @fc(%input : !infrt.dense_tensor, + %w : !infrt.dense_tensor, + %bias : !infrt.dense_tensor) -> !infrt.dense_tensor { - %out = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor - // dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor) {value=0.0:f32} + %out = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor + // dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor) {value=0.0:f32} // fc1 - "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - "external.sigmoid"(%out, %out) {}: (!Infrt.tensor, !Infrt.tensor) -> () + "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor) -> () // fc2 - "external.matmul"(%out, %w, %out) {}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor, !Infrt.tensor, 
!Infrt.tensor) -> () - "external.sigmoid"(%out, %out) {}: (!Infrt.tensor, !Infrt.tensor) -> () + "external.matmul"(%out, %w, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor) -> () - Infrt.return %out : !Infrt.tensor + infrt.return %out : !infrt.dense_tensor } // CHECK-LABEL: @benchmark func @benchmark() { - %input = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor) {value=1.0:f32} + %input = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor) {value=1.0:f32} - %w = dt.create_uninit_tensor.f32 [50, 50] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor) {value=2.0:f32} + %w = dt.create_uninit_tensor.f32 [50, 50] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor) {value=2.0:f32} - %bias = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor) {value=3.0:f32} + %bias = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor) {value=3.0:f32} - Infrt.benchmark "add.f32"( - %input:!Infrt.tensor, - %w:!Infrt.tensor, - %bias:!Infrt.tensor) + infrt.benchmark "add.f32"( + %input:!infrt.dense_tensor, + %w:!infrt.dense_tensor, + %bias:!infrt.dense_tensor) duration_secs = 100, max_count = 300000, num_warmup_runs = 3 { - %res = Infrt.call @fc(%input, %w, %bias) : (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> (!Infrt.tensor) - Infrt.return %res : !Infrt.tensor + %res = infrt.call @fc(%input, %w, %bias) : (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor) + infrt.return %res : !infrt.dense_tensor } - Infrt.return + infrt.return } diff --git a/paddle/infrt/external_kernels/paddle.mlir b/paddle/infrt/external_kernels/paddle.mlir index d55d9904b5bc4e43388abacf9e4b62bf06db458b..97781e5c8c5e544bba53b561f2adcae16263886f 100644 --- a/paddle/infrt/external_kernels/paddle.mlir +++ b/paddle/infrt/external_kernels/paddle.mlir @@ -1,50 +1,50 @@ // CHECK: paddle_func func @paddle_func() -> () { - %input = dt.create_uninit_tensor.f32 [3, 5] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor) {value=1.0:f32} + %input = dt.create_uninit_tensor.f32 [3, 5] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor) {value=1.0:f32} - %w = dt.create_uninit_tensor.f32 [5, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor) {value=2.0:f32} + %w = dt.create_uninit_tensor.f32 [5, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor) {value=2.0:f32} - %bias = dt.create_uninit_tensor.f32 [4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor) {value=3.0:f32} + %bias = dt.create_uninit_tensor.f32 [4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor) {value=3.0:f32} - %out = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor) {value=0.0:f32} + %out = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor) {value=0.0:f32} - "external.fc2"(%input, %w, 
%bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () + "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () // CHECK-LABEL: tensor: shape=shape[3,5], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - dt.print_tensor (%input : !Infrt.tensor) + dt.print_tensor (%input : !infrt.dense_tensor) // CHECK-LABEL: tensor: shape=shape[5,4], values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] - dt.print_tensor (%w : !Infrt.tensor) - dt.print_tensor (%bias : !Infrt.tensor) - dt.print_tensor (%out : !Infrt.tensor) + dt.print_tensor (%w : !infrt.dense_tensor) + dt.print_tensor (%bias : !infrt.dense_tensor) + dt.print_tensor (%out : !infrt.dense_tensor) // test external.matmul - %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%out1 : !Infrt.tensor) {value=0.0:f32} - "external.matmul"(%input, %w, %out1) {}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - dt.print_tensor (%out1 : !Infrt.tensor) + %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%out1 : !infrt.dense_tensor) {value=0.0:f32} + "external.matmul"(%input, %w, %out1) {}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + dt.print_tensor (%out1 : !infrt.dense_tensor) // test external.elementwise_add - %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%out2 : !Infrt.tensor) {value=0.0:f32} - %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%bias1 : !Infrt.tensor) {value=3.0:f32} - "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - dt.print_tensor (%out2 : !Infrt.tensor) + %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%out2 : !infrt.dense_tensor) {value=0.0:f32} + %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%bias1 : !infrt.dense_tensor) {value=3.0:f32} + "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + dt.print_tensor (%out2 : !infrt.dense_tensor) // test external.relu - %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%out3 : !Infrt.tensor) {value=0.0:f32} - "external.relu"(%out1, %out3) {}: (!Infrt.tensor, !Infrt.tensor) -> () - dt.print_tensor (%out3 : !Infrt.tensor) + %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%out3 : !infrt.dense_tensor) {value=0.0:f32} + "external.relu"(%out1, %out3) {}: (!infrt.dense_tensor, !infrt.dense_tensor) -> () + dt.print_tensor (%out3 : !infrt.dense_tensor) // test external.sigmoid - %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%out4 : !Infrt.tensor) {value=0.0:f32} - "external.sigmoid"(%out1, %out4) {}: (!Infrt.tensor, !Infrt.tensor) -> () - dt.print_tensor (%out4 : !Infrt.tensor) + %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%out4 : !infrt.dense_tensor) {value=0.0:f32} + "external.sigmoid"(%out1, %out4) {}: (!infrt.dense_tensor, !infrt.dense_tensor) -> () + dt.print_tensor (%out4 : !infrt.dense_tensor) - Infrt.return + 
infrt.return } diff --git a/paddle/infrt/host_context/mlir_exec.cc b/paddle/infrt/host_context/mlir_exec.cc index 90bcb1df220c0f4c558ece80a09fccc93aada41c..1506282f6268191a2eece5540d30fbe90d8eeb52 100644 --- a/paddle/infrt/host_context/mlir_exec.cc +++ b/paddle/infrt/host_context/mlir_exec.cc @@ -92,7 +92,7 @@ int main(int argc, char** argv) { std::vector valid_places = {{infrt::TargetType::CPU, infrt::PrecisionType::FLOAT32, infrt::LayoutType::NCHW}}; - phi_pass_manager.addPass(std::make_unique(valid_places)); + phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places)); phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); #endif diff --git a/paddle/infrt/host_context/mlir_tests/basic.mlir b/paddle/infrt/host_context/mlir_tests/basic.mlir index 1b55b408f2b082c09d06d51037e8c9d967a171f4..263d5884134b143aa8d3403c5cd05672df39636f 100644 --- a/paddle/infrt/host_context/mlir_tests/basic.mlir +++ b/paddle/infrt/host_context/mlir_tests/basic.mlir @@ -1,30 +1,30 @@ // CHECK-LABEL: basic func @basic() -> f32 { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 - %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 + %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 // CHECK: 1 - "Infrt.print.f32"(%v0) : (f32) -> () + "infrt.print.f32"(%v0) : (f32) -> () // CHECK: 2 - "Infrt.print.f32"(%v1) : (f32) -> () + "infrt.print.f32"(%v1) : (f32) -> () // CHECK: 3 - "Infrt.print.f32"(%v2) : (f32) -> () + "infrt.print.f32"(%v2) : (f32) -> () - %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 + %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 // CHECK: 6 - "Infrt.print.f32"(%v3) : (f32) -> () + "infrt.print.f32"(%v3) : (f32) -> () - Infrt.return %v3 : f32 + infrt.return %v3 : f32 } // CHECK-LABEL: basic1 // Check the mlir executor can work with more than one function in a file. 
func @basic1() -> () { - %v0 = Infrt.constant.f32 1.0 - "Infrt.print.f32"(%v0) : (f32) -> () + %v0 = infrt.constant.f32 1.0 + "infrt.print.f32"(%v0) : (f32) -> () // CHECK: 1 - Infrt.return + infrt.return } \ No newline at end of file diff --git a/paddle/infrt/host_context/mlir_tests/dense_tensor.mlir b/paddle/infrt/host_context/mlir_tests/dense_tensor.mlir index 5a973a3eb23e6015ede2d69d83ab8c26de669908..1a7fa28f1e58bd400671099f5af7bedbb3c04e4d 100644 --- a/paddle/infrt/host_context/mlir_tests/dense_tensor.mlir +++ b/paddle/infrt/host_context/mlir_tests/dense_tensor.mlir @@ -1,9 +1,9 @@ // CHECK-LABEL: build_tensor1 func @build_tensor1() { - %a = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%a : !Infrt.tensor) {value=1.0:f32} + %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor) {value=1.0:f32} // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] - dt.print_tensor (%a : !Infrt.tensor) + dt.print_tensor (%a : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/host_context/mlir_tests/shape.mlir b/paddle/infrt/host_context/mlir_tests/shape.mlir index 22df1c8010d8dbd6a4b8e332e01602b4421ebcdd..691ce62cbf82ad4dc0d3b0199a9c1d1127213de5 100644 --- a/paddle/infrt/host_context/mlir_tests/shape.mlir +++ b/paddle/infrt/host_context/mlir_tests/shape.mlir @@ -3,5 +3,5 @@ func @build_tensor1() { %a = ts.build_shape [1:i64, 57:i64, 92:i64] // CHECK: shape[1,57,92] ts.print_shape %a - Infrt.return -} \ No newline at end of file + infrt.return +} diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.cc b/paddle/infrt/host_context/mlir_to_runtime_translate.cc index a901c323ec03a418a32eee3cb8ea17708e38bdb9..b3ea930e8cebcae671e88d5881dead9ed11e7f4e 100644 --- a/paddle/infrt/host_context/mlir_to_runtime_translate.cc +++ b/paddle/infrt/host_context/mlir_to_runtime_translate.cc @@ -75,7 +75,7 @@ struct MlirToRuntimeTranslator::Impl { }; bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) { - if (!infrt::Startswith(op->getName().getStringRef().str(), "Infrt.constant")) + if (!infrt::Startswith(op->getName().getStringRef().str(), "infrt.constant")) return false; VLOG(3) << "Emitting constant op [" << op->getName().getStringRef().str() << "]"; @@ -267,7 +267,7 @@ boost::optional> MlirToRuntimeTranslator::EmitAttribute( } static bool IsReturn(mlir::Operation* op) { - return op->getName().getStringRef() == "Infrt.return"; + return op->getName().getStringRef() == "infrt.return"; } bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) { @@ -405,7 +405,7 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) { bool MlirToRuntimeTranslator::EmitReturnOp( mlir::Operation* op, llvm::SmallVectorImpl* results) { CHECK(results); - if (op->getName().getStringRef() == "Infrt.return") { + if (op->getName().getStringRef() == "infrt.return") { for (size_t i = 0; i < op->getNumOperands(); i++) { results->push_back(op->getOperand(i)); } @@ -478,7 +478,7 @@ bool MlirToRuntimeTranslator::EmitCallOp(mlir::Operation* op, function_defs_t* function_table) { CHECK(op); CHECK(function_table); - if (op->getName().getStringRef() != "Infrt.call") return false; + if (op->getName().getStringRef() != "infrt.call") return false; impl_->cur_op = impl_->runtime->NewOpExecutable(op->getName().getStringRef().str()); diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.h b/paddle/infrt/host_context/mlir_to_runtime_translate.h 
index 0c453651d9e6dc44adaf108ec6a1b0df984fe8be..fcd79eaf386eed5a6a8eaa31712e344bab56dbd4 100644 --- a/paddle/infrt/host_context/mlir_to_runtime_translate.h +++ b/paddle/infrt/host_context/mlir_to_runtime_translate.h @@ -57,7 +57,7 @@ class MlirToRuntimeTranslator { protected: //! Emit a "infrt.constant.*" operation, return true if succeed. bool EmitConstantOp(mlir::Operation* op); - //! Emit a "Infrt.return" operation. + //! Emit a "infrt.return" operation. bool EmitReturnOp(mlir::Operation* op, llvm::SmallVectorImpl* results); //! Emit a "ts.build_shape" operation. diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate_test.cc b/paddle/infrt/host_context/mlir_to_runtime_translate_test.cc index 5824e40abf97a4d63543948d056e815bbeebce3a..31615fbc3f6e46f55ddc5f56641750feb0972772 100644 --- a/paddle/infrt/host_context/mlir_to_runtime_translate_test.cc +++ b/paddle/infrt/host_context/mlir_to_runtime_translate_test.cc @@ -37,14 +37,14 @@ TEST(MlirToRuntimeTranslate, basic) { auto source = R"ROC( func @main() -> () { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 - %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 - %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 + %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 + %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 - "Infrt.print.f32"(%v1) : (f32) -> () + "infrt.print.f32"(%v1) : (f32) -> () - Infrt.return + infrt.return } )ROC"; @@ -63,14 +63,14 @@ TEST(TestMlir, basic) { auto source = R"ROC( func @main() -> () { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 - %v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 - %v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 + %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 + %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 - "Infrt.print.f32"(%v1) : (f32) -> () + "infrt.print.f32"(%v1) : (f32) -> () - Infrt.return + infrt.return } )ROC"; @@ -101,7 +101,7 @@ func @predict(%a: !infrt.dense_tensor, %b: !infrt.dense_tensor< "!infrt.dense_tensor"; auto end = R"ROC( -Infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor +infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor } )ROC"; diff --git a/paddle/infrt/host_context/paddle_mlir.cc b/paddle/infrt/host_context/paddle_mlir.cc index 6afef5935c73450b4865c0e02593aa372299c95f..18c25827b8ec5a71907e694cea4e7680b598e883 100644 --- a/paddle/infrt/host_context/paddle_mlir.cc +++ b/paddle/infrt/host_context/paddle_mlir.cc @@ -19,7 +19,6 @@ MLIRModelGenImpl::MLIRModelGenImpl() : context_(infrt::Global::getMLIRContext()), builder_(context_) { context_->allowUnregisteredDialects(); context_->getOrLoadDialect(); - context_->getOrLoadDialect(); context_->getOrLoadDialect(); context_->getOrLoadDialect(); context_->getOrLoadDialect(); diff --git a/paddle/infrt/host_context/paddle_mlir.h b/paddle/infrt/host_context/paddle_mlir.h index 78dfefcfda2c83760492766507999322152187eb..e825cbb5a11ea0dfcacfc2b1bbb63bf201219c9d 100644 --- a/paddle/infrt/host_context/paddle_mlir.h +++ b/paddle/infrt/host_context/paddle_mlir.h @@ -25,10 +25,10 @@ #include "mlir/IR/MLIRContext.h" #include "paddle/infrt/common/global.h" #include "paddle/infrt/common/string.h" -#include "paddle/infrt/dialect/basic_kernels.h" #include "paddle/infrt/dialect/dense_tensor.h" -#include "paddle/infrt/dialect/infrt_base.h" -#include "paddle/infrt/dialect/init_infrt_dialects.h" +#include 
"paddle/infrt/dialect/infrt/ir/basic_kernels.h" + +#include "paddle/infrt/dialect/init_dialects.h" #include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/tensor_shape.h" #include "paddle/infrt/paddle/model_parser.h" diff --git a/paddle/infrt/host_context/value.h b/paddle/infrt/host_context/value.h index 86df3508cf813628b4a8ba8412ce93d6b1dfc5a2..957d852442b10620244e230a2f7704eb7fa0a33e 100644 --- a/paddle/infrt/host_context/value.h +++ b/paddle/infrt/host_context/value.h @@ -22,7 +22,7 @@ #include "paddle/infrt/common/object.h" #include "paddle/infrt/common/shared.h" -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" #include "paddle/infrt/host_context/function.h" #include "paddle/infrt/support/variant.h" #include "paddle/infrt/tensor/dense_host_tensor.h" diff --git a/paddle/infrt/kernel/basic_kernels.cc b/paddle/infrt/kernel/basic_kernels.cc index 23e50a5ddc87427bbf0f49c559f185084e42c8ec..b186cfcfd2b355f97711ecc916e497c2916d4060 100644 --- a/paddle/infrt/kernel/basic_kernels.cc +++ b/paddle/infrt/kernel/basic_kernels.cc @@ -63,24 +63,24 @@ static void PrintString(const std::string &str) { void RegisterBasicKernels(host_context::KernelRegistry *registry) { RegisterIntBasicKernels(registry); RegisterFloatBasicKernels(registry); - registry->AddKernel("Infrt.get_string", INFRT_KERNEL(GetString)); - registry->AddKernel("Infrt.print_string", INFRT_KERNEL(PrintString)); + registry->AddKernel("infrt.get_string", INFRT_KERNEL(GetString)); + registry->AddKernel("infrt.print_string", INFRT_KERNEL(PrintString)); } void RegisterIntBasicKernels(host_context::KernelRegistry *registry) { - registry->AddKernel("Infrt.add.i32", INFRT_KERNEL(add)); - registry->AddKernel("Infrt.sub.i32", INFRT_KERNEL(sub)); - registry->AddKernel("Infrt.mul.i32", INFRT_KERNEL(mul)); - registry->AddKernel("Infrt.div.i32", INFRT_KERNEL(div)); - registry->AddKernel("Infrt.print.i32", INFRT_KERNEL(print)); + registry->AddKernel("infrt.add.i32", INFRT_KERNEL(add)); + registry->AddKernel("infrt.sub.i32", INFRT_KERNEL(sub)); + registry->AddKernel("infrt.mul.i32", INFRT_KERNEL(mul)); + registry->AddKernel("infrt.div.i32", INFRT_KERNEL(div)); + registry->AddKernel("infrt.print.i32", INFRT_KERNEL(print)); } void RegisterFloatBasicKernels(host_context::KernelRegistry *registry) { - registry->AddKernel("Infrt.add.f32", INFRT_KERNEL(add)); - registry->AddKernel("Infrt.sub.f32", INFRT_KERNEL(sub)); - registry->AddKernel("Infrt.mul.f32", INFRT_KERNEL(mul)); - registry->AddKernel("Infrt.div.f32", INFRT_KERNEL(div)); - registry->AddKernel("Infrt.print.f32", INFRT_KERNEL(print)); + registry->AddKernel("infrt.add.f32", INFRT_KERNEL(add)); + registry->AddKernel("infrt.sub.f32", INFRT_KERNEL(sub)); + registry->AddKernel("infrt.mul.f32", INFRT_KERNEL(mul)); + registry->AddKernel("infrt.div.f32", INFRT_KERNEL(div)); + registry->AddKernel("infrt.print.f32", INFRT_KERNEL(print)); } } // namespace kernel diff --git a/paddle/infrt/kernel/control_flow_kernels.cc b/paddle/infrt/kernel/control_flow_kernels.cc index 8b18aca0210860f4ae688f2133ffa022fda3195d..6cc94dbcce0775cb6b74f993bfdd262fd6a47e6f 100644 --- a/paddle/infrt/kernel/control_flow_kernels.cc +++ b/paddle/infrt/kernel/control_flow_kernels.cc @@ -37,7 +37,7 @@ static void INFRTCall( } void RegisterControlFlowKernels(host_context::KernelRegistry* registry) { - registry->AddKernel("Infrt.call", INFRT_KERNEL(INFRTCall)); + registry->AddKernel("infrt.call", INFRT_KERNEL(INFRTCall)); } } // namespace kernel diff --git 
a/paddle/infrt/kernel/phi/dense_tensor_kernels.h b/paddle/infrt/kernel/phi/dense_tensor_kernels.h index 187e5c64511e83556bec50f4368ae7cbe89dda90..e77e9becb6ff30995b77eb9df3b029f02ab9f69d 100644 --- a/paddle/infrt/kernel/phi/dense_tensor_kernels.h +++ b/paddle/infrt/kernel/phi/dense_tensor_kernels.h @@ -15,7 +15,7 @@ #pragma once #include "paddle/infrt/backends/host/phi_allocator.h" -#include "paddle/infrt/dialect/infrt/common_type.h" +#include "paddle/infrt/dialect/infrt/common/types.h" #include "paddle/infrt/host_context/kernel_utils.h" #include "paddle/phi/core/dense_tensor.h" diff --git a/paddle/infrt/kernel/test_kernels.cc b/paddle/infrt/kernel/test_kernels.cc index d15bbe221f91a87b047863121f32699175183c54..bcf475d1bc09dab8be1b7a23359e1eb935ee02e0 100644 --- a/paddle/infrt/kernel/test_kernels.cc +++ b/paddle/infrt/kernel/test_kernels.cc @@ -193,7 +193,7 @@ tensor::DenseHostTensor ShadowCopyTensor(tensor::DenseHostTensor src) { } void RegisterTestKernels(host_context::KernelRegistry *registry) { - registry->AddKernel("Infrt.benchmark", INFRT_KERNEL(benchmark)); + registry->AddKernel("infrt.benchmark", INFRT_KERNEL(benchmark)); registry->AddKernel("Infrt.test.shadow_copy_tensor", INFRT_KERNEL(ShadowCopyTensor)); } diff --git a/paddle/infrt/tests/dialect/basic.mlir b/paddle/infrt/tests/dialect/basic.mlir index 2d4d6f2629ec7df989499f0a2e9649c01ae8428a..f534a3aa44aac964c262465da199ac926fa0904e 100644 --- a/paddle/infrt/tests/dialect/basic.mlir +++ b/paddle/infrt/tests/dialect/basic.mlir @@ -1,33 +1,33 @@ // RUN: infrtexec -i %s | FileCheck %s // CHECK-LABEL: @basic_f32 func @basic_f32() -> f32 { - %v0 = Infrt.constant.f32 1.0 - %v1 = Infrt.constant.f32 2.0 - %value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 + %v0 = infrt.constant.f32 1.0 + %v1 = infrt.constant.f32 2.0 + %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 // CHECK-NEXT: 3 - "Infrt.print.f32"(%value) : (f32) -> () + "infrt.print.f32"(%value) : (f32) -> () - Infrt.return %value : f32 + infrt.return %value : f32 } /// ================================================================ /// @caller call the other function @callee func @callee.add.f32(%x : f32, %y : f32, %y1 : f32) -> f32 { - %z = "Infrt.add.f32"(%x, %y) : (f32, f32) -> f32 - %z1 = "Infrt.add.f32"(%z, %y1) : (f32, f32) -> f32 - Infrt.return %z1 : f32 + %z = "infrt.add.f32"(%x, %y) : (f32, f32) -> f32 + %z1 = "infrt.add.f32"(%z, %y1) : (f32, f32) -> f32 + infrt.return %z1 : f32 } // CHECK-LABEL: @caller.add.f32 func @caller.add.f32() -> f32 { - %x = Infrt.constant.f32 1.0 - %y = Infrt.constant.f32 2.0 - %y1 = Infrt.constant.f32 3.0 - %z = Infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32 + %x = infrt.constant.f32 1.0 + %y = infrt.constant.f32 2.0 + %y1 = infrt.constant.f32 3.0 + %z = infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32 // CHECK-NEXT: 6 - "Infrt.print.f32"(%z) : (f32) -> () - Infrt.return %z : f32 + "infrt.print.f32"(%z) : (f32) -> () + infrt.return %z : f32 } /// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< diff --git a/paddle/infrt/tests/dialect/benchmark.mlir b/paddle/infrt/tests/dialect/benchmark.mlir index 381fd534f6a5a09e3091203de88ebf00101074af..1a57b43499062410b346b38412a533d3edd6fbcc 100644 --- a/paddle/infrt/tests/dialect/benchmark.mlir +++ b/paddle/infrt/tests/dialect/benchmark.mlir @@ -12,13 +12,13 @@ func @benchmark() { // CHECK-LABEL: BM:add.f32:CPU 95%(ns) // CHECK-LABEL: BM:add.f32:CPU 99%(ns) // CHECK-LABEL: BM:add.f32:CPU utilization(percent) - Infrt.benchmark "add.f32"() 
duration_secs = 1, max_count = 3, num_warmup_runs = 3 + infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3 { - %0 = Infrt.constant.f32 1.0 - %1 = Infrt.constant.f32 2.0 - %res = "Infrt.add.f32"(%0, %1) : (f32, f32) -> f32 - "Infrt.print.f32"(%res) : (f32) -> () - Infrt.return %res : f32 + %0 = infrt.constant.f32 1.0 + %1 = infrt.constant.f32 2.0 + %res = "infrt.add.f32"(%0, %1) : (f32, f32) -> f32 + "infrt.print.f32"(%res) : (f32) -> () + infrt.return %res : f32 } - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/dense_tensor.mlir b/paddle/infrt/tests/dialect/dense_tensor.mlir index faade62d35063b1d85c4c1d3ddad98b085a7726c..6dc9904610477139b6c254d0f9f7b754041a83cc 100644 --- a/paddle/infrt/tests/dialect/dense_tensor.mlir +++ b/paddle/infrt/tests/dialect/dense_tensor.mlir @@ -4,14 +4,14 @@ func @dense_shape0() { %shape = ts.build_shape [1:i64, 57:i64] %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor - Infrt.return + infrt.return } func @predict(%a: !infrt.dense_tensor, %b: !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor) { %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor -> !infrt.dense_tensor %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor -> !infrt.dense_tensor - Infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor + infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor } @@ -19,6 +19,6 @@ func @main() { %shape = ts.build_shape [1:i64, 57:i64] %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor - %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor) - Infrt.return + %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor) + infrt.return } diff --git a/paddle/infrt/tests/dialect/disabled_tensor_map.mlir b/paddle/infrt/tests/dialect/disabled_tensor_map.mlir index 1cae065bd5fb6a6a1aa06b4cd6605a240917b55f..936c8f32c01521817e185fa80e836018e7b02aa8 100644 --- a/paddle/infrt/tests/dialect/disabled_tensor_map.mlir +++ b/paddle/infrt/tests/dialect/disabled_tensor_map.mlir @@ -1,30 +1,30 @@ // CHECK-LABEL: @predict -func @predict(%input:!Infrt.tensor, %map: !Infrt.tensor_map) -> (!Infrt.tensor) { - %w = dt.get_param(%map, "create_parameter_0.w_0") -> !Infrt.tensor - %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !Infrt.tensor +func @predict(%input:!infrt.dense_tensor, %map: !infrt.dense_tensor_map) -> (!infrt.dense_tensor) { + %w = dt.get_param(%map, "create_parameter_0.w_0") -> !infrt.dense_tensor + %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !infrt.dense_tensor - %out = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor + %out = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor // fc - "external.matmul"(%input, %w, %out) {}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor, !Infrt.tensor, !Infrt.tensor) -> () - "external.sigmoid"(%out, %out) {}: (!Infrt.tensor, !Infrt.tensor) -> () - //dt.print_tensor (%out : !Infrt.tensor) + "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor, !infrt.dense_tensor, !infrt.dense_tensor) -> () + "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor, !infrt.dense_tensor) -> () + //dt.print_tensor (%out : !infrt.dense_tensor) - 
Infrt.return %out : !Infrt.tensor + infrt.return %out : !infrt.dense_tensor } // CHECK-LABEL: @main func @main() { - %input = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor - dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor) {value=1.0:f32} + %input = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor + dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor) {value=1.0:f32} // CHECK-LABEL: loading params %map = dt.load_params() {path="/Infrt/build/paddle/paddle_1.8_fc_model"} - %out = Infrt.call @predict(%input, %map): (!Infrt.tensor, !Infrt.tensor_map) -> (!Infrt.tensor) - dt.print_tensor (%out : !Infrt.tensor) + %out = infrt.call @predict(%input, %map): (!infrt.dense_tensor, !infrt.dense_tensor_map) -> (!infrt.dense_tensor) + dt.print_tensor (%out : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/paddle_ops.mlir b/paddle/infrt/tests/dialect/paddle_ops.mlir index 48ee4b9d725c0aa36d4849c2842c99997de5c8ee..4b8055514936417dd83a6bb23afaea31eb2d1013 100644 --- a/paddle/infrt/tests/dialect/paddle_ops.mlir +++ b/paddle/infrt/tests/dialect/paddle_ops.mlir @@ -5,5 +5,5 @@ func @ops() { %b = pd.feed() {name="input1"}: tensor %d = pd.feed() {name="input3"}: !infrt.lod_tensor<3x4x9xf32, 0> %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor, tensor) -> tensor - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/phi/dense_tensor.mlir b/paddle/infrt/tests/dialect/phi/dense_tensor.mlir index e8f09f07c82c4003e23a54c7275f576f7916f853..b40184e7266a769defa5f879c88c71ff80ad8c3f 100644 --- a/paddle/infrt/tests/dialect/phi/dense_tensor.mlir +++ b/paddle/infrt/tests/dialect/phi/dense_tensor.mlir @@ -11,6 +11,6 @@ func @sign_any_float32_execute() { // CHECK: dense_tensor: shape=shape[1], values=[1] "phi_dt.print_tensor" (%e) : (!infrt.dense_tensor) -> () - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/phi/phi_test.mlir b/paddle/infrt/tests/dialect/phi/phi_test.mlir index 923f4e9d9d2ce6f6a24f91f04721f49712f900b5..5b0fa735897a31287bb6dea487e2f22eacd7b0aa 100644 --- a/paddle/infrt/tests/dialect/phi/phi_test.mlir +++ b/paddle/infrt/tests/dialect/phi/phi_test.mlir @@ -2,14 +2,14 @@ module { func @predict(%arg0: !infrt.dense_tensor) -> !infrt.dense_tensor { %2 = "pd.abs"(%arg0) : (!infrt.dense_tensor) -> !infrt.dense_tensor - Infrt.return %2 : !infrt.dense_tensor + infrt.return %2 : !infrt.dense_tensor } func @main() { %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context %t = "phi_dt.create_dense_tensor" (%ctx) {precision=#infrt.precision, layout=#infrt.layout, lod=[1:i64], dims=[1:i64]}: (!phi.context) -> (!infrt.dense_tensor) "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor) -> () - %2 = Infrt.call@predict(%t) : (!infrt.dense_tensor) -> !infrt.dense_tensor + %2 = infrt.call@predict(%t) : (!infrt.dense_tensor) -> !infrt.dense_tensor phi_dt.print_tensor(%2 : !infrt.dense_tensor) - Infrt.return + infrt.return } } diff --git a/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir b/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir index 76ae140dd6cbd741f992315ee35d3e94058d4674..47bc1f7833140c8a876660673fa11f148d42db90 100644 --- a/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir +++ b/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir @@ -3,14 +3,14 @@ func @dense_shape0() { %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor - Infrt.return + infrt.return } func @predict(%a: !infrt.dense_tensor, %b: !infrt.dense_tensor) -> 
(!infrt.dense_tensor, !infrt.dense_tensor) { %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor -> !infrt.dense_tensor %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor -> !infrt.dense_tensor - Infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor + infrt.return %a0, %b0: !infrt.dense_tensor, !infrt.dense_tensor } @@ -18,6 +18,6 @@ func @main() { %shape = ts.build_shape [1:i64, 57:i64] %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor - %b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor) - Infrt.return + %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor, !infrt.dense_tensor) -> (!infrt.dense_tensor, !infrt.dense_tensor) + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor/naive_kernels.mlir b/paddle/infrt/tests/dialect/tensor/naive_kernels.mlir index 52b296e06cd365fbaa1249108f877dc9f7480ff0..d6b69fdd595ea520f623e4b9651fc6e2b321c26f 100644 --- a/paddle/infrt/tests/dialect/tensor/naive_kernels.mlir +++ b/paddle/infrt/tests/dialect/tensor/naive_kernels.mlir @@ -13,7 +13,7 @@ func @naive_elementwise_add() { // CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] dt.print_tensor (%c : !infrt.dense_tensor) - Infrt.return + infrt.return } // RUN: infrtexec -i %s | FileCheck %s @@ -31,5 +31,5 @@ func @naive_matmul() { // CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16] dt.print_tensor (%c : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in b/paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in index 28450ed6bd823f7d18eff19371a2a1a49292b329..7aeb3f8a4d0513deaed6bda73a591790b633d0db 100644 --- a/paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in +++ b/paddle/infrt/tests/dialect/tensor/tensor_map.mlir.in @@ -3,12 +3,12 @@ func @load_tensor_map() { %map = dt.load_params(){path="@CMAKE_BINARY_DIR@/multi_fc_model"} %size = dt.tensor_map_get_size(%map) -> i32 - Infrt.print.i32 %size + infrt.print.i32 %size %a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.dense_tensor // CHECK: tensor: shape=shape[2], values=[0, 0] dt.print_tensor (%a : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor/tensor_shape.mlir b/paddle/infrt/tests/dialect/tensor/tensor_shape.mlir index 5623aef71aa2c33ff0bd3524855c56e9dcab5e9b..09210078b9d7d139f2bc2534acf07e83aa1146bb 100644 --- a/paddle/infrt/tests/dialect/tensor/tensor_shape.mlir +++ b/paddle/infrt/tests/dialect/tensor/tensor_shape.mlir @@ -4,5 +4,5 @@ func @build_tensor1() { %a = ts.build_shape [1:i64, 57:i64, 92:i64] // CHECK: shape[1,57,92] ts.print_shape %a - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor/tensor_type.mlir b/paddle/infrt/tests/dialect/tensor/tensor_type.mlir index e580634055a72eae66196f67c8321c308599a1af..5847d567cf6b42a9404d33a938a67c6dc2f4aefc 100644 --- a/paddle/infrt/tests/dialect/tensor/tensor_type.mlir +++ b/paddle/infrt/tests/dialect/tensor/tensor_type.mlir @@ -6,5 +6,5 @@ func @test_tensor_type() { // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] dt.print_tensor (%a : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor_shape.mlir b/paddle/infrt/tests/dialect/tensor_shape.mlir index 5623aef71aa2c33ff0bd3524855c56e9dcab5e9b..09210078b9d7d139f2bc2534acf07e83aa1146bb 100644 --- 
a/paddle/infrt/tests/dialect/tensor_shape.mlir +++ b/paddle/infrt/tests/dialect/tensor_shape.mlir @@ -4,5 +4,5 @@ func @build_tensor1() { %a = ts.build_shape [1:i64, 57:i64, 92:i64] // CHECK: shape[1,57,92] ts.print_shape %a - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/tensor_type.mlir b/paddle/infrt/tests/dialect/tensor_type.mlir index e580634055a72eae66196f67c8321c308599a1af..5847d567cf6b42a9404d33a938a67c6dc2f4aefc 100644 --- a/paddle/infrt/tests/dialect/tensor_type.mlir +++ b/paddle/infrt/tests/dialect/tensor_type.mlir @@ -6,5 +6,5 @@ func @test_tensor_type() { // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] dt.print_tensor (%a : !infrt.dense_tensor) - Infrt.return + infrt.return } diff --git a/paddle/infrt/tests/dialect/trt_ops.mlir b/paddle/infrt/tests/dialect/trt_ops.mlir index 6d25044d139f32c0a29adefb44c8fd2640cadd82..e3cb9670bec015e58e2a538bb55dfbe7c8b7f554 100644 --- a/paddle/infrt/tests/dialect/trt_ops.mlir +++ b/paddle/infrt/tests/dialect/trt_ops.mlir @@ -12,5 +12,5 @@ func @main(%bias:tensor, %c:tensor, %b1:tensor, %b2:tensor< %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=-1:si32} : (tensor, tensor) -> tensor %e2 = "pd.relu"(%d2) {} : (tensor) -> tensor - "infrt.return"(%e2) : (tensor)->() + infrt.return %e2 : tensor }
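Note on the mlir_exec.cc hunk above: besides the Infrt -> infrt op renames, it switches the phi pipeline from std::make_unique<...>(valid_places) to the infrt::createPhiOpCvtPass(...) factory. The sketch below is illustrative only and is not part of the patch; the infrt::Place element type and the two pass header paths are assumptions, while the factory names, the TargetType/PrecisionType/LayoutType values, and the common/types.h path are taken from the hunks above.

// Illustrative sketch, not part of the patch. Paths marked "assumed" and the
// infrt::Place element type are guesses; the factory calls and enum values are
// copied from the mlir_exec.cc hunk.
#include <vector>

#include "mlir/IR/MLIRContext.h"
#include "mlir/Pass/PassManager.h"

#include "paddle/infrt/dialect/infrt/common/types.h"              // TargetType, PrecisionType, LayoutType
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"   // assumed path for createInfrtOpFusePass
#include "paddle/infrt/dialect/phi/pass/phi_op_convert_pass.h"    // assumed path for createPhiOpCvtPass

// Build the phi lowering pipeline the way mlir_exec.cc does after this patch:
// pass factory functions instead of std::make_unique<SomePass>(valid_places).
void BuildPhiPipeline(mlir::MLIRContext* context) {
  mlir::PassManager phi_pass_manager(context);
  std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
                                             infrt::PrecisionType::FLOAT32,
                                             infrt::LayoutType::NCHW}};
  phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
  phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
}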