未验证 提交 481db5e9 编写于 作者: 王明冬 提交者: GitHub

[infrt] unify the infrt dialect. test=develop (#40451)

上级 e5c59fc9
...@@ -99,7 +99,8 @@ endfunction() ...@@ -99,7 +99,8 @@ endfunction()
function(mlir_add_rewriter td_base) function(mlir_add_rewriter td_base)
set(LLVM_TARGET_DEFINITIONS ${td_base}.td) set(LLVM_TARGET_DEFINITIONS ${td_base}.td)
mlir_tablegen(${td_base}.cpp.inc -gen-rewriters "-I${CMAKE_SOURCE_DIR}/infrt/dialect/pass") set(LLVM_TARGET_DEPENDS ${LLVM_TARGET_DEPENDS} ${CMAKE_SOURCE_DIR}/paddle/infrt/dialect/infrt/ir/infrt_base.td)
mlir_tablegen(${td_base}.cpp.inc -gen-rewriters)
add_public_tablegen_target(MLIR${td_base}IncGen) add_public_tablegen_target(MLIR${td_base}IncGen)
add_dependencies(mlir-headers MLIR${td_base}IncGen) add_dependencies(mlir-headers MLIR${td_base}IncGen)
endfunction() endfunction()
......
...@@ -90,7 +90,6 @@ add_subdirectory(tests) ...@@ -90,7 +90,6 @@ add_subdirectory(tests)
set(infrt_mlir_incs set(infrt_mlir_incs
basic_kernels_inc basic_kernels_inc
test_kernels_inc test_kernels_inc
infrt_base_inc
tensor_shape_inc tensor_shape_inc
dense_tensor_inc dense_tensor_inc
pd_ops_inc pd_ops_inc
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include "paddle/infrt/common/global.h" #include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/dense_tensor.h" #include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/mlir_loader.h" #include "paddle/infrt/dialect/mlir_loader.h"
#include "paddle/infrt/host_context/core_runtime.h" #include "paddle/infrt/host_context/core_runtime.h"
#include "paddle/infrt/host_context/kernel_registry.h" #include "paddle/infrt/host_context/kernel_registry.h"
...@@ -144,7 +144,7 @@ class PredictExecutor : public MlirToRuntimeTranslator { ...@@ -144,7 +144,7 @@ class PredictExecutor : public MlirToRuntimeTranslator {
// process results // process results
auto& last_op = predict_func.front().back(); auto& last_op = predict_func.front().back();
if (last_op.getName().getStringRef() == "Infrt.return") { if (last_op.getName().getStringRef() == "infrt.return") {
for (size_t i = 0; i < last_op.getNumOperands(); ++i) { for (size_t i = 0; i < last_op.getNumOperands(); ++i) {
auto* value = AddValue(mlir::Value(last_op.getOperand(i))); auto* value = AddValue(mlir::Value(last_op.getOperand(i)));
results_.push_back(ValueRef(value)); results_.push_back(ValueRef(value));
......
...@@ -2,26 +2,20 @@ core_gather_headers() ...@@ -2,26 +2,20 @@ core_gather_headers()
gather_srcs(infrt_src SRCS gather_srcs(infrt_src SRCS
dialect.cc dialect.cc
basic_kernels.cc init_dialects.cc
test_kernels.cc
infrt_base.cc
init_infrt_dialects.cc
tensor_shape.cc tensor_shape.cc
dense_tensor.cc dense_tensor.cc
mlir_loader.cc mlir_loader.cc
diagnostic_utils.cc diagnostic_utils.cc
pd_types.cc
pd_ops.cc pd_ops.cc
) )
mlir_tablegen_on(basic_kernels)
mlir_tablegen_on(test_kernels)
mlir_tablegen_on(infrt_base DIALECT Infrt)
mlir_tablegen_on(tensor_shape DIALECT ts) mlir_tablegen_on(tensor_shape DIALECT ts)
mlir_tablegen_on(dense_tensor DIALECT dt) mlir_tablegen_on(dense_tensor DIALECT dt)
mlir_tablegen_on(pd_op_base DIALECT pd) mlir_tablegen_on(pd_op_base DIALECT pd)
mlir_tablegen_on(pd_ops) mlir_tablegen_on(pd_ops)
mlir_tablegen_on(pd_extra_ops) mlir_tablegen_on(pd_extra_ops)
mlir_add_rewriter(rewrite) mlir_add_rewriter(rewrite)
# TODO(Superjomn) add a cmake function cc_executable to ecapsulate the following code # TODO(Superjomn) add a cmake function cc_executable to ecapsulate the following code
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
#include <string> #include <string>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/dense_tensor_dialect.hpp.inc" #include "paddle/infrt/dialect/dense_tensor_dialect.hpp.inc"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#else #else
#define DT_OPS #define DT_OPS
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "paddle/infrt/dialect/tensor_shape_base.td" include "paddle/infrt/dialect/tensor_shape_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
......
core_gather_headers() add_subdirectory(common)
add_subdirectory(ir)
gather_srcs(infrt_src SRCS
common_type.cc
infrt_dialect.cc
)
add_mlir_dialect(infrt_ops infrt)
set(LLVM_TARGET_DEFINITIONS infrt_ops.td)
mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt)
mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt)
add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen)
add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen)
add_subdirectory(pass) add_subdirectory(pass)
core_gather_headers()
gather_srcs(infrt_src SRCS
types.cc
utils.cc
)
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
namespace infrt { namespace infrt {
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/infrt/common/utils.h"
// Wraps a single SSA value into a one-element vector so it can be used where
// a range of values is expected (e.g. from DRR NativeCodeCall rewrites).
mlir::SmallVector<mlir::Value, 4> infrt::cvtValueToValueRange(
    const mlir::Value &operand) {
  mlir::SmallVector<mlir::Value, 4> values;
  values.push_back(operand);
  return values;
}
// Concatenates two value ranges into a single vector, preserving order:
// all of operand_0 first, then all of operand_1.
mlir::SmallVector<mlir::Value, 4> infrt::concatTwoValueRange(
    mlir::ValueRange operand_0, mlir::ValueRange operand_1) {
  mlir::SmallVector<mlir::Value, 4> merged(operand_0.begin(), operand_0.end());
  merged.insert(merged.end(), operand_1.begin(), operand_1.end());
  return merged;
}
...@@ -12,4 +12,20 @@ ...@@ -12,4 +12,20 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/pd_types.h" #pragma once
#include <mlir/IR/Builders.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/DialectImplementation.h>
#include <mlir/IR/MLIRContext.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>
namespace infrt {
// Wraps a single mlir::Value into a one-element SmallVector.
// Used as a NativeCodeCall helper from TableGen rewrite patterns.
mlir::SmallVector<mlir::Value, 4> cvtValueToValueRange(
const mlir::Value &operand);
// Concatenates two value ranges (operand_0 followed by operand_1) into one
// SmallVector. Also a NativeCodeCall helper for rewrite patterns.
mlir::SmallVector<mlir::Value, 4> concatTwoValueRange(
mlir::ValueRange operand_0, mlir::ValueRange operand_1);
} // namespace infrt
# Build rules for the unified infrt dialect IR sources.
core_gather_headers()
# Register dialect implementation files with the global infrt_src list.
gather_srcs(infrt_src SRCS
infrt_dialect.cc
basic_kernels.cc
test_kernels.cc
)
# Generate op/type declarations for the "infrt" dialect from infrt_ops.td.
add_mlir_dialect(infrt_ops infrt)
# Attribute decls/defs are generated separately with mlir-tblgen
# (presumably add_mlir_dialect does not cover attrdefs in this MLIR version
# — confirm against the vendored LLVM).
set(LLVM_TARGET_DEFINITIONS infrt_ops.td)
mlir_tablegen(infrt_opsAttributes.h.inc -gen-attrdef-decls -dialect=infrt)
mlir_tablegen(infrt_opsAttributes.cpp.inc -gen-attrdef-defs -dialect=infrt)
add_public_tablegen_target(MLIRinfrt_opsAttributesIncGen)
add_dependencies(mlir-headers MLIRinfrt_opsAttributesIncGen)
# Per-file tablegen wrappers for the kernel op definitions.
mlir_tablegen_on(basic_kernels)
mlir_tablegen_on(test_kernels)
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/basic_kernels.h" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include <llvm/ADT/STLExtras.h> #include <llvm/ADT/STLExtras.h>
#include <mlir/IR/Attributes.h> #include <mlir/IR/Attributes.h>
...@@ -30,23 +30,6 @@ namespace infrt { ...@@ -30,23 +30,6 @@ namespace infrt {
namespace dialect { namespace dialect {
using namespace mlir; // NOLINT using namespace mlir; // NOLINT
// Parses the custom assembly of a call op:
//   @callee(%operands) {attrs} : (input-types) -> (result-types)
// On success the resolved operands and result types are appended to `result`;
// any parse failure is reported at the callee's source location.
static ParseResult parseCallOp(OpAsmParser &parser, // NOLINT
OperationState &result) { // NOLINT
SymbolRefAttr callee_attr;
FunctionType callee_type;
SmallVector<OpAsmParser::OperandType, 4> operands;
// Remember where the callee symbol starts for diagnostics during resolution.
auto callee_loc = parser.getNameLoc();
if (parser.parseAttribute(callee_attr, "callee", result.attributes) ||
parser.parseOperandList(operands, OpAsmParser::Delimiter::Paren) ||
parser.parseOptionalAttrDict(result.attributes) ||
parser.parseColonType(callee_type) ||
parser.addTypesToList(callee_type.getResults(), result.types) ||
parser.resolveOperands(
operands, callee_type.getInputs(), callee_loc, result.operands))
return failure();
return success();
}
static ParseResult parseConstantOp(Type attrType, static ParseResult parseConstantOp(Type attrType,
OpAsmParser &parser, // NOLINT OpAsmParser &parser, // NOLINT
OperationState &result) { // NOLINT OperationState &result) { // NOLINT
...@@ -79,24 +62,6 @@ static ParseResult parseConstantI64Op(OpAsmParser &parser, // NOLINT ...@@ -79,24 +62,6 @@ static ParseResult parseConstantI64Op(OpAsmParser &parser, // NOLINT
IntegerType::get(result.getContext(), 64), parser, result); IntegerType::get(result.getContext(), 64), parser, result);
} }
// Parses the custom assembly of a return op: an optional operand list,
// followed by a `:`-separated type list only when operands are present.
static ParseResult parseReturnOp(OpAsmParser &parser, // NOLINT
OperationState &result) { // NOLINT
SmallVector<OpAsmParser::OperandType, 2> opInfo;
SmallVector<Type, 2> types;
llvm::SMLoc loc = parser.getCurrentLocation();
// Short-circuit || order matters: types are only parsed when operands exist.
return failure(parser.parseOperandList(opInfo) ||
(!opInfo.empty() && parser.parseColonTypeList(types)) ||
parser.resolveOperands(opInfo, types, loc, result.operands));
}
// Prints a call op as: @callee(%operands) {attrs-minus-callee} :
// NOTE(review): nothing is printed after the trailing " : " here, while
// parseCallOp expects a function type at that position — verify whether a
// `p.printType(...)` call was dropped from this (since-deleted) printer.
static void print(OpAsmPrinter &p, CallOp op) { // NOLINT
p << op->getAttr("callee") << "(";
p.printOperands(op.getOperands());
p << ")";
// "callee" is printed inline above, so elide it from the attribute dict.
p.printOptionalAttrDict(op->getAttrs(), {"callee"});
p << " : ";
}
static void printConstant(OpAsmPrinter &p, mlir::Operation *op) { // NOLINT static void printConstant(OpAsmPrinter &p, mlir::Operation *op) { // NOLINT
p << " "; p << " ";
p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"value"}); p.printOptionalAttrDict(op->getAttrs(), /*elidedAttrs=*/{"value"});
...@@ -127,37 +92,13 @@ static void print(OpAsmPrinter &p, ConstantI64Op op) { // NOLINT ...@@ -127,37 +92,13 @@ static void print(OpAsmPrinter &p, ConstantI64Op op) { // NOLINT
printConstant(p, op); printConstant(p, op);
} }
// Prints a return op; operands and their trailing type list are emitted only
// when at least one operand is present (mirrors parseReturnOp).
// NOTE(review): after " : " this interleaves the operands themselves, not
// their types — parseReturnOp expects a type list there; confirm whether
// `op.getOperandTypes()` was intended.
static void print(OpAsmPrinter &p, ReturnOp op) { // NOLINT
if (op.getNumOperands() > 0) {
p << ' ';
p.printOperands(op.getOperands());
p << " : ";
llvm::interleaveComma(op.getOperands(), p);
}
}
static LogicalResult verify(CallOp op) { return success(); }
static LogicalResult verify(ConstantF32Op op) { return success(); } static LogicalResult verify(ConstantF32Op op) { return success(); }
static LogicalResult verify(ConstantI32Op op) { return success(); } static LogicalResult verify(ConstantI32Op op) { return success(); }
static LogicalResult verify(ConstantF64Op op) { return success(); } static LogicalResult verify(ConstantF64Op op) { return success(); }
static LogicalResult verify(ConstantI64Op op) { return success(); } static LogicalResult verify(ConstantI64Op op) { return success(); }
// Verifies that a return op's operand count matches the result arity of the
// enclosing FuncOp. Returns without checking when the parent is not a FuncOp
// (e.g. the op sits inside some other region).
static LogicalResult verify(ReturnOp op) {
auto function = dyn_cast<FuncOp>(op->getParentOp());
// Ops nested in non-function regions are accepted as-is.
if (!function) return success();
auto results = function.getType().getResults();
if (op.getNumOperands() != results.size())
return op.emitOpError("has ")
<< op.getNumOperands()
<< " operands, but enclosing function returns " << results.size();
return success();
}
} // namespace dialect } // namespace dialect
} // namespace infrt } // namespace infrt
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/basic_kernels.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc"
...@@ -18,4 +18,4 @@ ...@@ -18,4 +18,4 @@
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/basic_kernels.hpp.inc" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.hpp.inc"
...@@ -4,10 +4,10 @@ ...@@ -4,10 +4,10 @@
#else #else
#define BASIC_OPS #define BASIC_OPS
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<INFRT_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> { class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
// Each registered op needs to provide all of a printer, parser and verifier. // Each registered op needs to provide all of a printer, parser and verifier.
let printer = [{ return infrt::dialect::print(p, *this); }]; let printer = [{ return infrt::dialect::print(p, *this); }];
...@@ -15,23 +15,6 @@ class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<INFRT_Dialect, m ...@@ -15,23 +15,6 @@ class INFRT_Op<string mnemonic, list<OpTrait> traits = []> : Op<INFRT_Dialect, m
let parser = [{ return infrt::dialect::parse$cppClass(parser, result); }]; let parser = [{ return infrt::dialect::parse$cppClass(parser, result); }];
} }
def CallOp : INFRT_Op<"call"> {
let summary = "call a host operation";
let description = [{
The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type.
%2 = infrt.call @add(%0, %1) : (f32, f32) -> f32
}];
let arguments = (ins FlatSymbolRefAttr:$callee, Variadic<AnyType>:$operands);
let results = (outs Variadic<AnyType>);
let extraClassDeclaration = [{
mlir::StringRef getCallee() { return callee(); }
mlir::FunctionType getCalleeType();
}];
}
class ConstantOp<string suffix, Type baseType, Attr attr> class ConstantOp<string suffix, Type baseType, Attr attr>
: INFRT_Op<"constant." # suffix, [NoSideEffect]> { : INFRT_Op<"constant." # suffix, [NoSideEffect]> {
let summary = "constant value constructor in host"; let summary = "constant value constructor in host";
...@@ -45,22 +28,6 @@ def ConstantI64Op : ConstantOp<"i64", I64, I64Attr>; ...@@ -45,22 +28,6 @@ def ConstantI64Op : ConstantOp<"i64", I64, I64Attr>;
def ConstantF32Op : ConstantOp<"f32", F32, F32Attr>; def ConstantF32Op : ConstantOp<"f32", F32, F32Attr>;
def ConstantF64Op : ConstantOp<"f64", F64, F64Attr>; def ConstantF64Op : ConstantOp<"f64", F64, F64Attr>;
def ReturnOp : INFRT_Op<"return", [Terminator]> {
let summary = "host executor return operation";
let description = [{
The "Infrt.return" operation represents a return operation within a function.
func @foo() : (i32, f8) {
Infrt.return %0, %1 : i32, f8
}
}];
let arguments = (ins Variadic<AnyType>:$operands);
let builders = [OpBuilder<(ins),
[{ build($_builder, $_state, llvm::None); }]>];
}
class AddOp<string suffix, Type type> : INFRT_Op<"add." # suffix, [NoSideEffect]> { class AddOp<string suffix, Type type> : INFRT_Op<"add." # suffix, [NoSideEffect]> {
let summary = "infrt.add operation"; let summary = "infrt.add operation";
let description = [{ let description = [{
...@@ -112,7 +79,7 @@ def PrintF32Op : PrintOp<"f32", F32>; ...@@ -112,7 +79,7 @@ def PrintF32Op : PrintOp<"f32", F32>;
def PrintF64Op : PrintOp<"f64", F64>; def PrintF64Op : PrintOp<"f64", F64>;
def PrintStringOp : INFRT_Op<"print_string"> { def PrintStringOp : INFRT_Op<"print_string"> {
let summary = "Infrt.print_string"; let summary = "infrt.print_string";
let description = [{ let description = [{
An operation that prints a string. An operation that prints a string.
}]; }];
......
...@@ -101,4 +101,21 @@ class Infrt_Attr<string name, list<Trait> traits = [], ...@@ -101,4 +101,21 @@ class Infrt_Attr<string name, list<Trait> traits = [],
: AttrDef<Infrt_Dialect, name, traits, baseCppClass> { : AttrDef<Infrt_Dialect, name, traits, baseCppClass> {
let mnemonic = ?; let mnemonic = ?;
} }
// NativeCodeCall helpers for DRR pattern rewrites: attribute constructors
// that expand to mlir::Builder calls, plus value-range adapters implemented
// in C++ (infrt::cvtValueToValueRange / infrt::concatTwoValueRange).
class INFRT_createI32Attr<string value> : NativeCodeCall<
"$_builder.getI32IntegerAttr(" # value # ")">;

class INFRT_createSI32Attr<string value> : NativeCodeCall<
"$_builder.getSI32IntegerAttr(" # value # ")">;

class INFRT_createF32Attr<string value> : NativeCodeCall<
"$_builder.getF32FloatAttr(" # value # ")">;

// Wraps a single matched value as a value range.
def INFRT_cvtValueToValueRange : NativeCodeCall<
"infrt::cvtValueToValueRange($0)">;

// Concatenates two matched value ranges.
def INFRT_concatTwoValueRange : NativeCodeCall<
"infrt::concatTwoValueRange($0, $1)">;
#endif // INFRT_OPS_BASE #endif // INFRT_OPS_BASE
...@@ -12,40 +12,52 @@ ...@@ -12,40 +12,52 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include <llvm/ADT/TypeSwitch.h> #include <llvm/ADT/TypeSwitch.h>
#include <mlir/IR/Builders.h> #include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h> #include <mlir/IR/BuiltinOps.h>
#include <mlir/IR/DialectImplementation.h> #include <mlir/IR/DialectImplementation.h>
#include "paddle/infrt/dialect/dense_tensor.h" #include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_opsDialect.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.cpp.inc"
#define GET_TYPEDEF_CLASSES #define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc"
#define GET_ATTRDEF_CLASSES #define GET_ATTRDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc"
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc"
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/ir/test_kernels.h"
namespace infrt { namespace infrt {
void InfrtDialect::initialize() { void InfrtDialect::initialize() {
addTypes< addTypes<
#define GET_TYPEDEF_LIST #define GET_TYPEDEF_LIST
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.cpp.inc" // NOLINT #include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.cpp.inc" // NOLINT
>(); >();
addAttributes< addAttributes<
#define GET_ATTRDEF_LIST #define GET_ATTRDEF_LIST
#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.cpp.inc" // NOLINT #include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.cpp.inc" // NOLINT
>(); >();
addOperations< addOperations<
#define GET_OP_LIST #define GET_OP_LIST
#include "paddle/infrt/dialect/infrt/infrt_ops.cpp.inc" // NOLINT #include "paddle/infrt/dialect/infrt/ir/infrt_ops.cpp.inc" // NOLINT
>();
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/infrt/ir/basic_kernels.cpp.inc"
>();
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc"
>(); >();
} }
...@@ -128,7 +140,7 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const { ...@@ -128,7 +140,7 @@ mlir::Type InfrtDialect::parseType(::mlir::DialectAsmParser &parser) const {
void InfrtDialect::printType(::mlir::Type type, void InfrtDialect::printType(::mlir::Type type,
::mlir::DialectAsmPrinter &os) const { ::mlir::DialectAsmPrinter &os) const {
// print LoDTensorType, for example: !Infrt.lod_tensor<3x64x3x3xf32,5> // print LoDTensorType, for example: !infrt.lod_tensor<3x64x3x3xf32,5>
if (type.isa<infrt::LoDTensorType>()) { if (type.isa<infrt::LoDTensorType>()) {
auto lod_tensor_type = type.cast<infrt::LoDTensorType>(); auto lod_tensor_type = type.cast<infrt::LoDTensorType>();
os << "lod_tensor<"; os << "lod_tensor<";
......
...@@ -22,14 +22,14 @@ ...@@ -22,14 +22,14 @@
#include <mlir/IR/Dialect.h> #include <mlir/IR/Dialect.h>
#include <mlir/IR/OpDefinition.h> #include <mlir/IR/OpDefinition.h>
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
#include "paddle/infrt/dialect/infrt/infrt_opsDialect.h.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsDialect.h.inc"
#define GET_TYPEDEF_CLASSES #define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsTypes.h.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsTypes.h.inc"
#define GET_ATTRDEF_CLASSES #define GET_ATTRDEF_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_opsAttributes.h.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_opsAttributes.h.inc"
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/infrt/infrt_ops.h.inc" #include "paddle/infrt/dialect/infrt/ir/infrt_ops.h.inc"
include "paddle/infrt/dialect/infrt/infrt_ops_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
// Op definition // Op definition
class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, traits> { class Infrt_Op<string mnemonic, list<OpTrait> traits = []> : Op<Infrt_Dialect, mnemonic, traits> {
...@@ -33,6 +33,26 @@ def Infrt_ReturnOp : Infrt_Op<"return", [Terminator]> { ...@@ -33,6 +33,26 @@ def Infrt_ReturnOp : Infrt_Op<"return", [Terminator]> {
let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?"; let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?";
} }
// Direct call to a host function, printed/parsed via the declarative
// assembly format below:
//   %r = infrt.call @callee(%args) : (arg-types) -> (result-types)
def Infrt_CallOp : Infrt_Op<"call"> {
let summary = "call a host operation";
let description = [{
The "infrt.call" operation represents a direct call to a function. The operands and result types of the call must match the specified function type.

%2 = infrt.call @add(%0, %1) : (f32, f32) -> f32
}];

// Callee symbol plus the values forwarded as call arguments; result types
// come from the functional-type in the assembly.
let arguments = (ins FlatSymbolRefAttr:$callee, Variadic<AnyType>:$operands);
let results = (outs Variadic<AnyType>);

let assemblyFormat = [{
$callee `(` $operands `)` attr-dict `:` functional-type($operands, results)
}];
}
def Infrt_CvtTensorOp : Infrt_Op<"cvt_tensor", [NoSideEffect]> { def Infrt_CvtTensorOp : Infrt_Op<"cvt_tensor", [NoSideEffect]> {
let summary = "convert tensor type op"; let summary = "convert tensor type op";
let description = [{convert tensor type op!}]; let description = [{convert tensor type op!}];
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/test_kernels.h" #include "paddle/infrt/dialect/infrt/ir/test_kernels.h"
#include <mlir/IR/Builders.h> #include <mlir/IR/Builders.h>
#include <mlir/IR/OpDefinition.h> #include <mlir/IR/OpDefinition.h>
...@@ -147,7 +147,7 @@ static mlir::LogicalResult verify(BenchmarkOp op) { ...@@ -147,7 +147,7 @@ static mlir::LogicalResult verify(BenchmarkOp op) {
// Verify that the target benchmark region has exactly one return value. // Verify that the target benchmark region has exactly one return value.
auto &region = op.region(); auto &region = op.region();
auto &last_op = region.front().back(); auto &last_op = region.front().back();
if (last_op.getName().getStringRef() != "Infrt.return") { if (last_op.getName().getStringRef() != "infrt.return") {
return op.emitOpError("missing return statement"); return op.emitOpError("missing return statement");
} }
if (last_op.getNumOperands() != 1) { if (last_op.getNumOperands() != 1) {
...@@ -161,4 +161,4 @@ static mlir::LogicalResult verify(BenchmarkOp op) { ...@@ -161,4 +161,4 @@ static mlir::LogicalResult verify(BenchmarkOp op) {
} // namespace infrt } // namespace infrt
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/test_kernels.cpp.inc" #include "paddle/infrt/dialect/infrt/ir/test_kernels.cpp.inc"
...@@ -17,4 +17,4 @@ ...@@ -17,4 +17,4 @@
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/test_kernels.hpp.inc" #include "paddle/infrt/dialect/infrt/ir/test_kernels.hpp.inc"
...@@ -4,12 +4,12 @@ ...@@ -4,12 +4,12 @@
#else #else
#define TEST_OPS #define TEST_OPS
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
// Base class for Test dialect ops. // Base class for Test dialect ops.
class Test_Op<string mnemonic, list<OpTrait> traits = []> : class Test_Op<string mnemonic, list<OpTrait> traits = []> :
Op<INFRT_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> { Op<Infrt_Dialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
// Each registered op in the Test namespace needs to provide all of a printer, // Each registered op in the Test namespace needs to provide all of a printer,
// parser and verifier. // parser and verifier.
...@@ -45,7 +45,7 @@ def BenchmarkOp : Test_Op<"benchmark"> { ...@@ -45,7 +45,7 @@ def BenchmarkOp : Test_Op<"benchmark"> {
// The following code benchmarks the infrt.add.i32 kernel. // The following code benchmarks the infrt.add.i32 kernel.
%x = infrt.add.i32 %c, %c %x = infrt.add.i32 %c, %c
// The benchmarked function needs to return exactly one value. // The benchmarked function needs to return exactly one value.
Infrt.return %x : i32 infrt.return %x : i32
} }
}]; }];
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define INFRT_OP_FUSE #define INFRT_OP_FUSE
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt/infrt_ops.td" include "paddle/infrt/dialect/infrt/ir/infrt_ops.td"
include "paddle/infrt/dialect/pd_ops.td" include "paddle/infrt/dialect/pd_ops.td"
def FuseCvtTensorPattern : Pat< def FuseCvtTensorPattern : Pat<
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h" #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse_pass.h"
#include <mlir/Transforms/GreedyPatternRewriteDriver.h> #include <mlir/Transforms/GreedyPatternRewriteDriver.h>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
namespace { namespace {
#include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse.cpp.inc" // NOLINT #include "paddle/infrt/dialect/infrt/pass/infrt_op_fuse.cpp.inc" // NOLINT
......
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/basic_kernels.h"
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/test_kernels.h"
namespace infrt {
namespace dialect {
// ----INFRTDialect definition begin----
// Registers the ops generated from basic_kernels.td and test_kernels.td and
// opts in to unknown types/operations so unregistered IR can round-trip
// through this dialect.
void INFRTDialect::initialize() {
allowUnknownTypes();
allowUnknownOperations();

// Ops generated by mlir-tblgen; GET_OP_LIST expands to the op class list.
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/basic_kernels.cpp.inc"
>();
addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/test_kernels.cpp.inc"
>();
}
// Type parser hook. No concrete types are registered for this dialect, so
// any keyword is diagnosed as unknown and a null Type is returned.
// (The previous comment mentioned parsing !infrt.tensor_map, but no such
// branch exists in this body.)
mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
llvm::StringRef keyword;
if (parser.parseKeyword(&keyword)) return mlir::Type();

// Every keyword falls through to the unknown-type diagnostic.
parser.emitError(parser.getCurrentLocation(), "unknown infrt type: ")
<< keyword;
return mlir::Type();
}
// Type printer hook. This dialect registers no types, so reaching this
// function indicates a bug — hence the unconditional llvm_unreachable.
void INFRTDialect::printType(mlir::Type type,
mlir::DialectAsmPrinter &printer) const {
llvm_unreachable("unknown infrt type.");
}
// ----INFRTDialect definition end----
} // namespace dialect
} // namespace infrt
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <mlir/IR/Builders.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/DialectImplementation.h>
#include <mlir/IR/MLIRContext.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>
#include "paddle/infrt/dialect/infrt_base.hpp.inc"
namespace infrt {
namespace dialect {
// Legacy host dialect, registered under the namespace "Infrt".
// Construction is restricted to mlir::MLIRContext (friend); op registration
// and the unknown-type/op opt-in happen in initialize().
class INFRTDialect : public mlir::Dialect {
explicit INFRTDialect(mlir::MLIRContext *context)
: mlir::Dialect(
getDialectNamespace(), context, mlir::TypeID::get<INFRTDialect>()) {
initialize();
}

// parse types registered to the dialect.
mlir::Type parseType(mlir::DialectAsmParser &parser) const override;

// print types registered to the dialect.
void printType(mlir::Type type,
mlir::DialectAsmPrinter &printer) const override;

void initialize();
// Only MLIRContext may construct dialect instances.
friend class mlir::MLIRContext;

public:
static ::llvm::StringRef getDialectNamespace() { return "Infrt"; }
};
} // namespace dialect
// Builds a signless 32-bit IntegerAttr holding `constant`.
// `loc` is unused — presumably kept so all create*Attr helpers share the
// same (builder, loc, value) shape expected by rewrite-pattern callers.
template <typename T>
static mlir::IntegerAttr createI32Attr(mlir::OpBuilder &b,  // NOLINT
                                       mlir::Location loc,
                                       T constant) {
  mlir::Type i32_type = b.getI32Type();
  return b.getIntegerAttr(i32_type, constant);
}
// Builds a signed 32-bit IntegerAttr holding `constant`.
// `loc` is unused — presumably kept for signature uniformity with the other
// create*Attr helpers.
template <typename T>
static mlir::IntegerAttr createSI32Attr(mlir::OpBuilder &b,  // NOLINT
                                        mlir::Location loc,
                                        T constant) {
  mlir::IntegerAttr attr = b.getSI32IntegerAttr(constant);
  return attr;
}
// Builds a 32-bit FloatAttr holding `constant`.
// `loc` is unused — presumably kept for signature uniformity with the other
// create*Attr helpers.
template <typename T>
static mlir::FloatAttr createF32Attr(mlir::OpBuilder &b,  // NOLINT
                                     mlir::Location loc,
                                     T constant) {
  mlir::FloatAttr attr = b.getF32FloatAttr(constant);
  return attr;
}
// Wraps one SSA value in a single-element vector for range-typed callers.
static mlir::SmallVector<mlir::Value, 4> cvtValueToValueRange(
    const mlir::Value &operand) {
  mlir::SmallVector<mlir::Value, 4> values;
  values.push_back(operand);
  return values;
}
// Concatenates two value ranges, keeping operand_0 before operand_1.
static mlir::SmallVector<mlir::Value, 4> concatTwoValueRange(
    mlir::ValueRange operand_0, mlir::ValueRange operand_1) {
  mlir::SmallVector<mlir::Value, 4> merged(operand_0.begin(), operand_0.end());
  merged.insert(merged.end(), operand_1.begin(), operand_1.end());
  return merged;
}
} // namespace infrt
#ifndef INFRT_BASE
#define INFRT_BASE
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt/infrt_ops_base.td"
// Legacy host dialect definition; C++ classes live in ::infrt::dialect.
def INFRT_Dialect : Dialect {
let name = "Infrt";

let description = [{
The INFRT host dialect.
}];

let cppNamespace = "::infrt::dialect";
}

// Opaque buffer type registered under the "b" dialect namespace.
def BufferType : OpaqueType<"b", "buffer", "buffer">;

// NativeCodeCall helpers for DRR rewrite patterns; each forwards to the
// matching C++ helper declared in infrt_base.h.
class INFRT_createI32Attr<string value> : NativeCodeCall<
"infrt::createI32Attr($_builder, $_loc, " # value # ")">;

class INFRT_createSI32Attr<string value> : NativeCodeCall<
"infrt::createSI32Attr($_builder, $_loc, " # value # ")">;

class INFRT_createF32Attr<string value> : NativeCodeCall<
"infrt::createF32Attr($_builder, $_loc, " # value # ")">;

// Wraps a single matched value as a value range.
def INFRT_cvtValueToValueRange : NativeCodeCall<
"infrt::cvtValueToValueRange($0)">;

// Concatenates two matched value ranges.
def INFRT_concatTwoValueRange : NativeCodeCall<
"infrt::concatTwoValueRange($0, $1)">;
#endif // INFRT_BASE
...@@ -12,14 +12,14 @@ ...@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#include "paddle/infrt/dialect/init_infrt_dialects.h" #include "paddle/infrt/dialect/init_dialects.h"
#include <glog/logging.h> #include <glog/logging.h>
#include "paddle/infrt/dialect/basic_kernels.h"
#include "paddle/infrt/dialect/dense_tensor.h" #include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt_base.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h" #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
#include "paddle/infrt/dialect/phi/ir/phi_base.h" #include "paddle/infrt/dialect/phi/ir/phi_base.h"
...@@ -30,8 +30,7 @@ ...@@ -30,8 +30,7 @@
namespace infrt { namespace infrt {
void registerCinnDialects(mlir::DialectRegistry &registry) { // NOLINT void registerCinnDialects(mlir::DialectRegistry &registry) { // NOLINT
registry.insert<ts::TensorShapeDialect, registry.insert<ts::TensorShapeDialect,
dialect::INFRTDialect, InfrtDialect,
infrt::InfrtDialect,
dt::DTDialect, dt::DTDialect,
mlir::pd::PaddleDialect, mlir::pd::PaddleDialect,
#ifdef INFRT_WITH_PHI #ifdef INFRT_WITH_PHI
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <vector> #include <vector>
#include "paddle/infrt/dialect/diagnostic_utils.h" #include "paddle/infrt/dialect/diagnostic_utils.h"
#include "paddle/infrt/dialect/init_infrt_dialects.h" #include "paddle/infrt/dialect/init_dialects.h"
namespace infrt { namespace infrt {
namespace dialect { namespace dialect {
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include <string> #include <string>
#include "paddle/infrt/dialect/init_infrt_dialects.h" #include "paddle/infrt/dialect/init_dialects.h"
namespace infrt { namespace infrt {
namespace dialect { namespace dialect {
...@@ -32,13 +32,13 @@ TEST(MlirLoader, basic) { ...@@ -32,13 +32,13 @@ TEST(MlirLoader, basic) {
auto source = R"ROC( auto source = R"ROC(
func @main() -> f32 { func @main() -> f32 {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
"Infrt.print.f32"(%v0) : (f32) -> () "infrt.print.f32"(%v0) : (f32) -> ()
Infrt.return %value : f32 infrt.return %value : f32
} }
)ROC"; )ROC";
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <mlir/Support/MlirOptMain.h> #include <mlir/Support/MlirOptMain.h>
#include <mlir/Transforms/Passes.h> #include <mlir/Transforms/Passes.h>
#include "paddle/infrt/dialect/init_infrt_dialects.h" #include "paddle/infrt/dialect/init_dialects.h"
int main(int argc, char **argv) { int main(int argc, char **argv) {
mlir::DialectRegistry registry; mlir::DialectRegistry registry;
......
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
include "mlir/IR/OpBase.td" include "mlir/IR/OpBase.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt/infrt_ops_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
def PD_Dialect : Dialect { def PD_Dialect : Dialect {
let name = "pd"; let name = "pd";
......
...@@ -16,7 +16,6 @@ ...@@ -16,7 +16,6 @@
#include <mlir/IR/Matchers.h> #include <mlir/IR/Matchers.h>
#include <mlir/IR/PatternMatch.h> #include <mlir/IR/PatternMatch.h>
#include "paddle/infrt/dialect/infrt_base.h"
#define GET_OP_CLASSES #define GET_OP_CLASSES
#include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT #include "paddle/infrt/dialect/pd_ops.cpp.inc" // NOLINT
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#include <mlir/Interfaces/InferTypeOpInterface.h> #include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h> #include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
namespace mlir { namespace mlir {
namespace pd { namespace pd {
......
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file defines the types used in PaddlePaddle MLIR dialect.
// We borrowed much ideas from tensorflow mlir dialect (tf_types.h in
// tensorflow).
#pragma once
#include <mlir/IR/Diagnostics.h>
#include <mlir/IR/Location.h>
#include <mlir/IR/Operation.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>
namespace mlir {
namespace PD {

// Common base class for every type in the Paddle MLIR dialect.
// `classof` is only declared here — presumably implemented in the
// corresponding .cc file (not visible in this header); it lets
// llvm::isa/dyn_cast recognize any Paddle type through this base.
class PaddleType : public Type {
 public:
  using Type::Type;

  static bool classof(Type type);
};

namespace detail {

// CRTP helper that routes `Derived` through mlir::Type::TypeBase with
// PaddleType as the base and a plain TypeStorage (i.e. singleton types with
// no parameters). Concrete types only need the HANDLE_PD_TYPE boilerplate.
template <typename Derived>
class PaddleTypeImpl : public Type::TypeBase<Derived, PaddleType, TypeStorage> {
 public:
  using Base = typename Type::TypeBase<Derived, PaddleType, TypeStorage>;
  using PDBase = PaddleTypeImpl<Derived>;
  using Base::Base;
};

}  // namespace detail

// Declares a parameterless Paddle type class named `<pdtype>Type`.
// NOTE(review): `enumerant` and `name` are not expanded in this body —
// assumed kept for symmetry with the macro's call sites; confirm there.
#define HANDLE_PD_TYPE(pdtype, enumerant, name)                       \
  class pdtype##Type : public detail::PaddleTypeImpl<pdtype##Type> {  \
   public:                                                            \
    using PDBase::PDBase;                                             \
  };

}  // namespace PD
}  // namespace mlir
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#pragma once #pragma once
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
#include "paddle/phi/common/backend.h" #include "paddle/phi/common/backend.h"
#include "paddle/phi/common/data_type.h" #include "paddle/phi/common/data_type.h"
#include "paddle/phi/common/layout.h" #include "paddle/phi/common/layout.h"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define PHI_BASE #define PHI_BASE
include "mlir/IR/OpBase.td" include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "mlir/Interfaces/InferTypeOpInterface.td" include "mlir/Interfaces/InferTypeOpInterface.td"
def PHI_Dialect : Dialect { def PHI_Dialect : Dialect {
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td" include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td" include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td"
def PHI_CPUKernelDialect : Dialect { def PHI_CPUKernelDialect : Dialect {
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td" include "paddle/infrt/dialect/phi/ir/infrt_phi_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td" include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
def PHI_DenseTensorDialect : Dialect { def PHI_DenseTensorDialect : Dialect {
let name = "phi_dt"; let name = "phi_dt";
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include <string> #include <string>
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
#include "paddle/infrt/dialect/phi/ir/infrt_phi_baseDialect.h.inc" #include "paddle/infrt/dialect/phi/ir/infrt_phi_baseDialect.h.inc"
......
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/dense_tensor.h" #include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/phi/ir/phi_base.h" #include "paddle/infrt/dialect/phi/ir/phi_base.h"
#include "paddle/infrt/dialect/phi/ir/phi_cpu_kernelsDialect.h.inc" #include "paddle/infrt/dialect/phi/ir/phi_cpu_kernelsDialect.h.inc"
......
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
namespace infrt { namespace infrt {
......
...@@ -24,13 +24,29 @@ ...@@ -24,13 +24,29 @@
#include <unordered_set> #include <unordered_set>
#include <vector> #include <vector>
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h" #include "paddle/infrt/dialect/phi/ir/infrt_phi_tensor.h"
#include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h" #include "paddle/infrt/dialect/phi/pass/kernel_op_desc.h"
#include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h" #include "paddle/infrt/dialect/phi/pass/proto_arg_map_context.h"
#include "paddle/phi/core/compat/op_utils.h" #include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/ops/compat/signatures.h" #include "paddle/phi/ops/compat/signatures.h"
namespace infrt {
namespace {
// Function pass that rewrites pd-dialect ops toward phi kernels in two
// stages: convertStage() then diapatchStage() ("diapatch" [sic] is the
// original spelling; it performs the kernel-dispatch rewriting).
class phiOpCvtPass
    : public mlir::PassWrapper<phiOpCvtPass, mlir::FunctionPass> {
 public:
  // Pass name reported to the MLIR pass infrastructure.
  ::llvm::StringRef getName() const override { return "phiOpCvtPass"; }

  // Entry point: runs both stages on the current function.
  void runOnFunction() override;

  // `valid_places` is the set of candidate (target, precision, layout)
  // places handed to getCandidateKernels() during dispatch; defaults to an
  // empty list.
  explicit phiOpCvtPass(
      std::vector<infrt::Place> valid_places = std::vector<infrt::Place>())
      : valid_places_(valid_places) {}

 private:
  // Stage 1: convert pd ops (maps op arguments via phi kernel signatures).
  void convertStage();
  // Stage 2: pick a concrete phi kernel per op and insert contexts/casts.
  void diapatchStage();

  std::vector<infrt::Place> valid_places_;
};
// Implementation of the phiOpCvtPass. // Implementation of the phiOpCvtPass.
void phiOpCvtPass::runOnFunction() { void phiOpCvtPass::runOnFunction() {
convertStage(); convertStage();
...@@ -63,7 +79,7 @@ void phiOpCvtPass::convertStage() { ...@@ -63,7 +79,7 @@ void phiOpCvtPass::convertStage() {
::phi::KernelSignature kernel_sign = ::phi::KernelSignature kernel_sign =
::phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)( ::phi::OpUtilsMap::Instance().GetArgumentMappingFn(op_name)(
ProtoArgumentMappingContext(op)); infrt::ProtoArgumentMappingContext(op));
// resort input&output according to kernel_sign // resort input&output according to kernel_sign
::llvm::SmallVector<mlir::Value, 4> inputs, ori_output; ::llvm::SmallVector<mlir::Value, 4> inputs, ori_output;
::llvm::SmallVector<mlir::Type, 4> output_types; ::llvm::SmallVector<mlir::Type, 4> output_types;
...@@ -109,10 +125,10 @@ void phiOpCvtPass::diapatchStage() { ...@@ -109,10 +125,10 @@ void phiOpCvtPass::diapatchStage() {
} }
mlir::OpBuilder builder(&block, block.begin()); mlir::OpBuilder builder(&block, block.begin());
std::map<TargetType, mlir::Value> phi_context; std::map<infrt::TargetType, mlir::Value> phi_context;
for (infrt::KernelOp kernel_op : worklist) { for (infrt::KernelOp kernel_op : worklist) {
std::string kernel_name = kernel_op.name().str(); std::string kernel_name = kernel_op.name().str();
std::vector<PhiKernelDesc> candidates = std::vector<infrt::PhiKernelDesc> candidates =
getCandidateKernels(kernel_name, valid_places_); getCandidateKernels(kernel_name, valid_places_);
if (candidates.empty()) { if (candidates.empty()) {
LOG(FATAL) << "No candidate kernels for op:" << kernel_name; LOG(FATAL) << "No candidate kernels for op:" << kernel_name;
...@@ -121,12 +137,13 @@ void phiOpCvtPass::diapatchStage() { ...@@ -121,12 +137,13 @@ void phiOpCvtPass::diapatchStage() {
builder.setInsertionPoint(kernel_op); builder.setInsertionPoint(kernel_op);
// Todo: Implimentation the concrete pass pick strategy // Todo: Implimentation the concrete pass pick strategy
const PhiKernelDesc &phi_kernel_desc = candidates.front(); const infrt::PhiKernelDesc &phi_kernel_desc = candidates.front();
kernel_name = getPhiTargetPrefix(phi_kernel_desc.kernelType.target) + kernel_name =
kernel_name + infrt::getPhiTargetPrefix(phi_kernel_desc.kernelType.target) +
getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) + kernel_name +
getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout); infrt::getPhiPrecisionSuffix(phi_kernel_desc.kernelType.precision) +
infrt::getPhiLayoutSuffix(phi_kernel_desc.kernelType.layout);
mlir::OperationName operation_name(kernel_name, kernel_op.getContext()); mlir::OperationName operation_name(kernel_name, kernel_op.getContext());
mlir::OperationState operation_state(kernel_op.getLoc(), operation_name); mlir::OperationState operation_state(kernel_op.getLoc(), operation_name);
...@@ -134,18 +151,18 @@ void phiOpCvtPass::diapatchStage() { ...@@ -134,18 +151,18 @@ void phiOpCvtPass::diapatchStage() {
if (phi_context.find(phi_kernel_desc.kernelType.target) == if (phi_context.find(phi_kernel_desc.kernelType.target) ==
phi_context.end()) { phi_context.end()) {
switch (phi_kernel_desc.kernelType.target) { switch (phi_kernel_desc.kernelType.target) {
case TargetType::CPU: { case infrt::TargetType::CPU: {
auto context_value = auto context_value =
builder builder
.create<infrt::phi::CreateCPUContextOp>( .create<infrt::phi::CreateCPUContextOp>(
kernel_op.getLoc(), kernel_op.getLoc(),
phi::ContextType::get(kernel_op.getContext(), infrt::phi::ContextType::get(kernel_op.getContext(),
TargetType::CPU)) infrt::TargetType::CPU))
.output(); .output();
phi_context[TargetType::CPU] = context_value; phi_context[infrt::TargetType::CPU] = context_value;
} break; } break;
case TargetType::GPU: case infrt::TargetType::GPU:
case TargetType::UNK: case infrt::TargetType::UNK:
default: default:
LOG(FATAL) << "Unsupported TargetType"; LOG(FATAL) << "Unsupported TargetType";
break; break;
...@@ -155,29 +172,30 @@ void phiOpCvtPass::diapatchStage() { ...@@ -155,29 +172,30 @@ void phiOpCvtPass::diapatchStage() {
phi_context.at(phi_kernel_desc.kernelType.target)); phi_context.at(phi_kernel_desc.kernelType.target));
for (size_t index = 0; index < phi_kernel_desc.inputsType.size(); ++index) { for (size_t index = 0; index < phi_kernel_desc.inputsType.size(); ++index) {
mlir::Value input = kernel_op.getOperand(index); mlir::Value input = kernel_op.getOperand(index);
auto cvt_tensor_type_op = builder.create<CvtTensorOp>( auto cvt_tensor_type_op = builder.create<infrt::CvtTensorOp>(
kernel_op.getLoc(), kernel_op.getLoc(),
DenseTensorType::get(kernel_op.getContext(), infrt::DenseTensorType::get(
phi_kernel_desc.inputsType[index].target, kernel_op.getContext(),
phi_kernel_desc.inputsType[index].precision, phi_kernel_desc.inputsType[index].target,
phi_kernel_desc.inputsType[index].layout), phi_kernel_desc.inputsType[index].precision,
phi_kernel_desc.inputsType[index].layout),
input); input);
operation_state.addOperands(cvt_tensor_type_op.output()); operation_state.addOperands(cvt_tensor_type_op.output());
} }
for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); for (size_t index = 0; index < phi_kernel_desc.outputsType.size();
++index) { ++index) {
operation_state.addTypes( operation_state.addTypes(infrt::DenseTensorType::get(
DenseTensorType::get(kernel_op.getContext(), kernel_op.getContext(),
phi_kernel_desc.outputsType[index].target, phi_kernel_desc.outputsType[index].target,
phi_kernel_desc.outputsType[index].precision, phi_kernel_desc.outputsType[index].precision,
phi_kernel_desc.outputsType[index].layout)); phi_kernel_desc.outputsType[index].layout));
} }
operation_state.addAttributes(kernel_op.attrsAttr().getValue()); operation_state.addAttributes(kernel_op.attrsAttr().getValue());
mlir::Operation *phi_operation = builder.createOperation(operation_state); mlir::Operation *phi_operation = builder.createOperation(operation_state);
for (size_t index = 0; index < phi_kernel_desc.outputsType.size(); for (size_t index = 0; index < phi_kernel_desc.outputsType.size();
++index) { ++index) {
mlir::Value input = phi_operation->getResult(index); mlir::Value input = phi_operation->getResult(index);
auto cvt_tensor_type_op = builder.create<CvtTensorOp>( auto cvt_tensor_type_op = builder.create<infrt::CvtTensorOp>(
kernel_op.getLoc(), kernel_op.getResultTypes()[index], input); kernel_op.getLoc(), kernel_op.getResultTypes()[index], input);
kernel_op.getResult(index).replaceAllUsesWith( kernel_op.getResult(index).replaceAllUsesWith(
cvt_tensor_type_op.output()); cvt_tensor_type_op.output());
...@@ -185,4 +203,10 @@ void phiOpCvtPass::diapatchStage() { ...@@ -185,4 +203,10 @@ void phiOpCvtPass::diapatchStage() {
kernel_op.erase(); kernel_op.erase();
} }
} }
} // namespace infrt
} // namespace
// Factory for the phi op-conversion pass. The concrete phiOpCvtPass class
// lives in an anonymous namespace in this file, so callers construct it
// only through this function. `valid_places` is forwarded to the pass and
// constrains which phi kernels the dispatch stage may select.
std::unique_ptr<mlir::Pass> infrt::createPhiOpCvtPass(
    std::vector<Place> valid_places) {
  return std::make_unique<phiOpCvtPass>(valid_places);
}
...@@ -14,44 +14,14 @@ ...@@ -14,44 +14,14 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
namespace infrt { namespace infrt {
/* /*
* phiOpCvtPass. * phiOpCvtPass.
* * Convert the general operators from pd Dialect to phi dialect.
* Convert the general operators in pd Dialect to a infrt.kernelOp.
*
* source func:
*
* func @main() -> tensor<?xf32> {
* %a = "pd.feed"()...
* %c = "pd.conv2d"(%a) ...
* %d = "pd.conv3d"(%c) ...
* %f = "pd.conv2d"(%a) ...
* "pd.fetch" (%d, %f)
* }
*
* destination func:
* func @main() -> tensor<?xf32> {
* %a = "pd.feed"()...
* %c = "infrt.kernel"(%a){name = "conv2d"} ...
* %d = "infrt.kernel"(%c){name = "conv3d"}...
* %f = "infrt.kernel"(%a){name = "conv2d"}...
* "pd.fetch" (%d, %f)
* }
*/ */
class phiOpCvtPass std::unique_ptr<mlir::Pass> createPhiOpCvtPass(
: public mlir::PassWrapper<phiOpCvtPass, mlir::FunctionPass> { std::vector<Place> valid_places = std::vector<Place>());
public:
::llvm::StringRef getName() const override { return "phiOpCvtPass"; }
void runOnFunction() override;
explicit phiOpCvtPass(std::vector<Place> valid_places = std::vector<Place>())
: valid_places_(valid_places) {}
private:
void convertStage();
void diapatchStage();
std::vector<Place> valid_places_;
};
} // namespace infrt } // namespace infrt
...@@ -38,7 +38,7 @@ int main(int argc, char** argv) { ...@@ -38,7 +38,7 @@ int main(int argc, char** argv) {
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU, std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32, infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}}; infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places)); phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
if (mlir::failed(pm.run(*module))) { if (mlir::failed(pm.run(*module))) {
std::cout << "\npass failed!\n" << std::endl; std::cout << "\npass failed!\n" << std::endl;
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
#include <iostream> #include <iostream>
#include "paddle/infrt/common/global.h" #include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/init_infrt_dialects.h" #include "paddle/infrt/dialect/init_dialects.h"
namespace cl = llvm::cl; namespace cl = llvm::cl;
......
#ifndef INFRT_REWRITE #ifndef INFRT_REWRITE
#define INFRT_REWRITE #define INFRT_REWRITE
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/pd_ops.td" include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/pd_extra_ops.td" include "paddle/infrt/dialect/pd_extra_ops.td"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#else #else
#define INFRT_OPS #define INFRT_OPS
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "paddle/infrt/dialect/tensor_shape_base.td" include "paddle/infrt/dialect/tensor_shape_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define PD_LOWER_TO_TRT #define PD_LOWER_TO_TRT
include "mlir/Interfaces/SideEffectInterfaces.td" include "mlir/Interfaces/SideEffectInterfaces.td"
include "paddle/infrt/dialect/infrt_base.td" include "paddle/infrt/dialect/infrt/ir/infrt_base.td"
include "paddle/infrt/dialect/pd_ops.td" include "paddle/infrt/dialect/pd_ops.td"
include "paddle/infrt/dialect/tensorrt/trt_ops.td" include "paddle/infrt/dialect/tensorrt/trt_ops.td"
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -28,17 +27,17 @@ namespace trt { ...@@ -28,17 +27,17 @@ namespace trt {
* func @main(%a : tensor<?xf32>) -> tensor<?xf32> { * func @main(%a : tensor<?xf32>) -> tensor<?xf32> {
* %c = "pd.graph"(%a) { * %c = "pd.graph"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "infrt.return" (%m) * infrt.return %m...
* } ... * } ...
* %d = "pd.graph"(%c) { * %d = "pd.graph"(%c) {
* %m = "pd.conv3d"(%c)... * %m = "pd.conv3d"(%c)...
* "infrt.return" (%m) * infrt.return %m...
* } ... * } ...
* %f = "pd.graph"(%a) { * %f = "pd.graph"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "infrt.return" (%m) * infrt.return %m...
* } ... * } ...
* "infrt.return" (%d, %f).. * infrt.return %d, %f :...
* } * }
* *
* destination func: * destination func:
...@@ -47,9 +46,9 @@ namespace trt { ...@@ -47,9 +46,9 @@ namespace trt {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "infrt.return" (%n, %s) * infrt.return %n, %s:...
* } ... * } ...
* "infrt.return" (%d, %f) * infrt.return %d, %f:...
* } * }
*/ */
class TRTGraphFusePass class TRTGraphFusePass
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -31,9 +30,9 @@ namespace trt { ...@@ -31,9 +30,9 @@ namespace trt {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "infrt.return" (%n, %s)... * infrt.return %n, %s : ...
* } ... * } ...
* "infrt.return" (%d, %f)... * infrt.return %d, %f : ...
* } * }
* *
* destination func: * destination func:
...@@ -41,7 +40,7 @@ namespace trt { ...@@ -41,7 +40,7 @@ namespace trt {
* %c = "pd.conv2d"(%a) ... * %c = "pd.conv2d"(%a) ...
* %d = "pd.conv3d"(%c) ... * %d = "pd.conv3d"(%c) ...
* %f = "pd.conv2d"(%a) ... * %f = "pd.conv2d"(%a) ...
* "infrt.return" (%d, %f)... * infrt.return %d, %f:...
* } * }
*/ */
class TRTGraphSplitPass class TRTGraphSplitPass
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h" #include "paddle/infrt/dialect/tensorrt/trt_op_converter_pass.h"
#include <mlir/IR/Builders.h> #include <mlir/IR/Builders.h>
#include <mlir/Transforms/DialectConversion.h> #include <mlir/Transforms/DialectConversion.h>
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
#include "paddle/infrt/dialect/tensorrt/trt_dialect_types.h" #include "paddle/infrt/dialect/tensorrt/trt_dialect_types.h"
...@@ -24,7 +23,7 @@ namespace trt { ...@@ -24,7 +23,7 @@ namespace trt {
#include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc" // NOLINT #include "paddle/infrt/dialect/tensorrt/pd_lower_to_trt.cpp.inc" // NOLINT
struct PD2TRT_GraphLower : public ::mlir::RewritePattern { struct PD2TRT_GraphLower : public ::mlir::RewritePattern {
PD2TRT_GraphLower(::mlir::MLIRContext *context) explicit PD2TRT_GraphLower(::mlir::MLIRContext *context)
: ::mlir::RewritePattern("pd.graph", 1, context, {"trt.create_engine"}) {} : ::mlir::RewritePattern("pd.graph", 1, context, {"trt.create_engine"}) {}
::mlir::LogicalResult matchAndRewrite( ::mlir::LogicalResult matchAndRewrite(
::mlir::Operation *op, ::mlir::PatternRewriter &rewriter) const override { ::mlir::Operation *op, ::mlir::PatternRewriter &rewriter) const override {
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#pragma once #pragma once
#include "mlir/IR/Dialect.h" #include "mlir/IR/Dialect.h"
#include "mlir/Pass/Pass.h" #include "mlir/Pass/Pass.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/tensorrt/trt_ops.h" #include "paddle/infrt/dialect/tensorrt/trt_ops.h"
namespace infrt { namespace infrt {
...@@ -29,9 +29,9 @@ namespace trt { ...@@ -29,9 +29,9 @@ namespace trt {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* %n = "pd.conv3d"(%m)... * %n = "pd.conv3d"(%m)...
* %s = "pd.conv2d"(%a)... * %s = "pd.conv2d"(%a)...
* "infrt.return" (%n, %s)... * infrt.return %n, %s:...
* } ... * } ...
* "infrt.return" (%d, %f)... * infrt.return %d, %f:...
* } * }
* *
* destination ir: * destination ir:
...@@ -40,10 +40,10 @@ namespace trt { ...@@ -40,10 +40,10 @@ namespace trt {
* %m = "trt.Convolution"(%a)... * %m = "trt.Convolution"(%a)...
* %n = "trt.Convolution"(%m)... * %n = "trt.Convolution"(%m)...
* %s = "trt.Convolution"(%a)... * %s = "trt.Convolution"(%a)...
* "infrt.return" (%n, %s)... * infrt.return %n, %s :...
* }){run_once = true} ... * }){run_once = true} ...
* %d, %f = "trt.execute"(%engine, %a)... * %d, %f = "trt.execute"(%engine, %a)...
* "infrt.return" (%d, %f)... * infrt.return %d, %f :...
* } * }
*/ */
struct TRTOpConverterPass struct TRTOpConverterPass
......
...@@ -15,8 +15,8 @@ ...@@ -15,8 +15,8 @@
#include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h" #include "paddle/infrt/dialect/tensorrt/trt_op_teller_pass.h"
#include <mlir/IR/Builders.h> #include <mlir/IR/Builders.h>
#include "paddle/infrt/dialect/basic_kernels.h" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
namespace infrt { namespace infrt {
......
...@@ -14,7 +14,6 @@ ...@@ -14,7 +14,6 @@
#pragma once #pragma once
#include <mlir/Pass/Pass.h> #include <mlir/Pass/Pass.h>
#include "paddle/infrt/dialect/infrt_base.h"
namespace infrt { namespace infrt {
namespace trt { namespace trt {
...@@ -29,24 +28,24 @@ namespace trt { ...@@ -29,24 +28,24 @@ namespace trt {
* %c = "pd.conv2d"(%a) ... * %c = "pd.conv2d"(%a) ...
* %d = "pd.conv3d"(%c) ... * %d = "pd.conv3d"(%c) ...
* %f = "pd.conv2d"(%a) ... * %f = "pd.conv2d"(%a) ...
* "infrt.return"(%d, %f) ... * infrt.return %d, %f: ...
* } * }
* *
* destination func: * destination func:
* func @main(%a : tensor<?xf32>) -> tensor<?xf32> { * func @main(%a : tensor<?xf32>) -> tensor<?xf32> {
* %c = "pd.graph"(%a) { * %c = "pd.graph"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "infrt.return" (%m) * infrt.return %m:...
* } ... * } ...
* %d = "pd.graph"(%c) { * %d = "pd.graph"(%c) {
* %m = "pd.conv3d"(%c)... * %m = "pd.conv3d"(%c)...
* "infrt.return" (%m) * infrt.return %m:...
* } ... * } ...
* %f = "pd.graph"(%a) { * %f = "pd.graph"(%a) {
* %m = "pd.conv2d"(%a)... * %m = "pd.conv2d"(%a)...
* "infrt.return" (%m) * infrt.return %m:...
* } ... * } ...
* "infrt.return" (%d, %f) * infrt.return %d, %f:...
* } * }
* TODO(winter-wang): Supplementary how to judge the operators can be supported * TODO(winter-wang): Supplementary how to judge the operators can be supported
* by tensorrt. * by tensorrt.
......
...@@ -28,8 +28,8 @@ ...@@ -28,8 +28,8 @@
#include <mlir/Interfaces/InferTypeOpInterface.h> #include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h> #include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h> #include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/basic_kernels.h" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/infrt/infrt_dialect.h" #include "paddle/infrt/dialect/infrt/ir/infrt_dialect.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
namespace infrt { namespace infrt {
......
// CHECK: basic // CHECK: basic
func @basic() -> f32 { func @basic() -> f32 {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%v2 = "external.add.f32"(%v0, %v1) : (f32, f32) -> f32 %v2 = "external.add.f32"(%v0, %v1) : (f32, f32) -> f32
// CHECK: 1 // CHECK: 1
...@@ -17,5 +17,5 @@ func @basic() -> f32 { ...@@ -17,5 +17,5 @@ func @basic() -> f32 {
// CHECK: 6 // CHECK: 6
"external.print.f32"(%v3) : (f32) -> () "external.print.f32"(%v3) : (f32) -> ()
Infrt.return %v3 : f32 infrt.return %v3 : f32
} }
// CHECK-LABEL: @fc // CHECK-LABEL: @fc
func @fc(%input : !Infrt.tensor<X86, NCHW, F32>, func @fc(%input : !infrt.dense_tensor<CPU, FP32, NCHW>,
%w : !Infrt.tensor<X86, NCHW, F32>, %w : !infrt.dense_tensor<CPU, FP32, NCHW>,
%bias : !Infrt.tensor<X86, NCHW, F32>) -> !Infrt.tensor<X86, NCHW, F32> %bias : !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
{ {
%out = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32> %out = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
// dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} // dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
// fc1 // fc1
"external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
// fc2 // fc2
"external.matmul"(%out, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.matmul"(%out, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
Infrt.return %out : !Infrt.tensor<X86, NCHW, F32> infrt.return %out : !infrt.dense_tensor<CPU, FP32, NCHW>
} }
// CHECK-LABEL: @benchmark // CHECK-LABEL: @benchmark
func @benchmark() { func @benchmark() {
%input = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32> %input = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32} dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
%w = dt.create_uninit_tensor.f32 [50, 50] -> !Infrt.tensor<X86, NCHW, F32> %w = dt.create_uninit_tensor.f32 [50, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32} dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
%bias = dt.create_uninit_tensor.f32 [30, 50] -> !Infrt.tensor<X86, NCHW, F32> %bias = dt.create_uninit_tensor.f32 [30, 50] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32} dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
Infrt.benchmark "add.f32"( infrt.benchmark "add.f32"(
%input:!Infrt.tensor<X86, NCHW, F32>, %input:!infrt.dense_tensor<CPU, FP32, NCHW>,
%w:!Infrt.tensor<X86, NCHW, F32>, %w:!infrt.dense_tensor<CPU, FP32, NCHW>,
%bias:!Infrt.tensor<X86, NCHW, F32>) %bias:!infrt.dense_tensor<CPU, FP32, NCHW>)
duration_secs = 100, max_count = 300000, num_warmup_runs = 3 duration_secs = 100, max_count = 300000, num_warmup_runs = 3
{ {
%res = Infrt.call @fc(%input, %w, %bias) : (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> (!Infrt.tensor<X86, NCHW, F32>) %res = infrt.call @fc(%input, %w, %bias) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return %res : !Infrt.tensor<X86, NCHW, F32> infrt.return %res : !infrt.dense_tensor<CPU, FP32, NCHW>
} }
Infrt.return infrt.return
} }
// CHECK: paddle_func // CHECK: paddle_func
func @paddle_func() -> () { func @paddle_func() -> () {
%input = dt.create_uninit_tensor.f32 [3, 5] -> !Infrt.tensor<X86, NCHW, F32> %input = dt.create_uninit_tensor.f32 [3, 5] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32} dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
%w = dt.create_uninit_tensor.f32 [5, 4] -> !Infrt.tensor<X86, NCHW, F32> %w = dt.create_uninit_tensor.f32 [5, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%w : !Infrt.tensor<X86, NCHW, F32>) {value=2.0:f32} dt.fill_tensor_with_constant.f32 (%w : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=2.0:f32}
%bias = dt.create_uninit_tensor.f32 [4] -> !Infrt.tensor<X86, NCHW, F32> %bias = dt.create_uninit_tensor.f32 [4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%bias : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32} dt.fill_tensor_with_constant.f32 (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
%out = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %out = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%out : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} dt.fill_tensor_with_constant.f32 (%out : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
"external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.fc2"(%input, %w, %bias, %out) {in_num_col_dims=3:i32, test_attr=5:i32}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
// CHECK-LABEL: tensor: shape=shape[3,5], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] // CHECK-LABEL: tensor: shape=shape[3,5], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
dt.print_tensor (%input : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%input : !infrt.dense_tensor<CPU, FP32, NCHW>)
// CHECK-LABEL: tensor: shape=shape[5,4], values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2] // CHECK-LABEL: tensor: shape=shape[5,4], values=[2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]
dt.print_tensor (%w : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%w : !infrt.dense_tensor<CPU, FP32, NCHW>)
dt.print_tensor (%bias : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%bias : !infrt.dense_tensor<CPU, FP32, NCHW>)
dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
// test external.matmul // test external.matmul
%out1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %out1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%out1 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} dt.fill_tensor_with_constant.f32 (%out1 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
"external.matmul"(%input, %w, %out1) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.matmul"(%input, %w, %out1) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
dt.print_tensor (%out1 : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out1 : !infrt.dense_tensor<CPU, FP32, NCHW>)
// test external.elementwise_add // test external.elementwise_add
%out2 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %out2 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%out2 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} dt.fill_tensor_with_constant.f32 (%out2 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
%bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %bias1 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%bias1 : !Infrt.tensor<X86, NCHW, F32>) {value=3.0:f32} dt.fill_tensor_with_constant.f32 (%bias1 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=3.0:f32}
"external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.elementwise_add"(%out1, %bias1, %out2) {axis=-1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
dt.print_tensor (%out2 : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
// test external.relu // test external.relu
%out3 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %out3 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%out3 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} dt.fill_tensor_with_constant.f32 (%out3 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
"external.relu"(%out1, %out3) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.relu"(%out1, %out3) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
dt.print_tensor (%out3 : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out3 : !infrt.dense_tensor<CPU, FP32, NCHW>)
// test external.sigmoid // test external.sigmoid
%out4 = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %out4 = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%out4 : !Infrt.tensor<X86, NCHW, F32>) {value=0.0:f32} dt.fill_tensor_with_constant.f32 (%out4 : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=0.0:f32}
"external.sigmoid"(%out1, %out4) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.sigmoid"(%out1, %out4) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
dt.print_tensor (%out4 : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out4 : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -92,7 +92,7 @@ int main(int argc, char** argv) { ...@@ -92,7 +92,7 @@ int main(int argc, char** argv) {
std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU, std::vector<infrt::Place> valid_places = {{infrt::TargetType::CPU,
infrt::PrecisionType::FLOAT32, infrt::PrecisionType::FLOAT32,
infrt::LayoutType::NCHW}}; infrt::LayoutType::NCHW}};
phi_pass_manager.addPass(std::make_unique<infrt::phiOpCvtPass>(valid_places)); phi_pass_manager.addPass(infrt::createPhiOpCvtPass(valid_places));
phi_pass_manager.addPass(infrt::createInfrtOpFusePass()); phi_pass_manager.addPass(infrt::createInfrtOpFusePass());
#endif #endif
......
// CHECK-LABEL: basic // CHECK-LABEL: basic
func @basic() -> f32 { func @basic() -> f32 {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
// CHECK: 1 // CHECK: 1
"Infrt.print.f32"(%v0) : (f32) -> () "infrt.print.f32"(%v0) : (f32) -> ()
// CHECK: 2 // CHECK: 2
"Infrt.print.f32"(%v1) : (f32) -> () "infrt.print.f32"(%v1) : (f32) -> ()
// CHECK: 3 // CHECK: 3
"Infrt.print.f32"(%v2) : (f32) -> () "infrt.print.f32"(%v2) : (f32) -> ()
%v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
// CHECK: 6 // CHECK: 6
"Infrt.print.f32"(%v3) : (f32) -> () "infrt.print.f32"(%v3) : (f32) -> ()
Infrt.return %v3 : f32 infrt.return %v3 : f32
} }
// CHECK-LABEL: basic1 // CHECK-LABEL: basic1
// Check the mlir executor can work with more than one function in a file. // Check the mlir executor can work with more than one function in a file.
func @basic1() -> () { func @basic1() -> () {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
"Infrt.print.f32"(%v0) : (f32) -> () "infrt.print.f32"(%v0) : (f32) -> ()
// CHECK: 1 // CHECK: 1
Infrt.return infrt.return
} }
\ No newline at end of file
// CHECK-LABEL: build_tensor1 // CHECK-LABEL: build_tensor1
func @build_tensor1() { func @build_tensor1() {
%a = dt.create_uninit_tensor.f32 [3, 4] -> !Infrt.tensor<X86, NCHW, F32> %a = dt.create_uninit_tensor.f32 [3, 4] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%a : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32} dt.fill_tensor_with_constant.f32 (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
// CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
dt.print_tensor (%a : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -3,5 +3,5 @@ func @build_tensor1() { ...@@ -3,5 +3,5 @@ func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64] %a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92] // CHECK: shape[1,57,92]
ts.print_shape %a ts.print_shape %a
Infrt.return infrt.return
} }
\ No newline at end of file
...@@ -75,7 +75,7 @@ struct MlirToRuntimeTranslator::Impl { ...@@ -75,7 +75,7 @@ struct MlirToRuntimeTranslator::Impl {
}; };
bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) { bool MlirToRuntimeTranslator::EmitConstantOp(mlir::Operation* op) {
if (!infrt::Startswith(op->getName().getStringRef().str(), "Infrt.constant")) if (!infrt::Startswith(op->getName().getStringRef().str(), "infrt.constant"))
return false; return false;
VLOG(3) << "Emitting constant op [" << op->getName().getStringRef().str() VLOG(3) << "Emitting constant op [" << op->getName().getStringRef().str()
<< "]"; << "]";
...@@ -267,7 +267,7 @@ boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute( ...@@ -267,7 +267,7 @@ boost::optional<std::vector<double>> MlirToRuntimeTranslator::EmitAttribute(
} }
static bool IsReturn(mlir::Operation* op) { static bool IsReturn(mlir::Operation* op) {
return op->getName().getStringRef() == "Infrt.return"; return op->getName().getStringRef() == "infrt.return";
} }
bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) { bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
...@@ -405,7 +405,7 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) { ...@@ -405,7 +405,7 @@ bool MlirToRuntimeTranslator::EmitGeneralOp(mlir::Operation* op) {
bool MlirToRuntimeTranslator::EmitReturnOp( bool MlirToRuntimeTranslator::EmitReturnOp(
mlir::Operation* op, llvm::SmallVectorImpl<mlir::Value>* results) { mlir::Operation* op, llvm::SmallVectorImpl<mlir::Value>* results) {
CHECK(results); CHECK(results);
if (op->getName().getStringRef() == "Infrt.return") { if (op->getName().getStringRef() == "infrt.return") {
for (size_t i = 0; i < op->getNumOperands(); i++) { for (size_t i = 0; i < op->getNumOperands(); i++) {
results->push_back(op->getOperand(i)); results->push_back(op->getOperand(i));
} }
...@@ -478,7 +478,7 @@ bool MlirToRuntimeTranslator::EmitCallOp(mlir::Operation* op, ...@@ -478,7 +478,7 @@ bool MlirToRuntimeTranslator::EmitCallOp(mlir::Operation* op,
function_defs_t* function_table) { function_defs_t* function_table) {
CHECK(op); CHECK(op);
CHECK(function_table); CHECK(function_table);
if (op->getName().getStringRef() != "Infrt.call") return false; if (op->getName().getStringRef() != "infrt.call") return false;
impl_->cur_op = impl_->cur_op =
impl_->runtime->NewOpExecutable(op->getName().getStringRef().str()); impl_->runtime->NewOpExecutable(op->getName().getStringRef().str());
......
...@@ -57,7 +57,7 @@ class MlirToRuntimeTranslator { ...@@ -57,7 +57,7 @@ class MlirToRuntimeTranslator {
protected: protected:
//! Emit a "infrt.constant.*" operation, return true if succeed. //! Emit a "infrt.constant.*" operation, return true if succeed.
bool EmitConstantOp(mlir::Operation* op); bool EmitConstantOp(mlir::Operation* op);
//! Emit a "Infrt.return" operation. //! Emit a "infrt.return" operation.
bool EmitReturnOp(mlir::Operation* op, bool EmitReturnOp(mlir::Operation* op,
llvm::SmallVectorImpl<mlir::Value>* results); llvm::SmallVectorImpl<mlir::Value>* results);
//! Emit a "ts.build_shape" operation. //! Emit a "ts.build_shape" operation.
......
...@@ -37,14 +37,14 @@ TEST(MlirToRuntimeTranslate, basic) { ...@@ -37,14 +37,14 @@ TEST(MlirToRuntimeTranslate, basic) {
auto source = R"ROC( auto source = R"ROC(
func @main() -> () { func @main() -> () {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
%v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
"Infrt.print.f32"(%v1) : (f32) -> () "infrt.print.f32"(%v1) : (f32) -> ()
Infrt.return infrt.return
} }
)ROC"; )ROC";
...@@ -63,14 +63,14 @@ TEST(TestMlir, basic) { ...@@ -63,14 +63,14 @@ TEST(TestMlir, basic) {
auto source = R"ROC( auto source = R"ROC(
func @main() -> () { func @main() -> () {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%v2 = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 %v2 = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
%v3 = "Infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32 %v3 = "infrt.mul.f32"(%v2, %v1) : (f32, f32) -> f32
"Infrt.print.f32"(%v1) : (f32) -> () "infrt.print.f32"(%v1) : (f32) -> ()
Infrt.return infrt.return
} }
)ROC"; )ROC";
...@@ -101,7 +101,7 @@ func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor< ...@@ -101,7 +101,7 @@ func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<
"!infrt.dense_tensor<CPU, FP32, NCHW>"; "!infrt.dense_tensor<CPU, FP32, NCHW>";
auto end = R"ROC( auto end = R"ROC(
Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW> infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
} }
)ROC"; )ROC";
......
...@@ -19,7 +19,6 @@ MLIRModelGenImpl::MLIRModelGenImpl() ...@@ -19,7 +19,6 @@ MLIRModelGenImpl::MLIRModelGenImpl()
: context_(infrt::Global::getMLIRContext()), builder_(context_) { : context_(infrt::Global::getMLIRContext()), builder_(context_) {
context_->allowUnregisteredDialects(); context_->allowUnregisteredDialects();
context_->getOrLoadDialect<mlir::StandardOpsDialect>(); context_->getOrLoadDialect<mlir::StandardOpsDialect>();
context_->getOrLoadDialect<infrt::dialect::INFRTDialect>();
context_->getOrLoadDialect<infrt::ts::TensorShapeDialect>(); context_->getOrLoadDialect<infrt::ts::TensorShapeDialect>();
context_->getOrLoadDialect<infrt::dt::DTDialect>(); context_->getOrLoadDialect<infrt::dt::DTDialect>();
context_->getOrLoadDialect<mlir::pd::PaddleDialect>(); context_->getOrLoadDialect<mlir::pd::PaddleDialect>();
......
...@@ -25,10 +25,10 @@ ...@@ -25,10 +25,10 @@
#include "mlir/IR/MLIRContext.h" #include "mlir/IR/MLIRContext.h"
#include "paddle/infrt/common/global.h" #include "paddle/infrt/common/global.h"
#include "paddle/infrt/common/string.h" #include "paddle/infrt/common/string.h"
#include "paddle/infrt/dialect/basic_kernels.h"
#include "paddle/infrt/dialect/dense_tensor.h" #include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt_base.h" #include "paddle/infrt/dialect/infrt/ir/basic_kernels.h"
#include "paddle/infrt/dialect/init_infrt_dialects.h"
#include "paddle/infrt/dialect/init_dialects.h"
#include "paddle/infrt/dialect/pd_ops.h" #include "paddle/infrt/dialect/pd_ops.h"
#include "paddle/infrt/dialect/tensor_shape.h" #include "paddle/infrt/dialect/tensor_shape.h"
#include "paddle/infrt/paddle/model_parser.h" #include "paddle/infrt/paddle/model_parser.h"
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
#include "paddle/infrt/common/object.h" #include "paddle/infrt/common/object.h"
#include "paddle/infrt/common/shared.h" #include "paddle/infrt/common/shared.h"
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
#include "paddle/infrt/host_context/function.h" #include "paddle/infrt/host_context/function.h"
#include "paddle/infrt/support/variant.h" #include "paddle/infrt/support/variant.h"
#include "paddle/infrt/tensor/dense_host_tensor.h" #include "paddle/infrt/tensor/dense_host_tensor.h"
......
...@@ -63,24 +63,24 @@ static void PrintString(const std::string &str) { ...@@ -63,24 +63,24 @@ static void PrintString(const std::string &str) {
void RegisterBasicKernels(host_context::KernelRegistry *registry) { void RegisterBasicKernels(host_context::KernelRegistry *registry) {
RegisterIntBasicKernels(registry); RegisterIntBasicKernels(registry);
RegisterFloatBasicKernels(registry); RegisterFloatBasicKernels(registry);
registry->AddKernel("Infrt.get_string", INFRT_KERNEL(GetString)); registry->AddKernel("infrt.get_string", INFRT_KERNEL(GetString));
registry->AddKernel("Infrt.print_string", INFRT_KERNEL(PrintString)); registry->AddKernel("infrt.print_string", INFRT_KERNEL(PrintString));
} }
void RegisterIntBasicKernels(host_context::KernelRegistry *registry) { void RegisterIntBasicKernels(host_context::KernelRegistry *registry) {
registry->AddKernel("Infrt.add.i32", INFRT_KERNEL(add<int32_t>)); registry->AddKernel("infrt.add.i32", INFRT_KERNEL(add<int32_t>));
registry->AddKernel("Infrt.sub.i32", INFRT_KERNEL(sub<int32_t>)); registry->AddKernel("infrt.sub.i32", INFRT_KERNEL(sub<int32_t>));
registry->AddKernel("Infrt.mul.i32", INFRT_KERNEL(mul<int32_t>)); registry->AddKernel("infrt.mul.i32", INFRT_KERNEL(mul<int32_t>));
registry->AddKernel("Infrt.div.i32", INFRT_KERNEL(div<int32_t>)); registry->AddKernel("infrt.div.i32", INFRT_KERNEL(div<int32_t>));
registry->AddKernel("Infrt.print.i32", INFRT_KERNEL(print<int32_t>)); registry->AddKernel("infrt.print.i32", INFRT_KERNEL(print<int32_t>));
} }
void RegisterFloatBasicKernels(host_context::KernelRegistry *registry) { void RegisterFloatBasicKernels(host_context::KernelRegistry *registry) {
registry->AddKernel("Infrt.add.f32", INFRT_KERNEL(add<float>)); registry->AddKernel("infrt.add.f32", INFRT_KERNEL(add<float>));
registry->AddKernel("Infrt.sub.f32", INFRT_KERNEL(sub<float>)); registry->AddKernel("infrt.sub.f32", INFRT_KERNEL(sub<float>));
registry->AddKernel("Infrt.mul.f32", INFRT_KERNEL(mul<float>)); registry->AddKernel("infrt.mul.f32", INFRT_KERNEL(mul<float>));
registry->AddKernel("Infrt.div.f32", INFRT_KERNEL(div<float>)); registry->AddKernel("infrt.div.f32", INFRT_KERNEL(div<float>));
registry->AddKernel("Infrt.print.f32", INFRT_KERNEL(print<float>)); registry->AddKernel("infrt.print.f32", INFRT_KERNEL(print<float>));
} }
} // namespace kernel } // namespace kernel
......
...@@ -37,7 +37,7 @@ static void INFRTCall( ...@@ -37,7 +37,7 @@ static void INFRTCall(
} }
void RegisterControlFlowKernels(host_context::KernelRegistry* registry) { void RegisterControlFlowKernels(host_context::KernelRegistry* registry) {
registry->AddKernel("Infrt.call", INFRT_KERNEL(INFRTCall)); registry->AddKernel("infrt.call", INFRT_KERNEL(INFRTCall));
} }
} // namespace kernel } // namespace kernel
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#pragma once #pragma once
#include "paddle/infrt/backends/host/phi_allocator.h" #include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/dialect/infrt/common_type.h" #include "paddle/infrt/dialect/infrt/common/types.h"
#include "paddle/infrt/host_context/kernel_utils.h" #include "paddle/infrt/host_context/kernel_utils.h"
#include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/dense_tensor.h"
......
...@@ -193,7 +193,7 @@ tensor::DenseHostTensor ShadowCopyTensor(tensor::DenseHostTensor src) { ...@@ -193,7 +193,7 @@ tensor::DenseHostTensor ShadowCopyTensor(tensor::DenseHostTensor src) {
} }
void RegisterTestKernels(host_context::KernelRegistry *registry) { void RegisterTestKernels(host_context::KernelRegistry *registry) {
registry->AddKernel("Infrt.benchmark", INFRT_KERNEL(benchmark)); registry->AddKernel("infrt.benchmark", INFRT_KERNEL(benchmark));
registry->AddKernel("Infrt.test.shadow_copy_tensor", registry->AddKernel("Infrt.test.shadow_copy_tensor",
INFRT_KERNEL(ShadowCopyTensor)); INFRT_KERNEL(ShadowCopyTensor));
} }
......
// RUN: infrtexec -i %s | FileCheck %s // RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: @basic_f32 // CHECK-LABEL: @basic_f32
func @basic_f32() -> f32 { func @basic_f32() -> f32 {
%v0 = Infrt.constant.f32 1.0 %v0 = infrt.constant.f32 1.0
%v1 = Infrt.constant.f32 2.0 %v1 = infrt.constant.f32 2.0
%value = "Infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32 %value = "infrt.add.f32"(%v0, %v1) : (f32, f32) -> f32
// CHECK-NEXT: 3 // CHECK-NEXT: 3
"Infrt.print.f32"(%value) : (f32) -> () "infrt.print.f32"(%value) : (f32) -> ()
Infrt.return %value : f32 infrt.return %value : f32
} }
/// ================================================================ /// ================================================================
/// @caller call the other function @callee /// @caller call the other function @callee
func @callee.add.f32(%x : f32, %y : f32, %y1 : f32) -> f32 { func @callee.add.f32(%x : f32, %y : f32, %y1 : f32) -> f32 {
%z = "Infrt.add.f32"(%x, %y) : (f32, f32) -> f32 %z = "infrt.add.f32"(%x, %y) : (f32, f32) -> f32
%z1 = "Infrt.add.f32"(%z, %y1) : (f32, f32) -> f32 %z1 = "infrt.add.f32"(%z, %y1) : (f32, f32) -> f32
Infrt.return %z1 : f32 infrt.return %z1 : f32
} }
// CHECK-LABEL: @caller.add.f32 // CHECK-LABEL: @caller.add.f32
func @caller.add.f32() -> f32 { func @caller.add.f32() -> f32 {
%x = Infrt.constant.f32 1.0 %x = infrt.constant.f32 1.0
%y = Infrt.constant.f32 2.0 %y = infrt.constant.f32 2.0
%y1 = Infrt.constant.f32 3.0 %y1 = infrt.constant.f32 3.0
%z = Infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32 %z = infrt.call @callee.add.f32(%x, %y, %y1) : (f32, f32, f32) -> f32
// CHECK-NEXT: 6 // CHECK-NEXT: 6
"Infrt.print.f32"(%z) : (f32) -> () "infrt.print.f32"(%z) : (f32) -> ()
Infrt.return %z : f32 infrt.return %z : f32
} }
/// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< /// <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
...@@ -12,13 +12,13 @@ func @benchmark() { ...@@ -12,13 +12,13 @@ func @benchmark() {
// CHECK-LABEL: BM:add.f32:CPU 95%(ns) // CHECK-LABEL: BM:add.f32:CPU 95%(ns)
// CHECK-LABEL: BM:add.f32:CPU 99%(ns) // CHECK-LABEL: BM:add.f32:CPU 99%(ns)
// CHECK-LABEL: BM:add.f32:CPU utilization(percent) // CHECK-LABEL: BM:add.f32:CPU utilization(percent)
Infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3 infrt.benchmark "add.f32"() duration_secs = 1, max_count = 3, num_warmup_runs = 3
{ {
%0 = Infrt.constant.f32 1.0 %0 = infrt.constant.f32 1.0
%1 = Infrt.constant.f32 2.0 %1 = infrt.constant.f32 2.0
%res = "Infrt.add.f32"(%0, %1) : (f32, f32) -> f32 %res = "infrt.add.f32"(%0, %1) : (f32, f32) -> f32
"Infrt.print.f32"(%res) : (f32) -> () "infrt.print.f32"(%res) : (f32) -> ()
Infrt.return %res : f32 infrt.return %res : f32
} }
Infrt.return infrt.return
} }
...@@ -4,14 +4,14 @@ func @dense_shape0() { ...@@ -4,14 +4,14 @@ func @dense_shape0() {
%shape = ts.build_shape [1:i64, 57:i64] %shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW> %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return infrt.return
} }
func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) { func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
%a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW> %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
%b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW> %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW> infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
} }
...@@ -19,6 +19,6 @@ func @main() { ...@@ -19,6 +19,6 @@ func @main() {
%shape = ts.build_shape [1:i64, 57:i64] %shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW> %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
%b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
// CHECK-LABEL: @predict // CHECK-LABEL: @predict
func @predict(%input:!Infrt.tensor<X86, NCHW, F32>, %map: !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>) { func @predict(%input:!infrt.dense_tensor<CPU, FP32, NCHW>, %map: !infrt.dense_tensor_map) -> (!infrt.dense_tensor<CPU, FP32, NCHW>) {
%w = dt.get_param(%map, "create_parameter_0.w_0") -> !Infrt.tensor<X86, NCHW, F32> %w = dt.get_param(%map, "create_parameter_0.w_0") -> !infrt.dense_tensor<CPU, FP32, NCHW>
%bias = dt.get_param(%map, "create_parameter_1.w_0") -> !Infrt.tensor<X86, NCHW, F32> %bias = dt.get_param(%map, "create_parameter_1.w_0") -> !infrt.dense_tensor<CPU, FP32, NCHW>
%out = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32> %out = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor<CPU, FP32, NCHW>
// fc // fc
"external.matmul"(%input, %w, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.matmul"(%input, %w, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.elementwise_add"(%out, %bias, %out) {axis = -1}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
"external.sigmoid"(%out, %out) {}: (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor<X86, NCHW, F32>) -> () "external.sigmoid"(%out, %out) {}: (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
//dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>) //dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return %out : !Infrt.tensor<X86, NCHW, F32> infrt.return %out : !infrt.dense_tensor<CPU, FP32, NCHW>
} }
// CHECK-LABEL: @main // CHECK-LABEL: @main
func @main() { func @main() {
%input = dt.create_uninit_tensor.f32 [3, 3] -> !Infrt.tensor<X86, NCHW, F32> %input = dt.create_uninit_tensor.f32 [3, 3] -> !infrt.dense_tensor<CPU, FP32, NCHW>
dt.fill_tensor_with_constant.f32 (%input : !Infrt.tensor<X86, NCHW, F32>) {value=1.0:f32} dt.fill_tensor_with_constant.f32 (%input : !infrt.dense_tensor<CPU, FP32, NCHW>) {value=1.0:f32}
// CHECK-LABEL: loading params // CHECK-LABEL: loading params
%map = dt.load_params() {path="/Infrt/build/paddle/paddle_1.8_fc_model"} %map = dt.load_params() {path="/Infrt/build/paddle/paddle_1.8_fc_model"}
%out = Infrt.call @predict(%input, %map): (!Infrt.tensor<X86, NCHW, F32>, !Infrt.tensor_map) -> (!Infrt.tensor<X86, NCHW, F32>) %out = infrt.call @predict(%input, %map): (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor_map) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
dt.print_tensor (%out : !Infrt.tensor<X86, NCHW, F32>) dt.print_tensor (%out : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -5,5 +5,5 @@ func @ops() { ...@@ -5,5 +5,5 @@ func @ops() {
%b = pd.feed() {name="input1"}: tensor<?xf32> %b = pd.feed() {name="input1"}: tensor<?xf32>
%d = pd.feed() {name="input3"}: !infrt.lod_tensor<3x4x9xf32, 0> %d = pd.feed() {name="input3"}: !infrt.lod_tensor<3x4x9xf32, 0>
%c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32> %c = "pd.matmul"(%a, %b) {transpose_x=true, transpose_y=false} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
Infrt.return infrt.return
} }
...@@ -11,6 +11,6 @@ func @sign_any_float32_execute() { ...@@ -11,6 +11,6 @@ func @sign_any_float32_execute() {
// CHECK: dense_tensor: shape=shape[1], values=[1] // CHECK: dense_tensor: shape=shape[1], values=[1]
"phi_dt.print_tensor" (%e) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> () "phi_dt.print_tensor" (%e) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
Infrt.return infrt.return
} }
...@@ -2,14 +2,14 @@ ...@@ -2,14 +2,14 @@
module { module {
func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> { func @predict(%arg0: !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> {
%2 = "pd.abs"(%arg0) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> %2 = "pd.abs"(%arg0) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return %2 : !infrt.dense_tensor<CPU, FP32, NCHW> infrt.return %2 : !infrt.dense_tensor<CPU, FP32, NCHW>
} }
func @main() { func @main() {
%ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU> %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
%t = "phi_dt.create_dense_tensor" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>) %t = "phi_dt.create_dense_tensor" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1:i64], dims=[1:i64]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
"phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> () "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
%2 = Infrt.call@predict(%t) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW> %2 = infrt.call@predict(%t) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>) phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
} }
...@@ -3,14 +3,14 @@ ...@@ -3,14 +3,14 @@
func @dense_shape0() { func @dense_shape0() {
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW> %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return infrt.return
} }
func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) { func @predict(%a: !infrt.dense_tensor<CPU, FP32, NCHW>, %b: !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) {
%a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW> %a0 = dt.shallow_copy_tensor %a : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
%b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW> %b0 = dt.shallow_copy_tensor %b : !infrt.dense_tensor<CPU, FP32, NCHW> -> !infrt.dense_tensor<CPU, FP32, NCHW>
Infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW> infrt.return %a0, %b0: !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>
} }
...@@ -18,6 +18,6 @@ func @main() { ...@@ -18,6 +18,6 @@ func @main() {
%shape = ts.build_shape [1:i64, 57:i64] %shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW> %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.dense_tensor<CPU, FP32, NCHW>
%b, %c = Infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) %b, %c = infrt.call @predict(%a, %a) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -13,7 +13,7 @@ func @naive_elementwise_add() { ...@@ -13,7 +13,7 @@ func @naive_elementwise_add() {
// CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3] // CHECK: tensor: shape=shape[2,8], values=[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>) dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
// RUN: infrtexec -i %s | FileCheck %s // RUN: infrtexec -i %s | FileCheck %s
...@@ -31,5 +31,5 @@ func @naive_matmul() { ...@@ -31,5 +31,5 @@ func @naive_matmul() {
// CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16] // CHECK: tensor: shape=shape[2,4], values=[16, 16, 16, 16, 16, 16, 16, 16]
dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>) dt.print_tensor (%c : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -3,12 +3,12 @@ ...@@ -3,12 +3,12 @@
func @load_tensor_map() { func @load_tensor_map() {
%map = dt.load_params(){path="@CMAKE_BINARY_DIR@/multi_fc_model"} %map = dt.load_params(){path="@CMAKE_BINARY_DIR@/multi_fc_model"}
%size = dt.tensor_map_get_size(%map) -> i32 %size = dt.tensor_map_get_size(%map) -> i32
Infrt.print.i32 %size infrt.print.i32 %size
%a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.dense_tensor<CPU, FP32, NCHW> %a = dt.tensor_map_get_tensor(%map) {name="fc_bias"} -> !infrt.dense_tensor<CPU, FP32, NCHW>
// CHECK: tensor: shape=shape[2], values=[0, 0] // CHECK: tensor: shape=shape[2], values=[0, 0]
dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -4,5 +4,5 @@ func @build_tensor1() { ...@@ -4,5 +4,5 @@ func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64] %a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92] // CHECK: shape[1,57,92]
ts.print_shape %a ts.print_shape %a
Infrt.return infrt.return
} }
...@@ -6,5 +6,5 @@ func @test_tensor_type() { ...@@ -6,5 +6,5 @@ func @test_tensor_type() {
// CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -4,5 +4,5 @@ func @build_tensor1() { ...@@ -4,5 +4,5 @@ func @build_tensor1() {
%a = ts.build_shape [1:i64, 57:i64, 92:i64] %a = ts.build_shape [1:i64, 57:i64, 92:i64]
// CHECK: shape[1,57,92] // CHECK: shape[1,57,92]
ts.print_shape %a ts.print_shape %a
Infrt.return infrt.return
} }
...@@ -6,5 +6,5 @@ func @test_tensor_type() { ...@@ -6,5 +6,5 @@ func @test_tensor_type() {
// CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] // CHECK: tensor: shape=shape[3,4], values=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>) dt.print_tensor (%a : !infrt.dense_tensor<CPU, FP32, NCHW>)
Infrt.return infrt.return
} }
...@@ -12,5 +12,5 @@ func @main(%bias:tensor<?xf32>, %c:tensor<?xf32>, %b1:tensor<?xf32>, %b2:tensor< ...@@ -12,5 +12,5 @@ func @main(%bias:tensor<?xf32>, %c:tensor<?xf32>, %b1:tensor<?xf32>, %b2:tensor<
%d2 = "pd.elementwise_add"(%c2, %bias2) {axis=-1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32> %d2 = "pd.elementwise_add"(%c2, %bias2) {axis=-1:si32} : (tensor<?xf32>, tensor<?xf32>) -> tensor<?xf32>
%e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32> %e2 = "pd.relu"(%d2) {} : (tensor<?xf32>) -> tensor<?xf32>
"infrt.return"(%e2) : (tensor<?xf32>)->() infrt.return %e2 : tensor<?xf32>
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册