未验证 提交 3990e0bb 编写于 作者: Y Yan Chunwei 提交者: GitHub

INFRT/Add pten dialect (4th PR) (#39374)

上级 60f1461a
#TO DO:remove fluid
include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
if (NOT WITH_INFRT)
return()
endif()
option(INFRT_WITH_PTEN "Compile INFRT with PTEN" ON)
#TODO(xiaowei) remove fluid
include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
if (INFRT_WITH_PTEN)
add_definitions("-DINFRT_WITH_PTEN")
endif()
# compile flags
set(INFRT_FLAGS -Wno-comment)
foreach(flag ${INFRT_FLAGS})
......@@ -92,7 +98,12 @@ set(infrt_mlir_incs
rewrite_inc
trt_ops_inc
)
message(STATUS "infrt srcs:\n${infrt_src}")
if (INFRT_WITH_PTEN)
set(infrt_mlir_incs ${infrt_mlir_incs}
MLIRinfrt_pten_tensorIncGen
MLIRinfrt_pten_baseIncGen
)
endif()
cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto infrt_naive)
cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
......
......@@ -13,6 +13,7 @@ gather_srcs(infrt_src SRCS
pd_types.cc
pd_ops.cc
)
mlir_tablegen_on(basic_kernels)
mlir_tablegen_on(test_kernels)
mlir_tablegen_on(infrt_base DIALECT infrt)
......@@ -34,3 +35,7 @@ add_dependencies(print-ir pd_ops_inc)
cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
add_subdirectory(tensorrt)
if (INFRT_WITH_PTEN)
add_subdirectory(pten)
endif()
......@@ -66,11 +66,11 @@ llvm::Optional<PrecisionType> GetPrecisionType(mlir::StringRef key) {
return llvm::None;
}
TensorType TensorType::get(TargetType target,
TensorType TensorType::get(mlir::MLIRContext *ctx,
TargetType target,
LayoutType layout,
PrecisionType precision) {
return Base::get(
::infrt::Global::getMLIRContext(), target, layout, precision);
return Base::get(ctx, target, layout, precision);
}
TargetType TensorType::target() { return getImpl()->target_; }
......@@ -207,5 +207,4 @@ static void printSetTensorOp(mlir::OpAsmPrinter &p, SetTensorOp op) { // NOLINT
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/dense_tensor.cpp.inc" // NOLINT
#include "paddle/infrt/dialect/dense_tensor_dialect.cpp.inc"
......@@ -68,7 +68,9 @@ class TensorType : public mlir::Type::TypeBase<TensorType,
detail::TensorTypeStorage> {
public:
using Base::Base;
static TensorType get(TargetType target,
static TensorType get(mlir::MLIRContext *ctx,
TargetType target,
LayoutType layout,
PrecisionType precision);
......
......@@ -85,7 +85,8 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
// parse ">"
if (parser.parseGreater()) return mlir::Type();
return infrt::dt::TensorType::get(*targetType, *layoutType, *precisionType);
return infrt::dt::TensorType::get(
parser.getContext(), *targetType, *layoutType, *precisionType);
}
// parse TensorMapType, for example: !infrt.tensor_map
if (keyword == "tensor_map") {
......
......@@ -25,6 +25,7 @@
namespace infrt {
namespace dialect {
class INFRTDialect : public mlir::Dialect {
explicit INFRTDialect(mlir::MLIRContext *context)
: mlir::Dialect(
......
......@@ -20,6 +20,8 @@
#include "paddle/infrt/dialect/dense_tensor.h"
#include "paddle/infrt/dialect/infrt_base.h"
#include "paddle/infrt/dialect/pd_ops.h"
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
#include "paddle/infrt/dialect/pten/pten_base.h"
#include "paddle/infrt/dialect/tensor_shape.h"
namespace infrt {
......@@ -27,6 +29,11 @@ void registerCinnDialects(mlir::DialectRegistry &registry) { // NOLINT
registry.insert<ts::TensorShapeDialect,
dialect::INFRTDialect,
dt::DTDialect,
mlir::pd::PaddleDialect>();
mlir::pd::PaddleDialect,
#ifdef INFRT_WITH_PTEN
pten::PTENDenseTensorDialect,
pten::PTENDialect
#endif
>();
}
} // namespace infrt
# Skip the PTEN dialect entirely unless INFRT was configured with PTEN support
# (see the INFRT_WITH_PTEN option in the parent directory).
if (NOT INFRT_WITH_PTEN)
  return()
endif()

# add_mlir_dialect() registers the tablegen rules that generate the
# ops/types/dialect .inc files; the second argument is the dialect mnemonic.
add_mlir_dialect(infrt_pten_base pten)
add_mlir_dialect(infrt_pten_tensor pten_dt)

# Register the dialect implementation sources with the global infrt source
# list.  (Each source is listed exactly once; infrt_pten_tensor.cc was
# previously duplicated.)
gather_srcs(infrt_src SRCS
  pten_base.cc
  infrt_pten_tensor.cc)
// TableGen definitions for the base `pten` dialect: the dialect itself plus
// the allocator/context type hierarchy used by PTEN kernels.
#ifndef PTEN_BASE
#define PTEN_BASE
include "mlir/IR/OpBase.td"
// The `pten` host dialect; generated C++ lives in ::infrt::pten.
def PTEN_Dialect : Dialect {
let name = "pten";
let description = [{
The PTEN host dialect.
}];
let cppNamespace = "::infrt::pten";
}
// Declares a !pten.allocator_<place> type (e.g. !pten.allocator_CPU).
class AllocatorTypeOf<string place, list<Trait> traits=[]>:
TypeDef<PTEN_Dialect, place # "Allocator", traits> {
let summary = !strconcat("!pten.allocator_", place, " type");
}
// Declares a !pten.context_<place> type (e.g. !pten.context_CPU).
class ContextTypeOf<string place, list<Trait> traits=[]>:
TypeDef<PTEN_Dialect, place # "Context", traits> {
let summary = !strconcat("!pten.context_", place, " type");
}
// Concrete per-place allocator/context types.
def CPU_Allocator : AllocatorTypeOf<"CPU">;
def GPU_Allocator : AllocatorTypeOf<"GPU">;
def CPU_Context : ContextTypeOf<"CPU">;
def GPU_Context : ContextTypeOf<"GPU">;
// Constraints accepting the allocator/context of any place.
def Allocator : AnyTypeOf<[CPU_Allocator, GPU_Allocator], "Allocator type">;
def Context : AnyTypeOf<[CPU_Context, GPU_Context], "Context type">;
#endif
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"

#include <mlir/IR/BuiltinTypes.h>

#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.cpp.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.cpp.inc"

namespace infrt {
namespace pten {

// Registers every tablegen-generated pten_dt op with this dialect.
// GET_OP_LIST is defined inside the addOperations<...> argument list so the
// generated .inc expands to the comma-separated op class list — same pattern
// as PTENDialect::initialize() in pten_base.cc.
void PTENDenseTensorDialect::initialize() {
  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"  // NOLINT
      >();
}

}  // namespace pten
}  // namespace infrt

// Emit the out-of-line definitions of the generated op classes.
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"  // NOLINT
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Declarations for the pten_dt (PTEN DenseTensor) dialect and its ops.
#pragma once

#include <mlir/Dialect/Traits.h>
#include <mlir/IR/Attributes.h>
#include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h>
#include <mlir/IR/BuiltinTypes.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/Matchers.h>
#include <mlir/IR/OpImplementation.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/Interfaces/CallInterfaces.h>
#include <mlir/Interfaces/DerivedAttributeOpInterface.h>
#include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
// Tablegen-generated dialect/type declarations.
#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.h.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.h.inc"
// For the dt::TensorType used by the generated op signatures.
#include "paddle/infrt/dialect/dense_tensor.h"
// Pull in the tablegen-generated op class declarations.
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h.inc"  // NOLINT
// TableGen definitions for the pten_dt (PTEN DenseTensor) dialect ops.
// Guard uses #ifndef for consistency with infrt_pten_base.td.
#ifndef PTEN_TENSOR
#define PTEN_TENSOR

include "paddle/infrt/dialect/pten/infrt_pten_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt_base.td"

def PTEN_DenseTensorDialect : Dialect {
  let name = "pten_dt";
  let description = [{
    The PTEN DenseTensor dialect.
  }];
  let cppNamespace = "::infrt::pten";
}

// Base class for PTEN DenseTensor ops; every op is IsolatedFromAbove.
class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PTEN_DenseTensorDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
}

// pten_dt.create_uninit_tensor.<dtype>: allocates a tensor of the given
// shape without initializing its contents.
class CreateUninitTensorOp<string dtype>
    : PDT_Op<"create_uninit_tensor." # dtype, [NoSideEffect]> {
  let summary = "pten_dt.create_uninit_tensor operation";
  let description = [{
    An operation that creates an uninitialized tensor.
  }];
  let arguments = (ins I64ArrayAttr:$shape);
  let results = (outs TensorType:$output);
}

// pten_dt.create_inited_tensor.<dtype>: creates a tensor with both its shape
// and element values assigned.
class CreateInitedTensorOp<string dtype, Attr array_attr>
    : PDT_Op<"create_inited_tensor." # dtype, [NoSideEffect]> {
  let summary = "pten_dt.create_inited_tensor operation";
  let description = [{
    An operation that creates a tensor with shape and values assigned.
  }];
  let arguments = (ins I64ArrayAttr:$shape, array_attr:$values);
  let results = (outs TensorType:$output);
}

// pten_dt.print_tensor: prints a tensor (debugging aid).
def PrintTensorOp : PDT_Op<"print_tensor"> {
  let summary = "pten_dt.print_tensor operation";
  let description = [{
    An operation that prints a tensor.
  }];
  let arguments = (ins TensorType:$input);
  let results = (outs);
  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}

// pten_dt.fill_tensor.<dtype>: fills an existing tensor with an array of
// values.
class FillTensor<string dtype, Attr attr_type> :
    PDT_Op<"fill_tensor." # dtype> {
  let summary = "pten_dt.fill_tensor operation";
  let description = [{
    An operation that fills an input tensor with values.
  }];
  let arguments = (ins
    TensorType:$input,
    attr_type:$value
  );
  let results = (outs);
  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}

// pten_dt.fill_tensor_with_constant.<dtype>: fills an existing tensor with a
// single scalar value.
class FillTensorWithConstantOp<string dtype> :
    PDT_Op<"fill_tensor_with_constant." # dtype> {
  let summary = "pten_dt.fill_tensor_with_constant operation";
  let description = [{
    An operation that fills an input tensor with a single value.
  }];
  let arguments = (ins
    TensorType:$input,
    AnyAttr:$value
  );
  let results = (outs);
  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}

// Instantiate the templated ops for each supported dtype.
foreach dtype = ["ui8", "ui16", "ui32", "ui64", "i32", "f32", "f64", "i64"] in {
  def PDT_CreateUninitTensorOp_#dtype : CreateUninitTensorOp<dtype>;
  def PDT_FillTensorWithConstantOp_#dtype : FillTensorWithConstantOp<dtype>;
}

def PDT_FillTensor_f32: FillTensor<"f32", F32ArrayAttr>;
def PDT_FillTensor_i32: FillTensor<"i32", I32ArrayAttr>;
def PDT_CreateInitedTensorOp_f32 : CreateInitedTensorOp<"f32", F32ArrayAttr>;
def PDT_CreateInitedTensorOp_i32 : CreateInitedTensorOp<"i32", I32ArrayAttr>;

#endif
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/pten/pten_base.h"

#include <mlir/IR/Builders.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/DialectImplementation.h>
#include <mlir/IR/MLIRContext.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>

#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.cpp.inc"

namespace infrt {
namespace pten {

// Delegate printing to the default dialect printer; the tablegen-generated
// types supply their own mnemonics.
void PTENDialect::printType(::mlir::Type type,
                            mlir::DialectAsmPrinter& os) const {
  Dialect::printType(type, os);
}

// Registers the generated ops and types of the `pten` dialect.
void PTENDialect::initialize() {
  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"  // NOLINT
      >();
  addTypes<
#define GET_TYPEDEF_LIST
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
      >();
}

// Parses a `pten` dialect type from its keyword,
// e.g. !pten.allocator_CPU or !pten.context_GPU.
mlir::Type PTENDialect::parseType(mlir::DialectAsmParser& parser) const {
  llvm::StringRef keyword;
  if (parser.parseKeyword(&keyword)) return mlir::Type();
  if (keyword == "allocator_CPU") {
    return CPUAllocatorType::get(parser.getContext());
  } else if (keyword == "allocator_GPU") {
    return GPUAllocatorType::get(parser.getContext());
  } else if (keyword == "context_CPU") {
    return CPUContextType::get(parser.getContext());
  } else if (keyword == "context_GPU") {
    return GPUContextType::get(parser.getContext());
  }
  // Report the unknown keyword instead of silently returning a null type;
  // the MLIR parser expects a diagnostic to accompany a failed parse.
  parser.emitError(parser.getCurrentLocation(), "unknown pten type: ")
      << keyword;
  return mlir::Type();
}

}  // namespace pten
}  // namespace infrt

// Emit the out-of-line definitions of the generated type classes.
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Declarations for the base `pten` dialect and its generated types.
#pragma once

#include <mlir/IR/Dialect.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <string>
// Tablegen-generated op/dialect declarations.
#include "paddle/infrt/dialect/pten/infrt_pten_base.h.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.h.inc"
// Pull in the tablegen-generated type class declarations
// (CPUAllocatorType, GPUContextType, ...).
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.h.inc"
namespace infrt {
namespace pten {}  // namespace pten
}  // namespace infrt
// RUN: infrtopt %s | FileCheck %s
// Smoke test: pten_dt tensor creation/fill ops must round-trip through
// infrtopt (parse + verify + print).
// CHECK-LABEL: basic_tensor
func @basic_tensor() {
// Uninitialized 12x23 f32 tensor.
%a = "pten_dt.create_uninit_tensor.f32" () { shape=[12:i64, 23:i64] } : () -> !infrt.tensor<X86, NCHW, F32>
// 2x2 f32 tensor with explicit element values.
%b = "pten_dt.create_inited_tensor.f32" () { shape=[2:i64, 2:i64], values=[0.1:f32, 0.2:f32, 0.3:f32, 0.4:f32] } : () -> !infrt.tensor<X86, NCHW, F32>
// Overwrite every element of %a with the scalar 0.1.
"pten_dt.fill_tensor_with_constant.f32" (%a) { value=0.1:f32 } : (!infrt.tensor<X86, NCHW, F32>) -> ()
infrt.return
}
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: dense_shape0
func @dense_shape0() {
%shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
infrt.return
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册