From 3990e0bb517ea5e8e7bff21b1923aa7a8eb005c1 Mon Sep 17 00:00:00 2001
From: Yan Chunwei
Date: Tue, 8 Feb 2022 22:09:48 +0800
Subject: [PATCH] INFRT/Add pten dialect (4th PR) (#39374)

---
 paddle/infrt/CMakeLists.txt                   |  19 +++-
 paddle/infrt/dialect/CMakeLists.txt           |   5 +
 paddle/infrt/dialect/dense_tensor.cc          |   7 +-
 paddle/infrt/dialect/dense_tensor.h           |   4 +-
 paddle/infrt/dialect/infrt_base.cc            |   3 +-
 paddle/infrt/dialect/infrt_base.h             |   1 +
 paddle/infrt/dialect/init_infrt_dialects.cc   |   9 +-
 paddle/infrt/dialect/pten/CMakeLists.txt      |  12 ++
 paddle/infrt/dialect/pten/infrt_pten_base.td  |  35 ++++++
 .../infrt/dialect/pten/infrt_pten_tensor.cc   |  36 ++++++
 paddle/infrt/dialect/pten/infrt_pten_tensor.h |  38 +++++++
 .../infrt/dialect/pten/infrt_pten_tensor.td   | 104 ++++++++++++++++++
 paddle/infrt/dialect/pten/pten_base.cc        |  66 +++++++++++
 paddle/infrt/dialect/pten/pten_base.h         |  30 +++++
 .../tests/dialect/pten/dense_tensor.mlir      |  10 ++
 .../tests/dialect/tensor/dense_tensor.mlir    |   1 -
 16 files changed, 368 insertions(+), 12 deletions(-)
 create mode 100644 paddle/infrt/dialect/pten/CMakeLists.txt
 create mode 100644 paddle/infrt/dialect/pten/infrt_pten_base.td
 create mode 100644 paddle/infrt/dialect/pten/infrt_pten_tensor.cc
 create mode 100644 paddle/infrt/dialect/pten/infrt_pten_tensor.h
 create mode 100644 paddle/infrt/dialect/pten/infrt_pten_tensor.td
 create mode 100644 paddle/infrt/dialect/pten/pten_base.cc
 create mode 100644 paddle/infrt/dialect/pten/pten_base.h
 create mode 100644 paddle/infrt/tests/dialect/pten/dense_tensor.mlir

diff --git a/paddle/infrt/CMakeLists.txt b/paddle/infrt/CMakeLists.txt
index 5337c423b1c..62e54e26eda 100644
--- a/paddle/infrt/CMakeLists.txt
+++ b/paddle/infrt/CMakeLists.txt
@@ -1,10 +1,16 @@
-#TO DO:remove fluid
-include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
-
 if (NOT WITH_INFRT)
   return()
 endif()
 
+option(INFRT_WITH_PTEN "Compile INFRT with PTEN" ON)
+
+#TODO(xiaowei) remove fluid
+include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
+
+if (INFRT_WITH_PTEN)
+  add_definitions("-DINFRT_WITH_PTEN")
+endif()
+
 # compile flags
 set(INFRT_FLAGS -Wno-comment)
 foreach(flag ${INFRT_FLAGS})
@@ -92,7 +98,12 @@ set(infrt_mlir_incs
     rewrite_inc
     trt_ops_inc
 )
-message(STATUS "infrt srcs:\n${infrt_src}")
+if (INFRT_WITH_PTEN)
+  set(infrt_mlir_incs ${infrt_mlir_incs}
+    MLIRinfrt_pten_tensorIncGen
+    MLIRinfrt_pten_baseIncGen
+    )
+endif()
 
 cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto infrt_naive)
 cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
diff --git a/paddle/infrt/dialect/CMakeLists.txt b/paddle/infrt/dialect/CMakeLists.txt
index a5e049ea154..ce38c53617c 100644
--- a/paddle/infrt/dialect/CMakeLists.txt
+++ b/paddle/infrt/dialect/CMakeLists.txt
@@ -13,6 +13,7 @@ gather_srcs(infrt_src SRCS
     pd_types.cc
     pd_ops.cc
 )
+
 mlir_tablegen_on(basic_kernels)
 mlir_tablegen_on(test_kernels)
 mlir_tablegen_on(infrt_base DIALECT infrt)
@@ -34,3 +35,7 @@ add_dependencies(print-ir pd_ops_inc)
 cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
 
 add_subdirectory(tensorrt)
+
+if (INFRT_WITH_PTEN)
+  add_subdirectory(pten)
+endif()
diff --git a/paddle/infrt/dialect/dense_tensor.cc b/paddle/infrt/dialect/dense_tensor.cc
index 7685cdc65b9..fde265765c6 100644
--- a/paddle/infrt/dialect/dense_tensor.cc
+++ b/paddle/infrt/dialect/dense_tensor.cc
@@ -66,11 +66,11 @@ llvm::Optional GetPrecisionType(mlir::StringRef key) {
   return llvm::None;
 }
 
-TensorType TensorType::get(TargetType target,
+TensorType TensorType::get(mlir::MLIRContext *ctx,
+                           TargetType target,
                            LayoutType layout,
                            PrecisionType precision) {
-  return Base::get(
-      ::infrt::Global::getMLIRContext(), target, layout, precision);
+  return Base::get(ctx, target, layout, precision);
 }
 
 TargetType TensorType::target() { return getImpl()->target_; }
@@ -207,5 +207,4 @@ static void printSetTensorOp(mlir::OpAsmPrinter &p, SetTensorOp op) {  // NOLINT
 
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/dense_tensor.cpp.inc"  // NOLINT
-
 #include "paddle/infrt/dialect/dense_tensor_dialect.cpp.inc"
diff --git a/paddle/infrt/dialect/dense_tensor.h b/paddle/infrt/dialect/dense_tensor.h
index 416925d3382..08ba8d72066 100644
--- a/paddle/infrt/dialect/dense_tensor.h
+++ b/paddle/infrt/dialect/dense_tensor.h
@@ -68,7 +68,9 @@ class TensorType : public mlir::Type::TypeBase {
  public:
   using Base::Base;
-  static TensorType get(TargetType target,
+
+  static TensorType get(mlir::MLIRContext *ctx,
+                        TargetType target,
                         LayoutType layout,
                         PrecisionType precision);
diff --git a/paddle/infrt/dialect/infrt_base.cc b/paddle/infrt/dialect/infrt_base.cc
index e8005661bbd..c0101a8c166 100644
--- a/paddle/infrt/dialect/infrt_base.cc
+++ b/paddle/infrt/dialect/infrt_base.cc
@@ -85,7 +85,8 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
     // parse ">"
     if (parser.parseGreater()) return mlir::Type();
 
-    return infrt::dt::TensorType::get(*targetType, *layoutType, *precisionType);
+    return infrt::dt::TensorType::get(
+        parser.getContext(), *targetType, *layoutType, *precisionType);
   }
   // parse TensorMapType, for example: !infrt.tensor_map
   if (keyword == "tensor_map") {
diff --git a/paddle/infrt/dialect/infrt_base.h b/paddle/infrt/dialect/infrt_base.h
index 1a7fbcf395a..4021a5a6d3c 100644
--- a/paddle/infrt/dialect/infrt_base.h
+++ b/paddle/infrt/dialect/infrt_base.h
@@ -25,6 +25,7 @@
 
 namespace infrt {
 namespace dialect {
+
 class INFRTDialect : public mlir::Dialect {
   explicit INFRTDialect(mlir::MLIRContext *context)
       : mlir::Dialect(
diff --git a/paddle/infrt/dialect/init_infrt_dialects.cc b/paddle/infrt/dialect/init_infrt_dialects.cc
index c3769414dbb..9afefc01587 100644
--- a/paddle/infrt/dialect/init_infrt_dialects.cc
+++ b/paddle/infrt/dialect/init_infrt_dialects.cc
@@ -20,6 +20,8 @@
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/infrt_base.h"
 #include "paddle/infrt/dialect/pd_ops.h"
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
+#include "paddle/infrt/dialect/pten/pten_base.h"
 #include "paddle/infrt/dialect/tensor_shape.h"
 
 namespace infrt {
@@ -27,6 +29,11 @@ void registerCinnDialects(mlir::DialectRegistry &registry) {  // NOLINT
   registry.insert<ts::TensorShapeDialect,
                   dialect::INFRTDialect,
                   dt::DTDialect,
-                  mlir::pd::PaddleDialect>();
+                  mlir::pd::PaddleDialect,
+#ifdef INFRT_WITH_PTEN
+                  pten::PTENDenseTensorDialect,
+                  pten::PTENDialect
+#endif
+                  >();
 }
 
 }  // namespace infrt
diff --git a/paddle/infrt/dialect/pten/CMakeLists.txt b/paddle/infrt/dialect/pten/CMakeLists.txt
new file mode 100644
index 00000000000..0fb268952d5
--- /dev/null
+++ b/paddle/infrt/dialect/pten/CMakeLists.txt
@@ -0,0 +1,12 @@
+if (NOT INFRT_WITH_PTEN)
+  return()
+endif()
+
+#mlir_tablegen_on(infrt_pten_base DIALECT pten)
+add_mlir_dialect(infrt_pten_base pten)
+add_mlir_dialect(infrt_pten_tensor pten_dt)
+#mlir_tablegen_on(infrt_pten_tensor)
+
+gather_srcs(infrt_src SRCS
+    pten_base.cc
+    infrt_pten_tensor.cc)
diff --git a/paddle/infrt/dialect/pten/infrt_pten_base.td b/paddle/infrt/dialect/pten/infrt_pten_base.td
new file mode 100644
index 00000000000..20a43f9a926
--- /dev/null
+++ b/paddle/infrt/dialect/pten/infrt_pten_base.td
@@ -0,0 +1,35 @@
+#ifndef PTEN_BASE
+#define PTEN_BASE
+
+include "mlir/IR/OpBase.td"
+
+def PTEN_Dialect : Dialect {
+  let name = "pten";
+
+  let description = [{
+    The PTEN host dialect.
+  }];
+
+  let cppNamespace = "::infrt::pten";
+}
+
+class AllocatorTypeOf<string place, list<Trait> traits=[]>:
+      TypeDef<PTEN_Dialect, place # "Allocator", traits> {
+  let summary = !strconcat("!pten.allocator_", place, " type");
+}
+
+class ContextTypeOf<string place, list<Trait> traits=[]>:
+      TypeDef<PTEN_Dialect, place # "Context", traits> {
+  let summary = !strconcat("!pten.context_", place, " type");
+}
+
+def CPU_Allocator : AllocatorTypeOf<"CPU">;
+def GPU_Allocator : AllocatorTypeOf<"GPU">;
+
+def CPU_Context : ContextTypeOf<"CPU">;
+def GPU_Context : ContextTypeOf<"GPU">;
+
+def Allocator : AnyTypeOf<[CPU_Allocator, GPU_Allocator], "Allocator type">;
+def Context : AnyTypeOf<[CPU_Context, GPU_Context], "Context type">;
+
+#endif
diff --git a/paddle/infrt/dialect/pten/infrt_pten_tensor.cc b/paddle/infrt/dialect/pten/infrt_pten_tensor.cc
new file mode 100644
index 00000000000..b3e99da8750
--- /dev/null
+++ b/paddle/infrt/dialect/pten/infrt_pten_tensor.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
+
+#include
+
+#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.cpp.inc"
+#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.cpp.inc"
+
+namespace infrt {
+namespace pten {
+
+void PTENDenseTensorDialect::initialize() {
+#define GET_OP_LIST
+  addOperations<
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"
+      >();
+}
+
+}  // namespace pten
+}  // namespace infrt
+
+#define GET_OP_CLASSES
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"  // NOLINT
diff --git a/paddle/infrt/dialect/pten/infrt_pten_tensor.h b/paddle/infrt/dialect/pten/infrt_pten_tensor.h
new file mode 100644
index 00000000000..24ac2d851fe
--- /dev/null
+++ b/paddle/infrt/dialect/pten/infrt_pten_tensor.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.h.inc"
+#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.h.inc"
+
+#include "paddle/infrt/dialect/dense_tensor.h"
+// NOLINT
+#define GET_OP_CLASSES
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h.inc"
diff --git a/paddle/infrt/dialect/pten/infrt_pten_tensor.td b/paddle/infrt/dialect/pten/infrt_pten_tensor.td
new file mode 100644
index 00000000000..040c8ec3d36
--- /dev/null
+++ b/paddle/infrt/dialect/pten/infrt_pten_tensor.td
@@ -0,0 +1,104 @@
+#ifdef PTEN_TENSOR
+#else
+#define PTEN_TENSOR
+
+include "paddle/infrt/dialect/pten/infrt_pten_base.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+include "mlir/IR/OpBase.td"
+include "paddle/infrt/dialect/infrt_base.td"
+
+def PTEN_DenseTensorDialect : Dialect {
+  let name = "pten_dt";
+
+  let description = [{
+    The PTEN DenseTensor dialect.
+  }];
+
+  let cppNamespace = "::infrt::pten";
+}
+
+// PTEN DenseTensor related Op.
+class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PTEN_DenseTensorDialect, mnemonic, traits> {
+}
+
+class CreateUninitTensorOp<string dtype>
+      : PDT_Op<"create_uninit_tensor." # dtype, [NoSideEffect]> {
+  let summary = "pdt.create_uninit_tensor operation";
+
+  let description = [{
+    An operation that creates an uninitialized tensor.
+  }];
+
+  let arguments = (ins I64ArrayAttr:$shape);
+  let results = (outs TensorType:$output);
+}
+
+class CreateInitedTensorOp<string dtype, Attr array_attr>
+      : PDT_Op<"create_inited_tensor." #dtype, [NoSideEffect]> {
+  let summary = "pdt.create_inited_tensor operation";
+
+  let description = [{
+    An operation that creates a tensor with shape and values assigned.
+  }];
+
+  let arguments = (ins I64ArrayAttr:$shape, array_attr:$values);
+  let results = (outs TensorType:$output);
+}
+
+def PrintTensorOp : PDT_Op<"print_tensor"> {
+  let summary = "pdt.print_tensor operation";
+
+  let description = [{
+    An operation that prints a tensor.
+  }];
+
+  let arguments = (ins TensorType:$input);
+  let results = (outs);
+  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
+}
+
+class FillTensor<string dtype, Attr attr_type> :
+      PDT_Op<"fill_tensor." # dtype> {
+  let summary = "dt.fill_tensor operation";
+
+  let description = [{
+    An operation that fills an input tensor with values.
+  }];
+
+  let arguments = (ins
+      TensorType:$input,
+      attr_type:$value
+  );
+  let results = (outs);
+
+  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
+}
+
+class FillTensorWithConstantOp<string dtype> :
+      PDT_Op<"fill_tensor_with_constant." # dtype> {
+  let summary = "dt.fill_tensor_with_constant operation";
+
+  let description = [{
+    An operation that fills an input tensor with a single value.
+  }];
+
+  let arguments = (ins
+      TensorType:$input,
+      AnyAttr:$value
+  );
+  let results = (outs);
+
+  let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
+}
+
+foreach dtype = ["ui8", "ui16", "ui32", "ui64", "i32", "f32", "f64", "i64"] in {
+  def PDT_CreateUninitTensorOp_#dtype : CreateUninitTensorOp<dtype>;
+  def PDT_FillTensorWithConstantOp_#dtype : FillTensorWithConstantOp<dtype>;
+}
+
+def PDT_FillTensor_f32: FillTensor<"f32", F32ArrayAttr>;
+def PDT_FillTensor_i32: FillTensor<"i32", I32ArrayAttr>;
+def PDT_CreateInitedTensorOp_f32 : CreateInitedTensorOp<"f32", F32ArrayAttr>;
+def PDT_CreateInitedTensorOp_i32 : CreateInitedTensorOp<"i32", I32ArrayAttr>;
+
+#endif
diff --git a/paddle/infrt/dialect/pten/pten_base.cc b/paddle/infrt/dialect/pten/pten_base.cc
new file mode 100644
index 00000000000..ac23d442489
--- /dev/null
+++ b/paddle/infrt/dialect/pten/pten_base.cc
@@ -0,0 +1,66 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/infrt/dialect/pten/pten_base.h"
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include "paddle/infrt/common/global.h"
+#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"
+#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.cpp.inc"
+
+namespace infrt {
+namespace pten {
+
+void PTENDialect::printType(::mlir::Type type,
+                            mlir::DialectAsmPrinter& os) const {
+  Dialect::printType(type, os);
+}
+
+void PTENDialect::initialize() {
+  addOperations<
+#define GET_OP_LIST
+#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"  // NOLINT
+      >();
+  addTypes<
+#define GET_TYPEDEF_LIST
+#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
+      >();
+}
+
+mlir::Type PTENDialect::parseType(mlir::DialectAsmParser& parser) const {
+  llvm::StringRef keyword;
+  if (parser.parseKeyword(&keyword)) return mlir::Type();
+  if (keyword == "allocator_CPU") {
+    return CPUAllocatorType::get(parser.getContext());
+  } else if (keyword == "allocator_GPU") {
+    return GPUAllocatorType::get(parser.getContext());
+  } else if (keyword == "context_CPU") {
+    return CPUContextType::get(parser.getContext());
+  } else if (keyword == "context_GPU") {
+    return GPUContextType::get(parser.getContext());
+  }
+
+  return mlir::Type();
+}
+
+}  // namespace pten
+}  // namespace infrt
+
+#define GET_TYPEDEF_CLASSES
+#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
diff --git a/paddle/infrt/dialect/pten/pten_base.h b/paddle/infrt/dialect/pten/pten_base.h
new file mode 100644
index 00000000000..c3be6ef4e8b
--- /dev/null
+++ b/paddle/infrt/dialect/pten/pten_base.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+#include
+#include
+#include
+
+#include
+
+#include "paddle/infrt/dialect/pten/infrt_pten_base.h.inc"
+#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.h.inc"
+
+#define GET_TYPEDEF_CLASSES
+#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.h.inc"
+
+namespace infrt {
+namespace pten {}  // namespace pten
+}  // namespace infrt
diff --git a/paddle/infrt/tests/dialect/pten/dense_tensor.mlir b/paddle/infrt/tests/dialect/pten/dense_tensor.mlir
new file mode 100644
index 00000000000..109fa2d6fa7
--- /dev/null
+++ b/paddle/infrt/tests/dialect/pten/dense_tensor.mlir
@@ -0,0 +1,10 @@
+// RUN: infrtopt %s | FileCheck %s
+
+// CHECK-LABEL: basic_tensor
+func @basic_tensor() {
+  %a = "pten_dt.create_uninit_tensor.f32" () { shape=[12:i64, 23:i64] } : () -> !infrt.tensor
+  %b = "pten_dt.create_inited_tensor.f32" () { shape=[2:i64, 2:i64], values=[0.1:f32, 0.2:f32, 0.3:f32, 0.4:f32] } : () -> !infrt.tensor
+  "pten_dt.fill_tensor_with_constant.f32" (%a) { value=0.1:f32 } : (!infrt.tensor) -> ()
+
+  infrt.return
+}
diff --git a/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir b/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
index f1def17aa87..ff7f36f5078 100644
--- a/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
+++ b/paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
@@ -1,7 +1,6 @@
 // RUN: infrtexec -i %s | FileCheck %s
 // CHECK-LABEL: dense_shape0
 func @dense_shape0() {
-  %shape = ts.build_shape [1:i64, 57:i64]
   %a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor
 
   infrt.return
-- 
GitLab