机器未来 / Paddle, a fork of PaddlePaddle / Paddle (in sync with the upstream project).
Commit 3990e0bb (unverified)

INFRT/Add pten dialect (4th PR) (#39374)

Authored by Yan Chunwei on Feb 08, 2022; committed via GitHub on Feb 08, 2022.
Parent commit: 60f1461a
Showing 16 changed files with 368 additions and 12 deletions (+368 / -12).
paddle/infrt/CMakeLists.txt                            +15   -4
paddle/infrt/dialect/CMakeLists.txt                     +5   -0
paddle/infrt/dialect/dense_tensor.cc                    +3   -4
paddle/infrt/dialect/dense_tensor.h                     +3   -1
paddle/infrt/dialect/infrt_base.cc                      +2   -1
paddle/infrt/dialect/infrt_base.h                       +1   -0
paddle/infrt/dialect/init_infrt_dialects.cc             +8   -1
paddle/infrt/dialect/pten/CMakeLists.txt               +12   -0
paddle/infrt/dialect/pten/infrt_pten_base.td           +35   -0
paddle/infrt/dialect/pten/infrt_pten_tensor.cc         +36   -0
paddle/infrt/dialect/pten/infrt_pten_tensor.h          +38   -0
paddle/infrt/dialect/pten/infrt_pten_tensor.td        +104   -0
paddle/infrt/dialect/pten/pten_base.cc                 +66   -0
paddle/infrt/dialect/pten/pten_base.h                  +30   -0
paddle/infrt/tests/dialect/pten/dense_tensor.mlir      +10   -0
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir     +0   -1
paddle/infrt/CMakeLists.txt
-#TO DO:remove fluid
-include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
 if (NOT WITH_INFRT)
   return()
 endif()

+option(INFRT_WITH_PTEN "Compile INFRT with PTEN" ON)
+
+#TODO(xiaowei) remove fluid
+include_directories(${PADDLE_SOURCE_DIR}/paddle/fluid/platform)
+
+if (INFRT_WITH_PTEN)
+  add_definitions("-DINFRT_WITH_PTEN")
+endif()

 # compile flags
 set(INFRT_FLAGS -Wno-comment)
 foreach(flag ${INFRT_FLAGS})
...

@@ -92,7 +98,12 @@ set(infrt_mlir_incs
     rewrite_inc
     trt_ops_inc
     )
 message(STATUS "infrt srcs:\n${infrt_src}")

+if (INFRT_WITH_PTEN)
+  set(infrt_mlir_incs ${infrt_mlir_incs}
+      MLIRinfrt_pten_tensorIncGen
+      MLIRinfrt_pten_baseIncGen)
+endif()

 cc_library(infrt SHARED SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto infrt_naive)
 cc_library(infrt_static SRCS ${infrt_src} DEPS glog boost ${mlir_libs} paddle_framework_proto)
...
paddle/infrt/dialect/CMakeLists.txt
...
@@ -13,6 +13,7 @@ gather_srcs(infrt_src SRCS
     pd_types.cc
     pd_ops.cc
     )
 mlir_tablegen_on(basic_kernels)
 mlir_tablegen_on(test_kernels)
 mlir_tablegen_on(infrt_base DIALECT infrt)
...

@@ -34,3 +35,7 @@ add_dependencies(print-ir pd_ops_inc)
 cc_test_tiny(test_infrt_mlir_loader SRCS mlir_loader_test.cc DEPS infrt ${MLIR_IR_LIBS})
 add_subdirectory(tensorrt)
+
+if (INFRT_WITH_PTEN)
+  add_subdirectory(pten)
+endif()
paddle/infrt/dialect/dense_tensor.cc
...
@@ -66,11 +66,11 @@ llvm::Optional<PrecisionType> GetPrecisionType(mlir::StringRef key) {
   return llvm::None;
 }

-TensorType TensorType::get(TargetType target,
+TensorType TensorType::get(mlir::MLIRContext *ctx,
+                           TargetType target,
                            LayoutType layout,
                            PrecisionType precision) {
-  return Base::get(::infrt::Global::getMLIRContext(), target, layout, precision);
+  return Base::get(ctx, target, layout, precision);
 }

 TargetType TensorType::target() { return getImpl()->target_; }
...

@@ -207,5 +207,4 @@ static void printSetTensorOp(mlir::OpAsmPrinter &p, SetTensorOp op) {  // NOLINT
 #define GET_OP_CLASSES
 #include "paddle/infrt/dialect/dense_tensor.cpp.inc"  // NOLINT
 #include "paddle/infrt/dialect/dense_tensor_dialect.cpp.inc"
paddle/infrt/dialect/dense_tensor.h
...
@@ -68,7 +68,9 @@ class TensorType : public mlir::Type::TypeBase<TensorType,
                                                detail::TensorTypeStorage> {
  public:
   using Base::Base;

-  static TensorType get(TargetType target,
+  static TensorType get(mlir::MLIRContext *ctx,
+                        TargetType target,
                         LayoutType layout,
                         PrecisionType precision);
...
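The signature change above threads the MLIRContext through explicitly instead of reaching into the ::infrt::Global singleton. A minimal sketch of an updated call site follows; the MakeTensorType wrapper is illustrative only, and it assumes the TargetType/LayoutType/PrecisionType enums live next to TensorType in infrt::dt, as the surrounding code suggests.

#include <mlir/IR/MLIRContext.h>

#include "paddle/infrt/dialect/dense_tensor.h"

// Illustrative helper, not part of the patch: whoever creates a TensorType
// now has to hand in the context, typically obtained from a parser, builder,
// or an existing operation.
infrt::dt::TensorType MakeTensorType(mlir::MLIRContext *ctx,
                                     infrt::dt::TargetType target,
                                     infrt::dt::LayoutType layout,
                                     infrt::dt::PrecisionType precision) {
  // Old form: infrt::dt::TensorType::get(target, layout, precision);
  return infrt::dt::TensorType::get(ctx, target, layout, precision);
}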
paddle/infrt/dialect/infrt_base.cc
...
@@ -85,7 +85,8 @@ mlir::Type INFRTDialect::parseType(mlir::DialectAsmParser &parser) const {
     // parse ">"
     if (parser.parseGreater()) return mlir::Type();

-    return infrt::dt::TensorType::get(*targetType, *layoutType, *precisionType);
+    return infrt::dt::TensorType::get(
+        parser.getContext(), *targetType, *layoutType, *precisionType);
   }
   // parse TensorMapType, for example: !infrt.tensor_map
   if (keyword == "tensor_map") {
...
paddle/infrt/dialect/infrt_base.h
...
@@ -25,6 +25,7 @@
 namespace infrt {
 namespace dialect {

 class INFRTDialect : public mlir::Dialect {
   explicit INFRTDialect(mlir::MLIRContext *context)
       : mlir::Dialect(
...
paddle/infrt/dialect/init_infrt_dialects.cc
...
@@ -20,6 +20,8 @@
 #include "paddle/infrt/dialect/dense_tensor.h"
 #include "paddle/infrt/dialect/infrt_base.h"
 #include "paddle/infrt/dialect/pd_ops.h"
+#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
+#include "paddle/infrt/dialect/pten/pten_base.h"
 #include "paddle/infrt/dialect/tensor_shape.h"

 namespace infrt {
...

@@ -27,6 +29,11 @@ void registerCinnDialects(mlir::DialectRegistry &registry) {  // NOLINT
   registry.insert<ts::TensorShapeDialect,
                   dialect::INFRTDialect,
                   dt::DTDialect,
-                  mlir::pd::PaddleDialect>();
+                  mlir::pd::PaddleDialect,
+#ifdef INFRT_WITH_PTEN
+                  pten::PTENDenseTensorDialect,
+                  pten::PTENDialect
+#endif
+                  >();
 }
 }  // namespace infrt
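With the guarded insert above, any tool that sets up its context through registerCinnDialects picks up the two PTEN dialects automatically when the build defines INFRT_WITH_PTEN. A minimal usage sketch, assuming the init_infrt_dialects.h header matches the .cc file shown here; the context setup itself is standard MLIR API.

#include <mlir/IR/Dialect.h>
#include <mlir/IR/MLIRContext.h>

#include "paddle/infrt/dialect/init_infrt_dialects.h"

int main() {
  mlir::DialectRegistry registry;
  // Now also inserts pten::PTENDialect and pten::PTENDenseTensorDialect
  // when the library was built with -DINFRT_WITH_PTEN.
  infrt::registerCinnDialects(registry);

  mlir::MLIRContext context(registry);
  context.loadAllAvailableDialects();
  return 0;
}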
paddle/infrt/dialect/pten/CMakeLists.txt
new file mode 100644

if (NOT INFRT_WITH_PTEN)
  return()
endif()

#mlir_tablegen_on(infrt_pten_base DIALECT pten)
add_mlir_dialect(infrt_pten_base pten)
add_mlir_dialect(infrt_pten_tensor pten_dt)
#mlir_tablegen_on(infrt_pten_tensor)

gather_srcs(infrt_src SRCS
    pten_base.cc
    infrt_pten_tensor.cc)
paddle/infrt/dialect/pten/infrt_pten_base.td
new file mode 100644
#ifndef PTEN_BASE
#define PTEN_BASE
include "mlir/IR/OpBase.td"
def PTEN_Dialect : Dialect {
let name = "pten";
let description = [{
The PTEN host dialect.
}];
let cppNamespace = "::infrt::pten";
}
class AllocatorTypeOf<string place, list<Trait> traits=[]>:
TypeDef<PTEN_Dialect, place # "Allocator", traits> {
let summary = !strconcat("!pten.allocator_", place, " type");
}
class ContextTypeOf<string place, list<Trait> traits=[]>:
TypeDef<PTEN_Dialect, place # "Context", traits> {
let summary = !strconcat("!pten.context_", place, " type");
}
def CPU_Allocator : AllocatorTypeOf<"CPU">;
def GPU_Allocator : AllocatorTypeOf<"GPU">;
def CPU_Context : ContextTypeOf<"CPU">;
def GPU_Context : ContextTypeOf<"GPU">;
def Allocator : AnyTypeOf<[CPU_Allocator, GPU_Allocator], "Allocator type">;
def Context : AnyTypeOf<[CPU_Context, GPU_Context], "Context type">;
#endif
paddle/infrt/dialect/pten/infrt_pten_tensor.cc
new file mode 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h"
#include <mlir/IR/BuiltinTypes.h>
#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.cpp.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.cpp.inc"
namespace infrt {
namespace pten {

void PTENDenseTensorDialect::initialize() {
#define GET_OP_LIST
  addOperations<
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc"
      >();
}

}  // namespace pten
}  // namespace infrt
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.cpp.inc" // NOLINT
paddle/infrt/dialect/pten/infrt_pten_tensor.h
new file mode 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <mlir/Dialect/Traits.h>
#include <mlir/IR/Attributes.h>
#include <mlir/IR/Builders.h>
#include <mlir/IR/BuiltinOps.h>
#include <mlir/IR/BuiltinTypes.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/Matchers.h>
#include <mlir/IR/OpImplementation.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/Interfaces/CallInterfaces.h>
#include <mlir/Interfaces/DerivedAttributeOpInterface.h>
#include <mlir/Interfaces/InferTypeOpInterface.h>
#include <mlir/Interfaces/LoopLikeInterface.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include "paddle/infrt/dialect/pten/infrt_pten_tensorDialect.h.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_tensorTypes.h.inc"
#include "paddle/infrt/dialect/dense_tensor.h"
// NOLINT
#define GET_OP_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_tensor.h.inc"
paddle/infrt/dialect/pten/infrt_pten_tensor.td
new file mode 100644
#ifdef PTEN_TENSOR
#else
#define PTEN_TENSOR
include "paddle/infrt/dialect/pten/infrt_pten_base.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/OpBase.td"
include "paddle/infrt/dialect/infrt_base.td"
def PTEN_DenseTensorDialect : Dialect {
let name = "pten_dt";
let description = [{
The PTEN DenseTensor dialect.
}];
let cppNamespace = "::infrt::pten";
}
// PTEN DenseTensor related Op.
class PDT_Op<string mnemonic, list<OpTrait> traits = []> : Op<PTEN_DenseTensorDialect, mnemonic, !listconcat(traits, [IsolatedFromAbove])> {
}
class CreateUninitTensorOp<string dtype>
: PDT_Op<"create_uninit_tensor." # dtype, [NoSideEffect]> {
let summary = "pdt.create_uninit_tensor operation";
let description = [{
An operation that creates an uninitialized tensor.
}];
let arguments = (ins I64ArrayAttr:$shape);
let results = (outs TensorType:$output);
}
class CreateInitedTensorOp<string dtype, Attr array_attr>
: PDT_Op<"create_inited_tensor." #dtype, [NoSideEffect]> {
let summary = "pdt.create_inited_tensor operation";
let description = [{
An operation that creates an tensor with shape and values assigned.
}];
let arguments = (ins I64ArrayAttr:$shape, array_attr:$values);
let results = (outs TensorType:$output);
}
def PrintTensorOp : PDT_Op<"print_tensor"> {
let summary = "pdt.print_tensor operation";
let description = [{
An operation that prints a tensor.
}];
let arguments = (ins TensorType:$input);
let results = (outs);
let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}
class FillTensor<string dtype, Attr attr_type> :
PDT_Op<"fill_tensor." # dtype> {
let summary = "dt.fill_tensor operation";
let description = [{
An operation that fills an input tensor with a values.
}];
let arguments = (ins
TensorType:$input,
attr_type:$value
);
let results = (outs);
let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}
class FillTensorWithConstantOp<string dtype> :
PDT_Op<"fill_tensor_with_constant." # dtype> {
let summary = "dt.fill_tensor_with_constant operation";
let description = [{
An operation that fills an input tensor with a single value.
}];
let arguments = (ins
TensorType:$input,
AnyAttr:$value
);
let results = (outs);
let assemblyFormat = "`(` $input `:` type($input) `)` attr-dict";
}
foreach dtype = ["ui8", "ui16", "ui32", "ui64", "i32", "f32", "f64", "i64"] in {
def PDT_CreateUninitTensorOp_#dtype : CreateUninitTensorOp<dtype>;
def PDT_FillTensorWithConstantOp_#dtype : FillTensorWithConstantOp<dtype>;
}
def PDT_FillTensor_f32: FillTensor<"f32", F32ArrayAttr>;
def PDT_FillTensor_i32: FillTensor<"i32", I32ArrayAttr>;
def PDT_CreateInitedTensorOp_f32 : CreateInitedTensorOp<"f32", F32ArrayAttr>;
def PDT_CreateInitedTensorOp_i32 : CreateInitedTensorOp<"i32", I32ArrayAttr>;
#endif
paddle/infrt/dialect/pten/pten_base.cc
new file mode 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/infrt/dialect/pten/pten_base.h"
#include <mlir/IR/Builders.h>
#include <mlir/IR/Dialect.h>
#include <mlir/IR/DialectImplementation.h>
#include <mlir/IR/MLIRContext.h>
#include <mlir/IR/TypeUtilities.h>
#include <mlir/IR/Types.h>
#include "paddle/infrt/common/global.h"
#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.cpp.inc"
namespace infrt {
namespace pten {

void PTENDialect::printType(::mlir::Type type,
                            mlir::DialectAsmPrinter &os) const {
  Dialect::printType(type, os);
}

void PTENDialect::initialize() {
  addOperations<
#define GET_OP_LIST
#include "paddle/infrt/dialect/pten/infrt_pten_base.cpp.inc"  // NOLINT
      >();
  addTypes<
#define GET_TYPEDEF_LIST
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc"  // NOLINT
      >();
}

mlir::Type PTENDialect::parseType(mlir::DialectAsmParser &parser) const {
  llvm::StringRef keyword;
  if (parser.parseKeyword(&keyword)) return mlir::Type();
  if (keyword == "allocator_CPU") {
    return CPUAllocatorType::get(parser.getContext());
  } else if (keyword == "allocator_GPU") {
    return GPUAllocatorType::get(parser.getContext());
  } else if (keyword == "context_CPU") {
    return CPUContextType::get(parser.getContext());
  } else if (keyword == "context_GPU") {
    return GPUContextType::get(parser.getContext());
  }
  return mlir::Type();
}

}  // namespace pten
}  // namespace infrt
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.cpp.inc" // NOLINT
paddle/infrt/dialect/pten/pten_base.h
new file mode 100644
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <mlir/IR/Dialect.h>
#include <mlir/IR/OpDefinition.h>
#include <mlir/Interfaces/SideEffectInterfaces.h>
#include <string>
#include "paddle/infrt/dialect/pten/infrt_pten_base.h.inc"
#include "paddle/infrt/dialect/pten/infrt_pten_baseDialect.h.inc"
#define GET_TYPEDEF_CLASSES
#include "paddle/infrt/dialect/pten/infrt_pten_baseTypes.h.inc"
namespace infrt {
namespace pten {}  // namespace pten
}  // namespace infrt
paddle/infrt/tests/dialect/pten/dense_tensor.mlir
new file mode 100644
// RUN: infrtopt %s | FileCheck %s
// CHECK-LABEL: basic_tensor
func @basic_tensor() {
%a = "pten_dt.create_uninit_tensor.f32" () { shape=[12:i64, 23:i64] } : () -> !infrt.tensor<X86, NCHW, F32>
%b = "pten_dt.create_inited_tensor.f32" () { shape=[2:i64, 2:i64], values=[0.1:f32, 0.2:f32, 0.3:f32, 0.4:f32] } : () -> !infrt.tensor<X86, NCHW, F32>
"pten_dt.fill_tensor_with_constant.f32" (%a) { value=0.1:f32 } : (!infrt.tensor<X86, NCHW, F32>) -> ()
infrt.return
}
paddle/infrt/tests/dialect/tensor/dense_tensor.mlir
// RUN: infrtexec -i %s | FileCheck %s
// CHECK-LABEL: dense_shape0
func @dense_shape0() {
%shape = ts.build_shape [1:i64, 57:i64]
%a = dt.create_uninit_tensor.f32 [12:i64, 23:i64] -> !infrt.tensor<X86, NCHW, F32>
infrt.return
...
...