Unverified commit 65478332
Authored on Mar 25, 2022 by 王明冬; committed via GitHub on Mar 25, 2022.
[infrt] add phi_dt.create_inited_dense_tensor.cpu.f32 kernel. (#40902)
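The new op builds a CPU FP32 dense tensor with the given dims, lod, and layout and initializes every element to the f32 `value` attribute, replacing the previous create-then-fill pair. A minimal invocation, adapted from the updated phi_test.mlir in this commit (an illustrative sketch, not additional test content):

    %ctx = "phi_dt.create_context.cpu" () : () -> !phi.context<CPU>
    // Create a 1x3x8x8 FP32 CPU tensor with every element set to 3.8.
    %t = "phi_dt.create_inited_dense_tensor.cpu.f32"(%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[1, 3, 8, 8]} : (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)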
Parent: cfadf61b

Showing 7 changed files with 55 additions and 18 deletions (+55 −18).
paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td                +7   −0
paddle/infrt/kernel/phi/context_kernels.cc                     +4   −1
paddle/infrt/kernel/phi/dense_tensor_kernels.cc                +20  −0
paddle/infrt/kernel/phi/dense_tensor_kernels.h                 +7   −0
paddle/infrt/kernel/phi/registry.cc                            +6   −0
paddle/infrt/tests/dialect/phi/phi_test.mlir                   +9   −15
tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py     +2   −2
paddle/infrt/dialect/phi/ir/infrt_phi_tensor.td

@@ -28,6 +28,13 @@ class CreateDenseTensorOp<string target>
   let results = (outs DenseTensor:$output);
 }
 
+def CreateInitedCpuFLOAT32DenseTensorOp
+    : PDT_Op<"create_inited_dense_tensor.cpu.f32", [NoSideEffect]> {
+  let arguments = (ins Context:$context, I64ArrayAttr:$dims,
+      LayoutAttr:$layout, I64ArrayAttr:$lod, F32Attr:$value);
+  let results = (outs DenseTensor:$output);
+}
+
 class FillDenseTensorOp<Attr attr_type, string dtype> :
     PDT_Op<"fill_dense_tensor." # dtype> {
   let arguments = (ins
 ...
paddle/infrt/kernel/phi/context_kernels.cc

@@ -21,7 +21,10 @@ namespace phi {
 ::phi::CPUContext CreateCPUContext() {
   ::phi::CPUContext ctx{};
   ctx.Init();
-  ctx.SetAllocator(new backends::CpuPhiAllocator{});
+  auto allocator = new backends::CpuPhiAllocator{};
+  ctx.SetAllocator(allocator);
+  ctx.SetHostAllocator(allocator);
+  ctx.SetZeroAllocator(allocator);
   return ctx;
 }
paddle/infrt/kernel/phi/dense_tensor_kernels.cc

@@ -56,6 +56,26 @@ namespace phi {
           {}));
 }
 
+::phi::DenseTensor CreateInitedDenseTensorF32(
+    const ::phi::CPUContext& context,
+    host_context::Attribute<std::vector<int64_t>> dims,
+    host_context::Attribute<std::vector<int64_t>> lod,
+    host_context::Attribute<::infrt::LayoutType> layout,
+    host_context::Attribute<float> value) {
+  ::phi::DenseTensor dense_tensor(
+      const_cast<::phi::Allocator*>(&context.GetAllocator()),
+      ::phi::DenseTensorMeta(
+          ConvertPrecisionToPhi(::infrt::PrecisionType::FLOAT32),
+          ::phi::make_ddim(dims.get()),
+          ConvertLayoutToPhi(layout.get()),
+          {}));
+  float* a_data = dense_tensor.mutable_data<float>(::phi::CPUPlace());
+  for (int64_t i = 0; i < dense_tensor.numel(); ++i) {
+    a_data[i] = value.get();
+  }
+  return dense_tensor;
+}
+
 ::phi::DenseTensor CreateGPUDenseTensor(
     const ::phi::GPUContext& context,
     host_context::Attribute<std::vector<int64_t>> dims,
 ...
paddle/infrt/kernel/phi/dense_tensor_kernels.h

@@ -32,6 +32,13 @@ namespace phi {
     host_context::Attribute<::infrt::LayoutType> layout,
     host_context::Attribute<::infrt::PrecisionType> precision);
 
+::phi::DenseTensor CreateInitedDenseTensorF32(
+    const ::phi::CPUContext& context,
+    host_context::Attribute<std::vector<int64_t>> dims,
+    host_context::Attribute<std::vector<int64_t>> lod,
+    host_context::Attribute<::infrt::LayoutType> layout,
+    host_context::Attribute<float> value);
+
 ::phi::DenseTensor CreateGPUDenseTensor(
     const ::phi::GPUContext& context,
     host_context::Attribute<std::vector<int64_t>> dims,
 ...
paddle/infrt/kernel/phi/registry.cc

@@ -38,6 +38,12 @@ void RegisterPhiKernels(host_context::KernelRegistry* registry) {
       "phi_dt.create_dense_tensor.cpu",
       INFRT_KERNEL(infrt::kernel::phi::CreateDenseTensor),
       {"dims", "lod", "layout", "precision"});
+  registry->AddKernelWithAttrs(
+      "phi_dt.create_inited_dense_tensor.cpu.f32",
+      INFRT_KERNEL(infrt::kernel::phi::CreateInitedDenseTensorF32),
+      {"dims", "lod", "layout", "value"});
   registry->AddKernelWithAttrs(
       "phi_dt.fill_dense_tensor.f32",
       INFRT_KERNEL(infrt::kernel::phi::FillDenseTensorF32),
 ...
paddle/infrt/tests/dialect/phi/phi_test.mlir

@@ -7,26 +7,20 @@ module {
     %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%4, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)
     %out = "pd.relu"(%Y) : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
     %5 = "pd.elementwise_add"(%out, %out) {axis = -1:si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
-    infrt.return %5 : !infrt.dense_tensor<CPU, FP32, NCHW>
+    %6 = "pd.pool2d"(%5) {adaptive = false, pooling_type = "avg", ceil_mode = false, data_format = "NCHW", exclusive = true, global_pooling = false, ksize = [3 : i32, 3 : i32], padding_algorithm = "EXPLICIT", paddings = [1 : i32, 1 : i32], strides = [2 : i32, 2 : i32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    %7 = "pd.flatten_contiguous_range"(%6) {start_axis = 1 : si32, stop_axis = 3 : si32} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
+    infrt.return %7 : !infrt.dense_tensor<CPU, FP32, NCHW>
   }
   func @main() {
     %ctx = "phi_dt.create_context.cpu" (): () -> !phi.context<CPU>
-    %t = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[1, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%t) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %filter = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%filter) {value=[3.8:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %bias = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%bias) {value=[1.5:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %mean = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%mean) {value=[3.5:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %scale = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%scale) {value=[1.0:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
-    %var = "phi_dt.create_dense_tensor.cpu" (%ctx) {precision=#infrt.precision<FP32>, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
-    "phi_dt.fill_dense_tensor.f32"(%var) {value=[0.0:f32]} : (!infrt.dense_tensor<CPU, FP32, NCHW>) -> ()
+    %t = "phi_dt.create_inited_dense_tensor.cpu.f32"(%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[1, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %filter = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3, 3, 8, 8]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %bias = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=1.5:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %mean = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %scale = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
+    %var = "phi_dt.create_inited_dense_tensor.cpu.f32" (%ctx) {value=3.8:f32, layout=#infrt.layout<NCHW>, lod=[1], dims=[3]}: (!phi.context<CPU>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>)
     %2 = infrt.call@predict(%t, %filter, %bias, %mean, %scale, %var) : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> !infrt.dense_tensor<CPU, FP32, NCHW>
     //phi_dt.print_tensor(%t : !infrt.dense_tensor<CPU, FP32, NCHW>)
     phi_dt.print_tensor(%2 : !infrt.dense_tensor<CPU, FP32, NCHW>)
     infrt.return
   }
tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py

@@ -340,8 +340,8 @@ def convert_op_proto_into_mlir(op_descs):
             attr_type_ = attr_mlir_converter[op_proto[ATTRS][attr][TYPE]]
             if (attr_type_ in [
-                    'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
-                    'BoolArrayAttr', 'I64ArrayAttr'
+                    'StrAttr', 'I32ArrayAttr', 'F32ArrayAttr', 'StrArrayAttr',
+                    'BoolArrayAttr', 'I64ArrayAttr'
             ]):
                 attr_list = attr_type_ + ":$" + attr + ","
                 ARGUMENTS += attr_list
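The generator change adds 'StrAttr' to the set of attribute types that this branch forwards into the generated op's ODS arguments, so string-typed attributes (previously skipped here) are now declared. Purely for illustration, a pd-dialect op carrying such a string attribute is the batch_norm call already used in the test above:

    // data_layout = "NCHW" is a string attribute; with this change it can map to a StrAttr argument in the generated dialect.
    %Y, %MeanOut, %VarianceOut = "pd.batch_norm"(%4, %arg1, %arg2, %arg3, %arg4) {data_layout = "NCHW", epsilon = 9.99999974E-6 : f32, momentum = 0.899999976 : f32} : (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>) -> (!infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>, !infrt.dense_tensor<CPU, FP32, NCHW>)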