Commit ac744db1 (unverified)
Authored Jan 28, 2022 by Wangzheee; committed by GitHub on Jan 28, 2022
Parent: 397781f1

fix Paddle-Trt concat, slice (#39277)
Showing 5 changed files with 32 additions and 28 deletions (+32 −28)
paddle/fluid/inference/tensorrt/convert/concat_op.cc  +9 −4
paddle/fluid/inference/tensorrt/convert/slice_op.cc  +2 −4
paddle/fluid/inference/tensorrt/op_teller.cc  +2 −4
paddle/fluid/inference/tensorrt/plugin/special_slice_plugin.cu  +16 −10
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py  +3 −6
paddle/fluid/inference/tensorrt/convert/concat_op.cc

@@ -44,12 +44,17 @@ class ConcatOpConverter : public OpConverter {
       itensors.push_back(engine_->GetITensor(input_name));
     }
     int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis"));
-    auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Concatenation, itensors.data(),
-                                       itensors.size());
+    if (axis == -1) {
+      axis = (engine_->GetITensor(op_desc.Input("X").front())
+                  ->getDimensions()).nbDims - 1;
+    } else {
+      if (!engine_->with_dynamic_shape()) {
+        axis = axis - 1;  // Remove batch dim
+      }
+    }
+    auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Concatenation, itensors.data(),
+                                       itensors.size());
     layer->setAxis(axis);
     auto output_name = op_desc.Output("Out")[0];
     RreplenishLayerAndOutput(layer, "concat", {output_name}, test_mode);
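The converter change above resolves axis == -1 against the rank of the ITensor that TensorRT actually sees, and it only subtracts the batch dimension when the engine runs in static-shape (implicit batch) mode. As a rough, hypothetical illustration of that mapping (map_concat_axis is not a Paddle function, just a host-side restatement of the hunk):

// Hypothetical sketch of the axis mapping done in the hunk above; not Paddle code.
#include <cassert>

int map_concat_axis(int paddle_axis, int trt_rank, bool with_dynamic_shape) {
  if (paddle_axis == -1) {
    // "Last axis" is resolved against the rank of the TensorRT input tensor,
    // which already excludes the batch dimension in implicit-batch mode.
    return trt_rank - 1;
  }
  if (!with_dynamic_shape) {
    return paddle_axis - 1;  // Remove batch dim, as in the converter.
  }
  return paddle_axis;
}

int main() {
  assert(map_concat_axis(-1, 3, /*with_dynamic_shape=*/false) == 2);  // implicit batch, NCHW input
  assert(map_concat_axis(2, 3, /*with_dynamic_shape=*/false) == 1);   // Paddle axis 2 -> TRT axis 1
  assert(map_concat_axis(2, 4, /*with_dynamic_shape=*/true) == 2);    // dynamic shape: axis unchanged
  return 0;
}

The same mapping is why the op_teller change further below only needs to reject axis == 0 in static-shape mode.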
paddle/fluid/inference/tensorrt/convert/slice_op.cc

 /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
 http://www.apache.org/licenses/LICENSE-2.0
 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ...

@@ -72,7 +69,8 @@ class SliceOpConverter : public OpConverter {
     nvinfer1::ILayer* layer = nullptr;
     if (engine_->with_dynamic_shape()) {
 #if IS_TRT_VERSION_GE(6000)
-      if (engine_->use_oss() && engine_->with_ernie()) {
+      if (engine_->use_oss() && engine_->with_ernie() &&
+          input_dims.nbDims == 4) {
         std::vector<nvinfer1::ITensor*> plugin_inputs;
         // plugin_inputs.emplace_back(trans_layer->getOutput(0));
         plugin_inputs.emplace_back(input);
paddle/fluid/inference/tensorrt/op_teller.cc

@@ -421,10 +421,8 @@ bool OpTeller::Tell(const framework::ir::Node* node, bool use_no_calib_int8,
         return false;
       }
       int axis = BOOST_GET_CONST(int, desc.GetAttr("axis"));
-      if (with_dynamic_shape) {
-        if (axis < 0) return false;
-      } else {
-        if (axis <= 0) return false;
+      if (!with_dynamic_shape) {
+        if (axis == 0) return false;
       }
       auto concat_inputs = desc.Inputs();
       if (concat_inputs.find("AxisTensor") != concat_inputs.end()) {
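After this change the teller no longer rejects negative concat axes: in static-shape mode only axis 0 (the batch dimension) is refused, and in dynamic-shape mode the axis attribute is not checked at all. A hedged, standalone restatement of that acceptance rule (concat_axis_supported is an illustrative name, not a Paddle API):

// Illustrative predicate mirroring the op_teller hunk above; not Paddle code.
#include <cstdio>

bool concat_axis_supported(int axis, bool with_dynamic_shape) {
  if (!with_dynamic_shape && axis == 0) {
    // Implicit-batch engines cannot concatenate along the batch axis.
    return false;
  }
  // Everything else, including axis == -1, is left to the converter to resolve.
  return true;
}

int main() {
  std::printf("static  axis=0  -> %d\n", concat_axis_supported(0, false));   // 0
  std::printf("static  axis=-1 -> %d\n", concat_axis_supported(-1, false));  // 1
  std::printf("dynamic axis=-1 -> %d\n", concat_axis_supported(-1, true));   // 1
  return 0;
}

This is also the rule the updated expectations in test_trt_convert_concat.py (further below) encode.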
paddle/fluid/inference/tensorrt/plugin/special_slice_plugin.cu

@@ -113,32 +113,38 @@ nvinfer1::DataType SpecialSlicePluginDynamic::getOutputDataType(
 template <typename T>
 __global__ void SpecialSliceKernel(const T* slice_input,
                                    const int32_t* cu_seqlens, T* output) {
-  const int hidden = blockDim.x;
+  const int hidden = blockDim.x * gridDim.y;
   const int batch = blockIdx.x;
+  const int local_idx = blockIdx.y * blockDim.y + threadIdx.x;

-  output[batch * hidden + threadIdx.x] =
-      slice_input[cu_seqlens[batch] * hidden + threadIdx.x];
+  output[batch * hidden + local_idx] =
+      slice_input[cu_seqlens[batch] * hidden + local_idx];
 }

 int SpecialSlicePluginDynamic::enqueue(
     const nvinfer1::PluginTensorDesc* input_desc,
     const nvinfer1::PluginTensorDesc* output_desc, const void* const* inputs,
     void* const* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
-  auto input_dims = input_desc[0].dims;  // (sum(S), 768, 1, 1)
-  auto out_dims = output_desc[0].dims;   // (batch, 768, 1, 1)
+  auto input_dims = input_desc[0].dims;  // (sum(S), hidden, 1, 1)
+  auto out_dims = output_desc[0].dims;   // (batch, hidden, 1, 1)

-  assert(input_desc[0].type == nvinfer1::DataType::kHALF);
+  PADDLE_ENFORCE_EQ(input_desc[0].type, nvinfer1::DataType::kHALF,
+                    platform::errors::InvalidArgument(
+                        "Type of input should be half."));

   const int32_t hidden = input_dims.d[1];
-  const int num_blocks = out_dims.d[0];  // batch size
-  const int num_threads = hidden;
+  PADDLE_ENFORCE_EQ(hidden % 128, 0,
+                    platform::errors::InvalidArgument(
+                        "hidden should be multiple of 128."));
+  constexpr int num_threads = 128;
+  const dim3 blocks(out_dims.d[0], hidden / num_threads);

   const half* slice_input = static_cast<const half*>(inputs[0]);
   const int32_t* cu_seqlens = static_cast<const int32_t*>(inputs[1]);
   half* output = static_cast<half*>(outputs[0]);

-  SpecialSliceKernel<<<num_blocks, num_threads, 0, stream>>>(slice_input,
-                                                             cu_seqlens, output);
+  SpecialSliceKernel<<<blocks, num_threads, 0, stream>>>(slice_input,
+                                                         cu_seqlens, output);

   return cudaGetLastError() != cudaSuccess;
 }
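The plugin change above replaces the one-block-per-batch launch (which breaks once hidden exceeds CUDA's 1024-threads-per-block limit) with a (batch, hidden / 128) grid of 128-thread blocks, and enqueue now enforces hidden % 128 == 0 instead of assuming hidden == 768. Below is a minimal, self-contained CUDA sketch of the same gather pattern using float instead of half and made-up names (GatherFirstTokenKernel is not the Paddle kernel), under the assumption that each output row copies the first token of its sequence from the variable-length packed input:

// Standalone sketch of the 2-D-grid gather used by the plugin; not Paddle code.
#include <cstdint>
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void GatherFirstTokenKernel(const float* packed,
                                       const int32_t* cu_seqlens, float* out) {
  const int hidden = blockDim.x * gridDim.y;               // total row width
  const int batch = blockIdx.x;                            // one x-block row per batch entry
  const int col = blockIdx.y * blockDim.x + threadIdx.x;   // column within the row
  out[batch * hidden + col] = packed[cu_seqlens[batch] * hidden + col];
}

int main() {
  const int batch = 2, hidden = 256, threads = 128;        // hidden % threads == 0
  std::vector<float> h_packed(5 * hidden);                 // 5 packed token rows
  for (size_t i = 0; i < h_packed.size(); ++i) h_packed[i] = static_cast<float>(i);
  std::vector<int32_t> h_cu{0, 3};                         // row offset of each sequence's first token

  float *d_packed, *d_out; int32_t* d_cu;
  cudaMalloc(&d_packed, h_packed.size() * sizeof(float));
  cudaMalloc(&d_out, batch * hidden * sizeof(float));
  cudaMalloc(&d_cu, h_cu.size() * sizeof(int32_t));
  cudaMemcpy(d_packed, h_packed.data(), h_packed.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_cu, h_cu.data(), h_cu.size() * sizeof(int32_t), cudaMemcpyHostToDevice);

  const dim3 blocks(batch, hidden / threads);              // same launch shape as the fixed plugin
  GatherFirstTokenKernel<<<blocks, threads>>>(d_packed, d_cu, d_out);

  std::vector<float> h_out(batch * hidden);
  cudaMemcpy(h_out.data(), d_out, h_out.size() * sizeof(float), cudaMemcpyDeviceToHost);
  std::printf("out[0][0]=%.0f out[1][0]=%.0f\n", h_out[0], h_out[hidden]);  // expect 0 and 768
  cudaFree(d_packed); cudaFree(d_out); cudaFree(d_cu);
  return 0;
}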
python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py

@@ -71,7 +71,7 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
         def generate_weight1(attrs: List[Dict[str, Any]]):
             return np.zeros([1]).astype(np.int32)

-        for dims in [1, 2, 3, 4]:
+        for dims in [2, 3, 4]:
             for num_input in [0, 1]:
                 for batch in [1, 2, 4]:
                     for axis in [-1, 0, 1, 2, 3]:

@@ -277,12 +277,9 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
         def generate_trt_nodes_num(attrs, dynamic_shape):
             if dynamic_shape == True:
-                if attrs[0]['axis'] >= 0:
-                    return 1, 4
-                else:
-                    return 0, 5
+                return 1, 4
             else:
-                if attrs[0]['axis'] > 0:
+                if attrs[0]['axis'] != 0:
                     return 1, 4
                 else:
                     return 0, 5