BaiXuePrincess / Paddle (fork of PaddlePaddle / Paddle, in sync with the upstream project)
Commit 20e8bf1f (unverified)
Authored on Apr 25, 2022 by baoachun · Committed by GitHub on Apr 25, 2022
fix FlattenContiguousRangeOpConverter out dim error (#42087) (#42184)
* fix FlattenContiguousRangeOpConverter out dim error
* update code
Parent: 8c3c6dae
Showing 1 changed file with 92 additions and 58 deletions:
paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc (+92, -58)
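For context, flatten_contiguous_range collapses the contiguous Paddle axes [start_axis, stop_axis] of its input into a single dimension and leaves the other axes untouched; the "out dim error" in the title is about the converter computing that output shape incorrectly in some cases. A minimal host-side sketch of the intended shape rule follows; the helper name and signature are invented for illustration and are not code from the patch.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Sketch only: expected output dims of flatten_contiguous_range over
    // Paddle axes [start_axis, stop_axis] (batch included, axes already
    // normalized to be non-negative).
    std::vector<int64_t> FlattenContiguousRange(const std::vector<int64_t>& in,
                                                int start_axis, int stop_axis) {
      assert(0 <= start_axis && start_axis <= stop_axis &&
             stop_axis < static_cast<int>(in.size()));
      std::vector<int64_t> out(in.begin(), in.begin() + start_axis);
      int64_t merged = 1;
      for (int i = start_axis; i <= stop_axis; ++i) merged *= in[i];  // merge the range
      out.push_back(merged);
      out.insert(out.end(), in.begin() + stop_axis + 1, in.end());    // keep the rest
      return out;
    }
    // e.g. FlattenContiguousRange({2, 3, 4, 5}, 1, 2) yields {2, 12, 5}.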
paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc @ 20e8bf1f
@@ -30,14 +30,17 @@ class FlattenContiguousRangeOpConverter : public OpConverter {
  public:
   void operator()(const framework::proto::OpDesc& op,
                   const framework::Scope& scope, bool test_mode) override {
     VLOG(3) << "convert a fluid flatten_contiguous_range op to tensorrt layer";
     framework::OpDesc op_desc(op, nullptr);
     // Declare inputs
     auto* input = engine_->GetITensor(op_desc.Input("X")[0]);
-    int dims = input->getDimensions().nbDims;
+    const auto input_dim = input->getDimensions();
+    const int dims = input_dim.nbDims;
     int start_axis = BOOST_GET_CONST(int, op_desc.GetAttr("start_axis"));
     int stop_axis = BOOST_GET_CONST(int, op_desc.GetAttr("stop_axis"));
-    nvinfer1::IShuffleLayer* layer = nullptr;
+    nvinfer1::IShuffleLayer* layer =
+        TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input);
     if (!engine_->with_dynamic_shape()) {
       if (start_axis < 0) start_axis += dims + 1;
       if (stop_axis < 0) stop_axis += dims + 1;
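The static-shape branch above normalizes a negative axis with dims + 1 because in that mode the TensorRT dimensions exclude the implicit batch dimension, so the full Paddle rank is one larger; the dynamic-shape branch (later in this diff) uses dims, since dynamic dims already include the batch. A small illustrative helper, not part of the patch and with an invented name:

    // Sketch only: how a negative Paddle axis would be normalized in the two
    // modes. Static-shape TRT dims exclude batch, so the full Paddle rank is
    // nb_dims + 1; dynamic-shape nb_dims already is the full rank.
    int NormalizePaddleAxis(int axis, int nb_dims, bool dynamic_shape) {
      const int full_rank = dynamic_shape ? nb_dims : nb_dims + 1;
      return axis < 0 ? axis + full_rank : axis;
    }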
@@ -46,7 +49,7 @@ class FlattenContiguousRangeOpConverter : public OpConverter {
       flatten_dim.nbDims = dims - (stop_axis - start_axis);
       for (int i = 0, j = 0; i < dims; ++i) {
         if (start_axis <= i + 1 && i + 1 <= stop_axis) {
-          int dim_i = input->getDimensions().d[i];
+          int dim_i = input_dim.d[i];
           PADDLE_ENFORCE_GT(dim_i, 0,
                             platform::errors::InvalidArgument(
                                 "flatten_contiguous_range input dim "
                                 "should be > 0, but got %d.",
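In this static-shape loop, i indexes the TensorRT dims (batch excluded) while start_axis and stop_axis are Paddle axes (batch included), hence the i + 1 comparison. A worked example under an assumed input shape, not taken from the patch:

    // Worked example (assumed shape): Paddle input [N, 3, 4, 5],
    // start_axis = 1, stop_axis = 2. Static TRT dims are {3, 4, 5}.
    //   i = 0 -> Paddle axis 1: in range, dim_prod = 3
    //   i = 1 -> Paddle axis 2: in range, dim_prod = 12, stored at stop_axis
    //   i = 2 -> Paddle axis 3: out of range, 5 is copied through
    // flatten_dim = {12, 5}, i.e. an output of [N, 12, 5].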
@@ -56,72 +59,103 @@ class FlattenContiguousRangeOpConverter : public OpConverter {
             flatten_dim.d[j++] = dim_prod;
           }
         } else {
-          flatten_dim.d[j++] = input->getDimensions().d[i];
+          flatten_dim.d[j++] = input_dim.d[i];
         }
       }
-      layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input);
       layer->setReshapeDimensions(flatten_dim);
     } else {
       if (start_axis < 0) start_axis += dims;
       if (stop_axis < 0) stop_axis += dims;
-      auto* shape_layer = TRT_ENGINE_ADD_LAYER(engine_, Shape, *input);
-      auto* shape_layer_itensor = shape_layer->getOutput(0);
-      nvinfer1::Dims start_dim, size_dim, stride_dim;
-      start_dim.nbDims = 1;
-      size_dim.nbDims = 1;
-      stride_dim.nbDims = 1;
-      start_dim.d[0] = start_axis;
-      size_dim.d[0] = stop_axis - start_axis + 1;
-      stride_dim.d[0] = 1;
-      auto* slice_layer = TRT_ENGINE_ADD_LAYER(engine_, Slice, *shape_layer_itensor,
-                                               start_dim, size_dim, stride_dim);
-      uint32_t reduce_dim = 1;
-      auto* reduce_prod_layer = TRT_ENGINE_ADD_LAYER(
-          engine_, Reduce, *(slice_layer->getOutput(0)),
-          nvinfer1::ReduceOperation::kPROD, reduce_dim, true);
-      nvinfer1::ITensor* input_shape = nullptr;
-      if (start_axis == 0 && stop_axis == dims - 1) {
-        input_shape = reduce_prod_layer->getOutput(0);
-      } else {
-        std::vector<nvinfer1::ITensor*> itensors;
-        if (start_axis > 0) {
-          nvinfer1::Dims left_start_dim, left_size_dim, left_stride_dim;
-          left_start_dim.nbDims = 1;
-          left_size_dim.nbDims = 1;
-          left_stride_dim.nbDims = 1;
-          left_start_dim.d[0] = 0;
-          left_size_dim.d[0] = start_axis;
-          left_stride_dim.d[0] = 1;
-          auto* slice_layer_left = TRT_ENGINE_ADD_LAYER(
-              engine_, Slice, *shape_layer_itensor, left_start_dim,
-              left_size_dim, left_stride_dim);
-          itensors.push_back(slice_layer_left->getOutput(0));
-        }
-        itensors.push_back(reduce_prod_layer->getOutput(0));
-        if (stop_axis < dims - 1) {
-          nvinfer1::Dims right_start_dim, right_size_dim, right_stride_dim;
-          right_start_dim.nbDims = 1;
-          right_size_dim.nbDims = 1;
-          right_stride_dim.nbDims = 1;
-          right_start_dim.d[0] = stop_axis + 1;
-          right_size_dim.d[0] = dims - stop_axis - 1;
-          right_stride_dim.d[0] = 1;
-          auto* slice_layer_right = TRT_ENGINE_ADD_LAYER(
-              engine_, Slice, *shape_layer_itensor, right_start_dim,
-              right_size_dim, right_stride_dim);
-          itensors.push_back(slice_layer_right->getOutput(0));
-        }
-        auto* concat_layer = TRT_ENGINE_ADD_LAYER(
-            engine_, Concatenation, itensors.data(), itensors.size());
-        concat_layer->setAxis(0);
-        input_shape = concat_layer->getOutput(0);
-      }
-      layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input);
-      layer->setInput(1, *input_shape);
+      int dim_prod = 1;
+      int dim_negative = 0;
+      nvinfer1::Dims flatten_dim;
+      flatten_dim.nbDims = dims - (stop_axis - start_axis);
+      bool need_slice = false;
+      for (int i = 0, j = 0; i < dims; ++i) {
+        int dim_i = input_dim.d[i];
+        if (start_axis <= i && i <= stop_axis) {
+          if (dim_i < 0) {
+            need_slice = true;
+            break;
+          }
+          dim_prod *= dim_i;
+          if (i == stop_axis) {
+            flatten_dim.d[j++] = dim_prod;
+          }
+        } else {
+          if (dim_i < 0) dim_negative++;
+          if (dim_negative > 1) {
+            need_slice = true;
+            break;
+          }
+          flatten_dim.d[j++] = input_dim.d[i];
+        }
+      }
+      if (need_slice) {
+        VLOG(3) << "slice input dim when the input dimension has -1";
+        auto* shape_layer = TRT_ENGINE_ADD_LAYER(engine_, Shape, *input);
+        auto* shape_layer_itensor = shape_layer->getOutput(0);
+        nvinfer1::Dims start_dim, size_dim, stride_dim;
+        start_dim.nbDims = 1;
+        size_dim.nbDims = 1;
+        stride_dim.nbDims = 1;
+        start_dim.d[0] = start_axis;
+        size_dim.d[0] = stop_axis - start_axis + 1;
+        stride_dim.d[0] = 1;
+        auto* slice_layer = TRT_ENGINE_ADD_LAYER(engine_, Slice, *shape_layer_itensor,
+                                                 start_dim, size_dim, stride_dim);
+        uint32_t reduce_dim = 1;
+        auto* reduce_prod_layer = TRT_ENGINE_ADD_LAYER(
+            engine_, Reduce, *(slice_layer->getOutput(0)),
+            nvinfer1::ReduceOperation::kPROD, reduce_dim, true);
+        nvinfer1::ITensor* input_shape = nullptr;
+        if (start_axis == 0 && stop_axis == dims - 1) {
+          input_shape = reduce_prod_layer->getOutput(0);
+        } else {
+          std::vector<nvinfer1::ITensor*> itensors;
+          if (start_axis > 0) {
+            nvinfer1::Dims left_start_dim, left_size_dim, left_stride_dim;
+            left_start_dim.nbDims = 1;
+            left_size_dim.nbDims = 1;
+            left_stride_dim.nbDims = 1;
+            left_start_dim.d[0] = 0;
+            left_size_dim.d[0] = start_axis;
+            left_stride_dim.d[0] = 1;
+            auto* slice_layer_left = TRT_ENGINE_ADD_LAYER(
+                engine_, Slice, *shape_layer_itensor, left_start_dim,
+                left_size_dim, left_stride_dim);
+            itensors.push_back(slice_layer_left->getOutput(0));
+          }
+          itensors.push_back(reduce_prod_layer->getOutput(0));
+          if (stop_axis < dims - 1) {
+            nvinfer1::Dims right_start_dim, right_size_dim, right_stride_dim;
+            right_start_dim.nbDims = 1;
+            right_size_dim.nbDims = 1;
+            right_stride_dim.nbDims = 1;
+            right_start_dim.d[0] = stop_axis + 1;
+            right_size_dim.d[0] = dims - stop_axis - 1;
+            right_stride_dim.d[0] = 1;
+            auto* slice_layer_right = TRT_ENGINE_ADD_LAYER(
+                engine_, Slice, *shape_layer_itensor, right_start_dim,
+                right_size_dim, right_stride_dim);
+            itensors.push_back(slice_layer_right->getOutput(0));
+          }
+          auto* concat_layer = TRT_ENGINE_ADD_LAYER(
+              engine_, Concatenation, itensors.data(), itensors.size());
+          concat_layer->setAxis(0);
+          input_shape = concat_layer->getOutput(0);
+        }
+        layer->setInput(1, *input_shape);
+      } else {
+        layer->setReshapeDimensions(flatten_dim);
+      }
     }
     auto output_name = op_desc.Output("Out")[0];
     RreplenishLayerAndOutput(layer, "flatten_contiguous_range", {output_name},
                              test_mode);
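The core of the fix is in the dynamic-shape branch: the converter now first tries to compute the reshape dimensions on the host, and only falls back to assembling the shape at runtime (Shape -> Slice -> Reduce(kPROD) -> Concatenation, fed into the Shuffle layer via setInput(1, ...)) when that is impossible. A condensed sketch of that host-side decision, with an invented helper name and std::vector standing in for nvinfer1::Dims:

    #include <vector>

    // Sketch only: mirrors the need_slice test in the diff above. The reshape
    // dims can be fixed at build time unless a merged axis is unknown (-1) or
    // more than one pass-through axis is unknown.
    bool NeedRuntimeShape(const std::vector<int>& dims, int start_axis,
                          int stop_axis) {
      int negative_outside = 0;
      for (int i = 0; i < static_cast<int>(dims.size()); ++i) {
        if (start_axis <= i && i <= stop_axis) {
          if (dims[i] < 0) return true;   // merged extent unknown at build time
        } else if (dims[i] < 0 && ++negative_outside > 1) {
          return true;                    // at most one free -1 is allowed
        }
      }
      return false;
    }

Keeping at most one -1 in the fixed-dims path matches TensorRT's reshape rule that only a single dimension may be left as -1 to be inferred at runtime.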