机器未来 / Paddle (forked from PaddlePaddle / Paddle)
Commit 13288621

Authored Aug 26, 2020 by zlsh80826

change preprocessing mask position

Parent: 2ca3fe5d

Showing 3 changed files with 60 additions and 27 deletions (+60 -27)
paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc   +26  -1
paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc     +24  -22
paddle/fluid/inference/tensorrt/convert/slice_op.cc                +10  -4
paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc
@@ -11,6 +11,7 @@ limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/helper.h"
#include "paddle/fluid/inference/tensorrt/plugin/cast_int_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/emb_eltwise_layernorm_plugin.h"

namespace paddle {

@@ -80,11 +81,35 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter {

    nvinfer1::ILayer* layer = nullptr;
    if (engine_->with_dynamic_shape()) {
      auto pos_tensor = engine_->GetITensor("eval_placeholder_2");
      plugin::CastIntPluginDynamic* cast_plugin =
          new plugin::CastIntPluginDynamic();
      auto cast_layer = engine_->AddPluginV2(&pos_tensor, 1, cast_plugin);
      auto casted_pos_tensor = cast_layer->getOutput(0);
      auto reshape_layer =
          TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *casted_pos_tensor);
      nvinfer1::Dims2 reshape_dim(0, 0);
      nvinfer1::Permutation perm{1, 0, 2};
      reshape_layer->setFirstTranspose(perm);
      reshape_layer->setReshapeDimensions(reshape_dim);
      auto imask_layer = TRT_ENGINE_ADD_LAYER(
          engine_, Reduce, *reshape_layer->getOutput(0),
          nvinfer1::ReduceOperation::kMAX, 1, false);
      engine_->SetITensor("imask_tensor", imask_layer->getOutput(0));

      plugin::DynamicPluginTensorRT* plugin = nullptr;
      plugin = new plugin::EmbEltwiseLayernormPluginDynamic<float>(
          input_embs, bias, scale, emb_sizes, bias_size, scale_size, hidden,
          eps);
      layer = engine_->AddPluginV2(input_ids.data(), input_num, plugin);
      auto plugin_layer = engine_->AddPluginV2(input_ids.data(), input_num, plugin);
      nvinfer1::Permutation permutation{1, 0, 2, 3, 4};
      auto trans_layer =
          TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *plugin_layer->getOutput(0));
      trans_layer->setFirstTranspose(permutation);
      layer = trans_layer;
    } else {
      PADDLE_THROW(platform::errors::Fatal(
          "You are running the Ernie(Bert) model in static" ...
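
For reference, the mask preprocessing that this commit moves into the embedding converter boils down to two stock TensorRT layers. The snippet below is a minimal standalone sketch of that pattern against the plain nvinfer1 API rather than Paddle's TRT_ENGINE_ADD_LAYER macro; `network` and `pos_tensor` (the already-cast position-id tensor) are assumed to exist, and the comments describe only the mechanical effect of each call.

    #include "NvInfer.h"

    // Minimal sketch (not the Paddle converter itself): derive the "imask"
    // tensor from a position tensor the way the diff does.
    nvinfer1::ITensor* BuildInputMask(nvinfer1::INetworkDefinition* network,
                                      nvinfer1::ITensor* pos_tensor) {
      // Shuffle layer: swap the first two axes, then reshape to 2-D
      // (a 0 in the reshape dims copies the corresponding input extent).
      nvinfer1::IShuffleLayer* reshape = network->addShuffle(*pos_tensor);
      nvinfer1::Permutation perm{1, 0, 2};
      reshape->setFirstTranspose(perm);
      reshape->setReshapeDimensions(nvinfer1::Dims2(0, 0));

      // Reduce layer: max over axis 0 (reduceAxes bitmask = 1), dropping the
      // reduced dimension, which yields the tensor stored as "imask_tensor".
      nvinfer1::IReduceLayer* imask = network->addReduce(
          *reshape->getOutput(0), nvinfer1::ReduceOperation::kMAX,
          /*reduceAxes=*/1, /*keepDimensions=*/false);
      return imask->getOutput(0);
    }
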
paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc
@@ -13,7 +13,6 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
#include "paddle/fluid/inference/tensorrt/plugin/cast_int_plugin.h"
#include "paddle/fluid/inference/tensorrt/plugin/qkv_to_context_plugin.h"

namespace paddle {

@@ -114,29 +113,32 @@ class MultiheadMatMulOpConverter : public OpConverter {

                                   static_cast<void*>(bias_data),
                                   static_cast<int32_t>(bias_t->numel())};
      nvinfer1::Permutation permutation{1, 0, 2, 3, 4};
      nvinfer1::Permutation permutation{0, 1, 2, 3, 4};
      auto trans_layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input);
      trans_layer->setFirstTranspose(permutation);

      auto* fc_layer =
          TRT_ENGINE_ADD_LAYER(engine_, FullyConnected,
                               *trans_layer->getOutput(0), n, weight, bias);

      auto pos_tensor = engine_->GetITensor("eval_placeholder_2");
      plugin::CastIntPluginDynamic* cast_plugin =
          new plugin::CastIntPluginDynamic();
      auto cast_layer = engine_->AddPluginV2(&pos_tensor, 1, cast_plugin);
      auto casted_pos_tensor = cast_layer->getOutput(0);
      auto reshape_layer =
          TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *casted_pos_tensor);
      nvinfer1::Dims2 reshape_dim(0, 0);
      nvinfer1::Permutation perm{1, 0, 2};
      reshape_layer->setFirstTranspose(perm);
      reshape_layer->setReshapeDimensions(reshape_dim);
      auto reduce_layer = TRT_ENGINE_ADD_LAYER(
          engine_, Reduce, *reshape_layer->getOutput(0),
          nvinfer1::ReduceOperation::kMAX, 1, false);
      /*
      auto pos_tensor = engine_->GetITensor("eval_placeholder_2");
      plugin::CastIntPluginDynamic* cast_plugin =
          new plugin::CastIntPluginDynamic();
      auto cast_layer = engine_->AddPluginV2(&pos_tensor, 1, cast_plugin);
      auto casted_pos_tensor = cast_layer->getOutput(0);
      auto reshape_layer =
          TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *casted_pos_tensor);
      nvinfer1::Dims2 reshape_dim(0, 0);
      nvinfer1::Permutation perm{1, 0, 2};
      reshape_layer->setFirstTranspose(perm);
      reshape_layer->setReshapeDimensions(reshape_dim);
      auto reduce_layer =
          TRT_ENGINE_ADD_LAYER(engine_, Reduce,
                               *reshape_layer->getOutput(0),
                               nvinfer1::ReduceOperation::kMAX, 1, false);
      */
      auto imask_tensor = engine_->GetITensor("imask_tensor");

      auto creator = GetPluginRegistry()->getPluginCreator(
          "CustomQKVToContextPluginDynamic", "1");

@@ -149,8 +151,7 @@ class MultiheadMatMulOpConverter : public OpConverter {

          {"type_id", &type, nvinfer1::PluginFieldType::kINT32, 1},
          {"hidden_size", &hidden, nvinfer1::PluginFieldType::kINT32, 1},
          {"num_heads", &head_number, nvinfer1::PluginFieldType::kINT32, 1},
          {"has_mask", &has_mask, nvinfer1::PluginFieldType::kINT32, 1},
          // no bool type
          {"has_mask", &has_mask, nvinfer1::PluginFieldType::kINT32, 1},
      };
      nvinfer1::PluginFieldCollection* pluginPtr =
          static_cast<nvinfer1::PluginFieldCollection*>( ...

@@ -164,7 +165,8 @@ class MultiheadMatMulOpConverter : public OpConverter {

          creator->createPlugin("CustomQKVToContextPluginDynamic", pluginPtr);
      std::vector<nvinfer1::ITensor*> plugin_inputs;
      plugin_inputs.push_back(fc_layer->getOutput(0));
      plugin_inputs.push_back(reduce_layer->getOutput(0));
      // plugin_inputs.push_back(reduce_layer->getOutput(0));
      plugin_inputs.push_back(imask_tensor);
      auto plugin_layer = engine_->network()->addPluginV2(
          plugin_inputs.data(), plugin_inputs.size(), *pluginObj);
      assert(plugin_layer != nullptr);
      ...
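
The converter builds the attention plugin through TensorRT's plugin registry rather than instantiating a class directly. Below is a minimal, self-contained sketch of that pattern. It assumes the "CustomQKVToContextPluginDynamic" creator is already registered (for example by a TensorRT BERT plugin library) and that `network`, `qkv_tensor`, and `mask_tensor` exist; the field names mirror the diff, while the numeric values are placeholders for illustration, not values taken from Paddle.

    #include <cassert>
    #include <vector>
    #include "NvInfer.h"

    // Sketch of the registry-based plugin creation pattern used in the diff.
    nvinfer1::ILayer* AddQkvToContext(nvinfer1::INetworkDefinition* network,
                                      nvinfer1::ITensor* qkv_tensor,
                                      nvinfer1::ITensor* mask_tensor) {
      auto* creator = getPluginRegistry()->getPluginCreator(
          "CustomQKVToContextPluginDynamic", "1");
      assert(creator != nullptr);  // requires the creator to be registered

      int type_id = 0;        // placeholder: data-type selector for the plugin
      int hidden_size = 768;  // placeholder value
      int num_heads = 12;     // placeholder value
      int has_mask = 1;       // int instead of bool ("no bool type" in the diff)
      std::vector<nvinfer1::PluginField> fields{
          {"type_id", &type_id, nvinfer1::PluginFieldType::kINT32, 1},
          {"hidden_size", &hidden_size, nvinfer1::PluginFieldType::kINT32, 1},
          {"num_heads", &num_heads, nvinfer1::PluginFieldType::kINT32, 1},
          {"has_mask", &has_mask, nvinfer1::PluginFieldType::kINT32, 1},
      };
      nvinfer1::PluginFieldCollection fc;
      fc.nbFields = static_cast<int>(fields.size());
      fc.fields = fields.data();

      // Describe the plugin with the field collection, then add it to the
      // network with the QKV tensor and the mask tensor as inputs.
      nvinfer1::IPluginV2* plugin =
          creator->createPlugin("CustomQKVToContextPluginDynamic", &fc);
      nvinfer1::ITensor* inputs[] = {qkv_tensor, mask_tensor};
      return network->addPluginV2(inputs, 2, *plugin);
    }
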
paddle/fluid/inference/tensorrt/convert/slice_op.cc
@@ -23,9 +23,8 @@ class SliceOpConverter : public OpConverter {

 public:
  void operator()(const framework::proto::OpDesc& op,
                  const framework::Scope& scope, bool test_mode) override {
    // This OP is implemented by trt dynamic shpae plugin.
    // Dynamic shape plugin requires TRT version greater than 6.0.
    std::cerr << "slice op converter\n" << std::endl;
    // This OP is implemented by trt dynamic shpae plugin.
    // Dynamic shape plugin requires TRT version greater than 6.0.
#if IS_TRT_VERSION_GE(6000)
    VLOG(4) << "convert slice op to tensorrt layer";
    framework::OpDesc op_desc(op, nullptr);

@@ -41,10 +40,17 @@ class SliceOpConverter : public OpConverter {

    nvinfer1::ILayer* layer = nullptr;
    if (engine_->with_dynamic_shape()) {
      nvinfer1::Permutation permutation{1, 0, 2, 3, 4};
      auto trans_layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input);
      trans_layer->setFirstTranspose(permutation);

      std::vector<nvinfer1::ITensor*> plugin_inputs;
      plugin_inputs.emplace_back(trans_layer->getOutput(0));

      bool ban_fp16 = engine_->disable_trt_plugin_fp16();
      plugin::SlicePluginDynamic* plugin =
          new plugin::SlicePluginDynamic(starts, ends, axes, ban_fp16);
      layer = engine_->AddPluginV2(&input, 1, plugin);
      layer = engine_->AddPluginV2(plugin_inputs.data(), plugin_inputs.size(),
                                   plugin);
    } else {
      PADDLE_THROW(platform::errors::Fatal(
          "You are running the Ernie(Bert) model in static" ...
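
Both the slice and multihead converters now transpose their input with a Shuffle layer before handing it to a dynamic-shape plugin, and pass an explicit input list instead of the raw tensor. The following is a minimal standalone sketch of that preparation step using the plain nvinfer1 API; `network`, `input`, and the already-created `plugin` object are assumptions, with the plugin standing in for Paddle's SlicePluginDynamic.

    #include <vector>
    #include "NvInfer.h"

    // Sketch: transpose the first two axes of a 5-D tensor, then feed the
    // transposed tensor (not the raw input) to a plugin layer.
    nvinfer1::ILayer* AddTransposedPluginInput(
        nvinfer1::INetworkDefinition* network, nvinfer1::ITensor* input,
        nvinfer1::IPluginV2& plugin) {
      // Swap axes 0 and 1, keep the remaining axes in place, as in
      // Permutation{1, 0, 2, 3, 4} from the diff.
      nvinfer1::Permutation permutation{1, 0, 2, 3, 4};
      auto* trans_layer = network->addShuffle(*input);
      trans_layer->setFirstTranspose(permutation);

      // Collect the plugin inputs explicitly, mirroring plugin_inputs above.
      std::vector<nvinfer1::ITensor*> plugin_inputs;
      plugin_inputs.emplace_back(trans_layer->getOutput(0));
      return network->addPluginV2(
          plugin_inputs.data(), static_cast<int>(plugin_inputs.size()), plugin);
    }
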