Crayon鑫 / Paddle (forked from PaddlePaddle / Paddle)

Commit ae79a56b (unverified)
Authored June 24, 2021 by zlsh80826; committed by GitHub on June 24, 2021.
[Paddle-TRT] TensorRT8 void** compatibility (#33662)

* add trt LT version helper
* trt8 requires void** to be void* const*
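The version helper named in the first bullet is not itself visible in the hunks below, which only show call sites of IS_TRT_VERSION_LT. A rough sketch of what such a helper can look like (an assumption, not the commit's actual definition), built on the NV_TENSORRT_* macros from NvInferVersion.h:

// Hypothetical sketch of the "trt LT version helper"; the real definition
// added by this commit lives outside the hunks shown on this page.
// With this encoding, 8000 corresponds to TensorRT 8.0.0.0.
#define TRT_VERSION                                     \
  (NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 + \
   NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD)
#define IS_TRT_VERSION_LT(version) (TRT_VERSION < (version))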
Parent: 1def9e05
Showing 25 changed files with 113 additions and 3 deletions (+113 -3).
paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu  +4 -0
paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h   +4 -0
paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu       +4 -0
paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h        +4 -1
paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu              +4 -0
paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h               +4 -0
paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu        +4 -0
paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h         +4 -0
paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu     +4 -0
paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h      +4 -0
paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu        +4 -0
paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h         +4 -0
paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu              +5 -0
paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h               +4 -0
paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu             +5 -0
paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h              +4 -0
paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu             +5 -0
paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h              +4 -0
paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu             +5 -0
paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h              +4 -0
paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu             +5 -0
paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h              +4 -0
paddle/fluid/inference/tensorrt/plugin/trt_plugin.h                   +12 -2
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu          +4 -0
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h           +4 -0
paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu

@@ -166,7 +166,11 @@ int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
}

int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                                   void** outputs, void* workspace,
#else
                                   void* const* outputs, void* workspace,
#endif
                                   cudaStream_t stream) {
  return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
}
paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h

@@ -42,7 +42,11 @@ class AnchorGeneratorPlugin : public nvinfer1::IPluginV2Ext {
  bool supportsFormat(nvinfer1::DataType type,
                      nvinfer1::TensorFormat format) const override;
  size_t getWorkspaceSize(int max_batch_size) const override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;
  int initialize() override;
  void terminate() override;
paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu

@@ -122,7 +122,11 @@ int ElementWisePlugin::initialize() {
}

int ElementWisePlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                               void** outputs, void* workspace,
#else
                               void* const* outputs, void* workspace,
#endif
                               cudaStream_t stream) {
  const float* x = reinterpret_cast<const float*>(inputs[0]);
  const float* y = reinterpret_cast<const float*>(inputs[1]);
paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h

@@ -58,8 +58,11 @@ class ElementWisePlugin : public PluginTensorRT {
  int initialize() override;

  // execute the layer
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream);

 protected:
paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu

@@ -100,7 +100,11 @@ __global__ void no_exact_gelu_kernel(const T a, const T b, const T c, int n,
}

int GeluPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                        void** outputs, void*, cudaStream_t stream) {
#else
                        void* const* outputs, void*, cudaStream_t stream) {
#endif
  const auto& input_dims = this->getInputDims(0);
  int num = batch_size;
  for (int i = 0; i < input_dims.nbDims; i++) {
paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h

@@ -44,7 +44,11 @@ class GeluPlugin : public PluginTensorRT {
                      nvinfer1::PluginFormat format) const override;
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nb_input_dims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

 protected:
paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu

@@ -59,7 +59,11 @@ __global__ void hard_swish_kernel(float threshold, float scale, float offset,
}

int HardSwishPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                             void** outputs, void*, cudaStream_t stream) {
#else
                             void* const* outputs, void*, cudaStream_t stream) {
#endif
  const auto& input_dims = this->getInputDims(0);
  int num = batch_size;
  for (int i = 0; i < input_dims.nbDims; i++) {
paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h

@@ -49,7 +49,11 @@ class HardSwishPlugin : public PluginTensorRT {
  int initialize() override { return 0; }

  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

 protected:
paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu

@@ -59,7 +59,11 @@ nvinfer1::Dims InstanceNormPlugin::getOutputDimensions(
}

int InstanceNormPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                                void** outputs, void* workspace,
#else
                                void* const* outputs, void* workspace,
#endif
                                cudaStream_t stream) {
  const auto& input_dims = this->getInputDims(0);
paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h

@@ -101,7 +101,11 @@ class InstanceNormPlugin : public PluginTensorRT {
  int getNbOutputs() const override { return 1; }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

  bool supportsFormat(nvinfer1::DataType type,
paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu

@@ -43,7 +43,11 @@ nvinfer1::Dims LayerNormPlugin::getOutputDimensions(
}

int LayerNormPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                             void** outputs, void* workspace,
#else
                             void* const* outputs, void* workspace,
#endif
                             cudaStream_t stream) {
  const auto& input_dims = this->getInputDims(0);
  const float* input = reinterpret_cast<const float*>(inputs[0]);
paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h

@@ -100,7 +100,11 @@ class LayerNormPlugin : public PluginTensorRT {
  int getNbOutputs() const override { return 1; }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;
};
paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu

@@ -42,7 +42,12 @@ nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
}

int PoolPlugin::enqueue(int batchSize, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                        void** outputs, void* workspace, cudaStream_t stream) {
#else
                        void* const* outputs, void* workspace,
                        cudaStream_t stream) {
#endif
  auto const& input_dims = this->getInputDims(0);
  int input_size = 0;
  float const* idata = reinterpret_cast<float const*>(inputs[0]);
paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h

@@ -128,7 +128,11 @@ class PoolPlugin : public PluginTensorRT {
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
  int initialize() override { return 0; }
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

 private:
paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu

@@ -57,7 +57,12 @@ nvinfer1::Dims PReluPlugin::getOutputDimensions(int index,
}

int PReluPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                         void** outputs, void* workspace, cudaStream_t stream) {
#else
                         void* const* outputs, void* workspace,
                         cudaStream_t stream) {
#endif
  // input dims is CHW.
  const auto& input_dims = this->getInputDims(0);
  const float* input = reinterpret_cast<const float*>(inputs[0]);
paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h

@@ -80,7 +80,11 @@ class PReluPlugin : public PluginTensorRT {
  int getNbOutputs() const override { return 1; }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;
};
paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu

@@ -111,7 +111,12 @@ nvinfer1::Dims SlicePlugin::getOutputDimensions(int index,
}

int SlicePlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                         void** outputs, void* workspace, cudaStream_t stream) {
#else
                         void* const* outputs, void* workspace,
                         cudaStream_t stream) {
#endif
  auto input_dims = getInputDims(0);
  // notice input dims is [C, H, W], add input batch dim here
paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h

@@ -44,7 +44,11 @@ class SlicePlugin : public PluginTensorRT {
                      nvinfer1::PluginFormat format) const override;
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nb_input_dims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

 protected:
paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu

@@ -126,7 +126,12 @@ __global__ void split_kernel(int nsegment,
}

int SplitPlugin::enqueue(int batchSize, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                         void** outputs, void* workspace, cudaStream_t stream) {
#else
                         void* const* outputs, void* workspace,
                         cudaStream_t stream) {
#endif
  const int* d_segment_offsets_ptr =
      thrust::raw_pointer_cast(&d_segment_offsets_[0]);
  float const* input_ptr = reinterpret_cast<float const*>(inputs[0]);
paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h

@@ -60,7 +60,11 @@ class SplitPlugin : public PluginTensorRTV2Ext {
  int initialize() override;
  void terminate() override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;

  void destroy() override { delete this; }
paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu

@@ -85,7 +85,12 @@ __global__ void swish_kernel<half>(int num, const half *input, half *output,
}

int SwishPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                         void** outputs, void* workspace, cudaStream_t stream) {
#else
                         void* const* outputs, void* workspace,
                         cudaStream_t stream) {
#endif
  // input dims is CHW.
  const auto& input_dims = this->getInputDims(0);
  const float* input = reinterpret_cast<const float*>(inputs[0]);
paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h

@@ -67,7 +67,11 @@ class SwishPlugin : public PluginTensorRT {
  int getNbOutputs() const override { return 1; }
  nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                     int nbInputDims) override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batchSize, const void* const* inputs, void** outputs,
#else
  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;
};
paddle/fluid/inference/tensorrt/plugin/trt_plugin.h

@@ -82,8 +82,13 @@ class PluginTensorRT : public nvinfer1::IPluginExt {
  int initialize() override { return 0; }
  // Shutdown the layer. This is called when the engine is destroyed
  void terminate() override {}
  // Execute the layer
#if IS_TRT_VERSION_LT(8000)
  virtual int enqueue(int batch_size, const void* const* inputs,
                      void** outputs,
#else
  virtual int enqueue(int batch_size, const void* const* inputs,
                      void* const* outputs,
#endif
                      void* workspace, cudaStream_t stream) = 0;
  // Find the size of the serialization buffer required

@@ -188,8 +193,13 @@ class PluginTensorRTV2Ext : public nvinfer1::IPluginV2Ext {
  // Find the workspace size required by the layer
  size_t getWorkspaceSize(int) const override { return 0; }
  // Execute the layer
#if IS_TRT_VERSION_LT(8000)
  virtual int enqueue(int batch_size, const void* const* inputs,
                      void** outputs,
#else
  virtual int enqueue(int batch_size, const void* const* inputs,
                      void* const* outputs,
#endif
                      void* workspace, cudaStream_t stream) = 0;
  // Find the size of the serialization buffer required
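For reference, the base-class change that forces all of these edits comes from the TensorRT headers themselves. Paraphrased rather than copied verbatim (the 8.x headers also use int32_t and noexcept spellings that are omitted here), the pure virtual a plugin must override reads:

// TensorRT 7.x and earlier (paraphrased): the output pointer array is mutable.
virtual int enqueue(int batchSize, const void* const* inputs, void** outputs,
                    void* workspace, cudaStream_t stream) = 0;

// TensorRT 8.x (paraphrased): the output pointer array itself is
// const-qualified. A void** override no longer matches this declaration,
// so without the #if branches above it would hide the base virtual
// instead of overriding it.
virtual int enqueue(int batchSize, const void* const* inputs,
                    void* const* outputs, void* workspace,
                    cudaStream_t stream) = 0;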
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu

@@ -243,7 +243,11 @@ int YoloBoxPlugin::enqueue_impl(int batch_size, const void* const* inputs,
}

int YoloBoxPlugin::enqueue(int batch_size, const void* const* inputs,
#if IS_TRT_VERSION_LT(8000)
                           void** outputs, void* workspace,
#else
                           void* const* outputs, void* workspace,
#endif
                           cudaStream_t stream) {
  if (data_type_ == nvinfer1::DataType::kFLOAT) {
    return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h

@@ -43,7 +43,11 @@ class YoloBoxPlugin : public nvinfer1::IPluginV2Ext {
  bool supportsFormat(nvinfer1::DataType type,
                      nvinfer1::TensorFormat format) const override;
  size_t getWorkspaceSize(int max_batch_size) const override;
#if IS_TRT_VERSION_LT(8000)
  int enqueue(int batch_size, const void* const* inputs, void** outputs,
#else
  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
#endif
              void* workspace, cudaStream_t stream) override;
  template <typename T>
  int enqueue_impl(int batch_size, const void* const* inputs, void** outputs,
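The direction of the change is benign for the enqueue bodies: a void** argument converts implicitly to a void* const* parameter, and the added const only forbids reseating the pointer slots, not writing through them. A small self-contained illustration of this conversion rule (hypothetical, not part of the commit):

#include <cstdio>

// void* const* promises not to reassign the output slots; the buffers they
// point at remain writable, which is all an enqueue body needs.
static void fill_first_output(void* const* outputs) {
  float* out0 = static_cast<float*>(outputs[0]);
  out0[0] = 1.0f;
  // outputs[0] = nullptr;  // error: the slots themselves are const
}

int main() {
  float buffer[4] = {0.0f, 0.0f, 0.0f, 0.0f};
  void* slots[1] = {buffer};
  fill_first_output(slots);  // void** binds to void* const* implicitly
  std::printf("%f\n", buffer[0]);
  return 0;
}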