diff --git a/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu
index 01ee86ceb48a9ef022ba73fe0dbdab4a52324cc6..8e9845183b3fe76f7f4cfd3e69d28c3ead8f852f 100644
--- a/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.cu
@@ -166,7 +166,11 @@ int AnchorGeneratorPlugin::enqueue_impl(int batch_size,
 }
 
 int AnchorGeneratorPlugin::enqueue(int batch_size, const void* const* inputs,
+#if IS_TRT_VERSION_LT(8000)
                                    void** outputs, void* workspace,
+#else
+                                   void* const* outputs, void* workspace,
+#endif
                                    cudaStream_t stream) {
   return enqueue_impl(batch_size, inputs, outputs, workspace, stream);
 }
diff --git a/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h
index aff0b6a6802f114a25acf32627a39ca42d572d7c..458326d0679ca96df16db1287139de986f2f3cb4 100644
--- a/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/anchor_generator_op_plugin.h
@@ -42,7 +42,11 @@ class AnchorGeneratorPlugin : public nvinfer1::IPluginV2Ext {
   bool supportsFormat(nvinfer1::DataType type,
                       nvinfer1::TensorFormat format) const override;
   size_t getWorkspaceSize(int max_batch_size) const override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
   int initialize() override;
   void terminate() override;
diff --git a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu
index cc17f8aa2481708e3e19c9925a1d83ad06203145..687e564e8a8360c3d94ed4e18761489bfe5f6245 100644
--- a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.cu
@@ -122,7 +122,11 @@ int ElementWisePlugin::initialize() {
 }
 
 int ElementWisePlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                                void **outputs, void *workspace,
+#else
+                               void *const *outputs, void *workspace,
+#endif
                                cudaStream_t stream) {
   const float *x = reinterpret_cast<const float *>(inputs[0]);
   const float *y = reinterpret_cast<const float *>(inputs[1]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h
index 75a1dd85f0f2c440fdd16beb95144df4127739e6..946e327e355798d61b7907ad98218f0de10fc8c9 100644
--- a/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h
@@ -58,8 +58,11 @@ class ElementWisePlugin : public PluginTensorRT {
 
   int initialize() override;
 
-  // execute the layer
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream);
 
  protected:
diff --git a/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu
index deda2e2cc7247f404ff6d11409b665898d550ee1..3d84855bcbddb92c07f8536a40172687adaab7de 100644
--- a/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.cu
@@ -100,7 +100,11 @@ __global__ void no_exact_gelu_kernel(const T a, const T b, const T c, int n,
 }
 
 int GeluPlugin::enqueue(int batch_size, const void* const* inputs,
+#if IS_TRT_VERSION_LT(8000)
                         void** outputs, void*, cudaStream_t stream) {
+#else
+                        void* const* outputs, void*, cudaStream_t stream) {
+#endif
   const auto& input_dims = this->getInputDims(0);
   int num = batch_size;
   for (int i = 0; i < input_dims.nbDims; i++) {
diff --git a/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h
index 23e507ee477e1a3b85339c7b267b290de19805ab..98c05e9792af4550811ff03588d73a0573943c05 100644
--- a/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/gelu_op_plugin.h
@@ -44,7 +44,11 @@ class GeluPlugin : public PluginTensorRT {
                       nvinfer1::PluginFormat format) const override;
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nb_input_dims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 
  protected:
diff --git a/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu
index 8b2d0ac3cf70f77f1ff9ce9a6fe2ed19fdcf9576..df25b5ba927974779389bc38610189397bb70048 100644
--- a/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.cu
@@ -59,7 +59,11 @@ __global__ void hard_swish_kernel(float threshold, float scale, float offset,
 }
 
 int HardSwishPlugin::enqueue(int batch_size, const void* const* inputs,
+#if IS_TRT_VERSION_LT(8000)
                              void** outputs, void*, cudaStream_t stream) {
+#else
+                             void* const* outputs, void*, cudaStream_t stream) {
+#endif
   const auto& input_dims = this->getInputDims(0);
   int num = batch_size;
   for (int i = 0; i < input_dims.nbDims; i++) {
diff --git a/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h
index 2e1e1d03baf7e1cb046f887f2d799a907f3586d4..ad1952c246a80fbef1744faa813a6d7a8893414a 100644
--- a/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/hard_swish_op_plugin.h
@@ -49,7 +49,11 @@ class HardSwishPlugin : public PluginTensorRT {
   int initialize() override { return 0; }
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nbInputDims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 
  protected:
diff --git a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu
index a579743ee8ad1a9ae480cebf03380635c3a300c4..af063c61c5a56877f4799c33d61a2a368702f4af 100644
--- a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.cu
@@ -59,7 +59,11 @@ nvinfer1::Dims InstanceNormPlugin::getOutputDimensions(
 }
 
 int InstanceNormPlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                                 void **outputs, void *workspace,
+#else
+                                void *const *outputs, void *workspace,
+#endif
                                 cudaStream_t stream) {
   const auto &input_dims = this->getInputDims(0);
 
diff --git a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h
index 83422708f593d8fef66bb2d3b463ede80f041398..f413505bdf43e9828050a5e3dc6851bd1effcb8d 100644
--- a/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/instance_norm_op_plugin.h
@@ -101,7 +101,11 @@ class InstanceNormPlugin : public PluginTensorRT {
   int getNbOutputs() const override { return 1; }
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims *inputs,
                                      int nbInputDims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void *const *inputs, void **outputs,
+#else
+  int enqueue(int batchSize, const void *const *inputs, void *const *outputs,
+#endif
               void *workspace, cudaStream_t stream) override;
 
   bool supportsFormat(nvinfer1::DataType type,
diff --git a/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu
index f9341613a0f55e6f4693c5994e369236c0d1080c..4d55aea316a358e617f1d1ee479273cb644b7534 100644
--- a/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.cu
@@ -43,7 +43,11 @@ nvinfer1::Dims LayerNormPlugin::getOutputDimensions(
 }
 
 int LayerNormPlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                              void **outputs, void *workspace,
+#else
+                             void *const *outputs, void *workspace,
+#endif
                              cudaStream_t stream) {
   const auto &input_dims = this->getInputDims(0);
   const float *input = reinterpret_cast<const float *>(inputs[0]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h
index 9c4c31b61e128ddcb8586b2cc82978767a41182d..a16c5191f88644bb80d8d9dd3a430a0c6c004752 100644
--- a/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/layer_norm_op_plugin.h
@@ -100,7 +100,11 @@ class LayerNormPlugin : public PluginTensorRT {
   int getNbOutputs() const override { return 1; }
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nbInputDims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 };
 
diff --git a/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu
index 154f61a2b7cd3f066cc1a671f8277232fde65a9d..fb8043a9d90e4b0274087c7943eebefb5e545bc9 100644
--- a/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.cu
@@ -42,7 +42,12 @@ nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
 }
 
 int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                         void **outputs, void *workspace, cudaStream_t stream) {
+#else
+                        void *const *outputs, void *workspace,
+                        cudaStream_t stream) {
+#endif
   auto const &input_dims = this->getInputDims(0);
   int input_size = 0;
   float const *idata = reinterpret_cast<float const *>(inputs[0]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h
index 6693a1fae4d4304af2f826894b119383ea704727..90ce44e6822565fe22bff5bf18b775ca8b2a7c2f 100644
--- a/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/pool_op_plugin.h
@@ -128,7 +128,11 @@ class PoolPlugin : public PluginTensorRT {
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nbInputDims) override;
   int initialize() override { return 0; }
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 
  private:
diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu
index 00182b87e984fc3c43f46a3fcb2b9d828db4b170..ad3618bc67b0457efbfba52cf6ecfa1c8ee7c398 100644
--- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.cu
@@ -57,7 +57,12 @@ nvinfer1::Dims PReluPlugin::getOutputDimensions(int index,
 }
 
 int PReluPlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                          void **outputs, void *workspace, cudaStream_t stream) {
+#else
+                         void *const *outputs, void *workspace,
+                         cudaStream_t stream) {
+#endif
   // input dims is CHW.
   const auto &input_dims = this->getInputDims(0);
   const float *input = reinterpret_cast<const float *>(inputs[0]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h
index a0a24e70a01ef47fa71d9d79f7cc2554a60683d0..313272823d4a6dfc39bf207216aa9d50052307d7 100644
--- a/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/prelu_op_plugin.h
@@ -80,7 +80,11 @@ class PReluPlugin : public PluginTensorRT {
   int getNbOutputs() const override { return 1; }
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nbInputDims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 };
 
diff --git a/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu
index b44b3face92e14fc49732621d5397a6fdcf859a2..42d9018fd057952975ffc572828701662e5ba231 100644
--- a/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.cu
@@ -111,7 +111,12 @@ nvinfer1::Dims SlicePlugin::getOutputDimensions(int index,
 }
 
 int SlicePlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                          void **outputs, void *workspace, cudaStream_t stream) {
+#else
+                         void *const *outputs, void *workspace,
+                         cudaStream_t stream) {
+#endif
   auto input_dims = getInputDims(0);
 
   // notice input dims is [C, H, W], add input batch dim here
diff --git a/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h
index 9d4f9a35c3b6fe02981853eb3c0a697d5cb3a199..015a6b116f60a198b2c669fa4d357cd962e30cdf 100644
--- a/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/slice_op_plugin.h
@@ -44,7 +44,11 @@ class SlicePlugin : public PluginTensorRT {
                       nvinfer1::PluginFormat format) const override;
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nb_input_dims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 
  protected:
diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
index 1b5c39f8fff855fac4ef8f2ee54faa872023ad05..24d4715e0312dc3200001ae2d37f0fc378fab4b6 100644
--- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.cu
@@ -126,7 +126,12 @@ __global__ void split_kernel(int nsegment,
 }
 
 int SplitPlugin::enqueue(int batchSize, const void* const* inputs,
+#if IS_TRT_VERSION_LT(8000)
                          void** outputs, void* workspace, cudaStream_t stream) {
+#else
+                         void* const* outputs, void* workspace,
+                         cudaStream_t stream) {
+#endif
   const int* d_segment_offsets_ptr =
       thrust::raw_pointer_cast(&d_segment_offsets_[0]);
   float const* input_ptr = reinterpret_cast<float const*>(inputs[0]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
index 1ee895154d6b046c6c18c2e374d3c63f1fcc5d62..a791395f4a3d3824e4c54ed2cfaf97b79859fde4 100644
--- a/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/split_op_plugin.h
@@ -60,7 +60,11 @@ class SplitPlugin : public PluginTensorRTV2Ext {
 
   int initialize() override;
   void terminate() override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 
   void destroy() override { delete this; }
diff --git a/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu
index 3847d999446e99dfe0bcdc7abfa06ac6c57e64e2..52e5af01822fac5f563b48f40e039e8297ccd4f0 100644
--- a/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.cu
@@ -85,7 +85,12 @@ __global__ void swish_kernel(int num, const half *input, half *output,
 }
 
 int SwishPlugin::enqueue(int batch_size, const void *const *inputs,
+#if IS_TRT_VERSION_LT(8000)
                          void **outputs, void *workspace, cudaStream_t stream) {
+#else
+                         void *const *outputs, void *workspace,
+                         cudaStream_t stream) {
+#endif
   // input dims is CHW.
   const auto &input_dims = this->getInputDims(0);
   const float *input = reinterpret_cast<const float *>(inputs[0]);
diff --git a/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h
index 11579aadcc45731123770352ef08b362ff3ef745..2a8b637730b516ef03a7e7d5008110ae839d2fba 100644
--- a/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/swish_op_plugin.h
@@ -67,7 +67,11 @@ class SwishPlugin : public PluginTensorRT {
   int getNbOutputs() const override { return 1; }
   nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs,
                                      int nbInputDims) override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batchSize, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batchSize, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
 };
 
diff --git a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
index ce3133ae99e94c62c0c8e958065700373d270037..37be06bba3aebd518e8775c7ed8ccd24aa7fa7e1 100644
--- a/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/trt_plugin.h
@@ -82,8 +82,13 @@ class PluginTensorRT : public nvinfer1::IPluginExt {
   int initialize() override { return 0; }
   // Shutdown the layer. This is called when the engine is destroyed
   void terminate() override {}
-  // Execute the layer
+// Execute the layer
+#if IS_TRT_VERSION_LT(8000)
   virtual int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  virtual int enqueue(int batch_size, const void* const* inputs,
+                      void* const* outputs,
+#endif
                       void* workspace, cudaStream_t stream) = 0;
 
   // Find the size of the serialization buffer required
@@ -188,8 +193,13 @@ class PluginTensorRTV2Ext : public nvinfer1::IPluginV2Ext {
   // Find the workspace size required by the layer
   size_t getWorkspaceSize(int) const override { return 0; }
 
-  // Execute the layer
+// Execute the layer
+#if IS_TRT_VERSION_LT(8000)
   virtual int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  virtual int enqueue(int batch_size, const void* const* inputs,
+                      void* const* outputs,
+#endif
                       void* workspace, cudaStream_t stream) = 0;
 
   // Find the size of the serialization buffer required
diff --git a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
index 13d07e774036a48b0ed6e3c91b168eaab4461df5..f9767f38559482ee8377a37a29b56421dfe35eb7 100644
--- a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.cu
@@ -243,7 +243,11 @@ int YoloBoxPlugin::enqueue_impl(int batch_size, const void* const* inputs,
 }
 
 int YoloBoxPlugin::enqueue(int batch_size, const void* const* inputs,
+#if IS_TRT_VERSION_LT(8000)
                            void** outputs, void* workspace,
+#else
+                           void* const* outputs, void* workspace,
+#endif
                            cudaStream_t stream) {
   if (data_type_ == nvinfer1::DataType::kFLOAT) {
     return enqueue_impl<float>(batch_size, inputs, outputs, workspace, stream);
diff --git a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h
index 8ca21da7ae0377164cbb50c502f0abb5ca943058..4cd6a383336e236251b9cbef49c96b18a8fe0537 100644
--- a/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h
+++ b/paddle/fluid/inference/tensorrt/plugin/yolo_box_op_plugin.h
@@ -43,7 +43,11 @@ class YoloBoxPlugin : public nvinfer1::IPluginV2Ext {
   bool supportsFormat(nvinfer1::DataType type,
                       nvinfer1::TensorFormat format) const override;
   size_t getWorkspaceSize(int max_batch_size) const override;
+#if IS_TRT_VERSION_LT(8000)
   int enqueue(int batch_size, const void* const* inputs, void** outputs,
+#else
+  int enqueue(int batch_size, const void* const* inputs, void* const* outputs,
+#endif
               void* workspace, cudaStream_t stream) override;
   template <typename T>
   int enqueue_impl(int batch_size, const void* const* inputs, void** outputs,
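
Note on the pattern this patch repeats (commentary, not part of the diff): TensorRT 8.0 changed the signature of IPluginV2::enqueue so that the outputs array is passed as `void* const*` instead of `void**`; a plugin declared with the old signature no longer overrides the base-class method and fails to compile against TensorRT 8 headers, hence the version gate around every enqueue declaration and definition above. The IS_TRT_VERSION_LT macro is defined elsewhere in Paddle's TensorRT helpers and is not shown in this diff; the sketch below shows one way such a guard can be assembled from the NV_TENSORRT_* macros in TensorRT's NvInferVersion.h. The major*1000 + minor*100 + patch*10 + build encoding is an assumption inferred from the 8000 threshold used above.

    // Sketch only: a compile-time TensorRT version guard. The exact encoding
    // is assumed; Paddle's actual helper may differ in detail.
    #include <NvInferVersion.h>  // defines NV_TENSORRT_MAJOR/MINOR/PATCH/BUILD

    #define TRT_VERSION                                      \
      (NV_TENSORRT_MAJOR * 1000 + NV_TENSORRT_MINOR * 100 +  \
       NV_TENSORRT_PATCH * 10 + NV_TENSORRT_BUILD)

    #define IS_TRT_VERSION_GE(version) (TRT_VERSION >= (version))
    #define IS_TRT_VERSION_LT(version) (TRT_VERSION < (version))

    // Example: TensorRT 7.2.3.4 gives 7000 + 200 + 30 + 4 = 7234, so
    // IS_TRT_VERSION_LT(8000) is true and the pre-8.0 enqueue signature
    // (void** outputs) is compiled; under TensorRT 8.x the value is >= 8000
    // and the void* const* variant is used instead.

Because the check is a preprocessor comparison against the headers the build actually compiles with, each plugin ends up with exactly one enqueue declaration, matching whichever virtual the installed TensorRT declares.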