Unverified commit cf8a5573, authored by: F feng_shuai, committed by: GitHub

pool2d_coonvert_ut (#39545)

Parent: a558d386
...@@ -106,6 +106,9 @@ class Pool2dOpConverter : public OpConverter {
       reduce_operation = nvinfer1::ReduceOperation::kAVG;
       plugin_pool_type = plugin::PoolPlugin::PoolType::avg;
     }
+    if (global_pooling || adaptive) {
+      std::fill(paddings.begin(), paddings.end(), 0);
+    }
     if (padding_algorithm == "VALID") {
       std::fill(paddings.begin(), paddings.end(), 0);
...@@ -136,6 +139,46 @@ class Pool2dOpConverter : public OpConverter {
 #endif
     }
+    std::vector<int> real_paddings = paddings;
+    for (int i = 0; i < 2; ++i) {
+      int copy_pad = *(paddings.begin() + i);
+      real_paddings.insert(real_paddings.begin() + 2 * i + 1, copy_pad);
+    }
+    // SAME
+    if (padding_algorithm == "SAME") {
+      // expand
+      for (int i = 0; i < 2; ++i) {
+        int copy_pad = *(paddings.begin() + 2 * i);
+        paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
+      }
+      // compute
+      for (int i = 0; i < 2; ++i) {
+        int out_size = (input_shape.d[2 + i] + strides[i] - 1) / strides[i];
+        int pad_sum = std::max(
+            (out_size - 1) * strides[i] + ksize[i] - input_shape.d[2 + i], 0);
+        int pad_0 = pad_sum / 2;
+        int pad_1 = pad_sum - pad_0;
+        paddings[i * 2] = pad_0;
+        paddings[i * 2 + 1] = pad_1;
+      }
+      real_paddings = paddings;
+      // slice
+      for (int i = 0; i < 2; ++i) {
+        paddings.erase(paddings.begin() + i + 1);
+      }
+    }
+    // VALID
+    if (padding_algorithm == "VALID") {
+      std::fill(real_paddings.begin(), real_paddings.end(), 0);
+    }
+    if (global_pooling == true && !engine_->with_dynamic_shape()) {
+      nv_ksize.d[0] = input_shape.d[input_dims - 2];
+      nv_ksize.d[1] = input_shape.d[input_dims - 1];
+      ksize[0] = input_shape.d[input_dims - 2];
+      ksize[1] = input_shape.d[input_dims - 1];
+    }
     if (engine_->with_dynamic_shape()) {
       if (!adaptive && !global_pooling && !ceil_mode) {
         // input_shape.d < 0 means we can't get shape info here.
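[Editor's note] The SAME branch added above follows the standard rule: the output size is ceil(in / stride), and the total padding needed to reach it is split into a front half and a back half that absorbs the odd remainder. A minimal standalone sketch of that rule (illustration only, not part of the commit):

    #include <algorithm>
    #include <utility>

    // {pad_front, pad_back} for one spatial dimension under SAME padding.
    std::pair<int, int> SamePadding(int in, int ksize, int stride) {
      int out = (in + stride - 1) / stride;                        // ceil(in / stride)
      int pad_sum = std::max((out - 1) * stride + ksize - in, 0);  // total padding
      int pad_front = pad_sum / 2;
      int pad_back = pad_sum - pad_front;                          // back side takes the remainder
      return {pad_front, pad_back};
    }
    // e.g. in = 7, ksize = 3, stride = 2  ->  out = 4, pad_sum = 2 -> {1, 1}

real_paddings keeps the front and back pads as separate entries (four values for H and W) so that asymmetric SAME padding survives, while the two-entry paddings vector is retained for code paths that expect a single pad per dimension.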
...@@ -173,15 +216,15 @@ class Pool2dOpConverter : public OpConverter {
         pool_layer->setPaddingMode(nvinfer1::PaddingMode::kEXPLICIT_ROUND_UP);
       }
       layer = pool_layer;
-    } else if (global_pooling) {
+    } else if (global_pooling && !adaptive) {
       auto *reduce_layer = TRT_ENGINE_ADD_LAYER(engine_, Reduce, *input1,
                                                 reduce_operation, 12, true);
       layer = reduce_layer;
     } else {
 #if IS_TRT_VERSION_GE(6000)
-      plugin::PoolPluginDynamic *plugin =
-          new plugin::PoolPluginDynamic(ceil_mode, pool_type, adaptive, ksize,
-                                        strides, paddings, global_pooling);
+      plugin::PoolPluginDynamic *plugin = new plugin::PoolPluginDynamic(
+          ceil_mode, pool_type, adaptive, exclusive, ksize, strides, paddings,
+          global_pooling);
       layer = engine_->AddDynamicPlugin(&input1, 1, plugin);
 #endif
     }
...@@ -195,21 +238,13 @@ class Pool2dOpConverter : public OpConverter {
       return;
     }
-    if (global_pooling == true) {
+    if (global_pooling == true && adaptive == false) {
-      nv_ksize.d[0] = input_shape.d[input_dims - 2];
-      nv_ksize.d[1] = input_shape.d[input_dims - 1];
       auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                               nv_pool_type, nv_ksize);
       PADDLE_ENFORCE_NOT_NULL(
           pool_layer, platform::errors::Fatal(
                           "trt pool layer in converter could not be created."));
       auto output_name = op_desc.Output("Out")[0];
-      pool_layer->setStride(nv_strides);
-      pool_layer->setPadding(nv_paddings);
-      if (padding_algorithm == "SAME") {
-        pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
-      }
-      pool_layer->setAverageCountExcludesPadding(exclusive);
       pool_layer->setName(("pool2d (Output: " + output_name + ")").c_str());
       pool_layer->getOutput(0)->setName(output_name.c_str());
       engine_->SetITensor(output_name, pool_layer->getOutput(0));
...@@ -222,58 +257,61 @@ class Pool2dOpConverter : public OpConverter {
     if (!adaptive) {
       if (ceil_mode) {
-        nvinfer1::DimsHW pre_pad(0, 0);
-        nvinfer1::DimsHW post_pad(0, 0);
-        // If ceil mode is true, we will pad the appropriate size to the input.
-        DealCeilMode(input_shape, ksize, strides, paddings, &pre_pad, &post_pad,
-                     input_dims);
-        auto *pad_layer =
-            TRT_ENGINE_ADD_LAYER(engine_, Padding, *input1, pre_pad, post_pad);
-        PADDLE_ENFORCE_NOT_NULL(
-            pad_layer, platform::errors::Fatal(
-                           "Pad layer in poolOp converter could not be "
-                           "created. The pointer to pad layer is `NULL`."));
-        input1 = pad_layer->getOutput(0);
-      }
+        std::vector<int> input_shape_v;
+        for (int i = 0; i < input_dims; i++) {
+          input_shape_v.push_back(input_shape.d[i]);
+        }
+        plugin::PoolPlugin *plugin = new plugin::PoolPlugin(
+            ceil_mode, plugin_pool_type, adaptive, exclusive, ksize, strides,
+            paddings, input_shape_v, real_paddings);
+        auto *pool_layer = engine_->AddPlugin(&input1, 1, plugin);
+        PADDLE_ENFORCE_NOT_NULL(
+            pool_layer,
+            platform::errors::Fatal(
+                "trt pool plugin layer in converter could not be created."));
+        layer = pool_layer;
+      } else {
 #if IS_TRT_VERSION_GE(8000)
       // Exclude padding pixels from the average mean is not supported well by
       // TRT
       // so enable padding for trt8.0 above.
       if ((g_post_pad.w() > 0 || g_post_pad.h() > 0) &&
           (padding_algorithm != "SAME") && !ceil_mode) {
         auto *pad_layer = TRT_ENGINE_ADD_LAYER(engine_, Padding, *input1,
                                                g_pre_pad, g_post_pad);
         PADDLE_ENFORCE_NOT_NULL(
             pad_layer, platform::errors::Fatal(
                            "Pad layer in poolOp converter could not be "
                            "created. The pointer to pad layer is `NULL`."));
         input1 = pad_layer->getOutput(0);
       }
 #endif
       auto *pool_layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, *input1,
                                               nv_pool_type, nv_ksize);
       PADDLE_ENFORCE_NOT_NULL(
-          pool_layer, platform::errors::Fatal(
-                          "trt pool layer in converter could not be created."));
+          pool_layer,
+          platform::errors::Fatal(
+              "trt pool layer in converter could not be created."));
       pool_layer->setStride(nv_strides);
       pool_layer->setPadding(nv_paddings);
       if (padding_algorithm == "SAME") {
         pool_layer->setPaddingMode(nvinfer1::PaddingMode::kSAME_UPPER);
       }
       pool_layer->setAverageCountExcludesPadding(exclusive);
       layer = pool_layer;
+      }
     } else {
       // Average pooling needs to exclude the padding pixels from the average
       // mean.
-      // It is not supported well by TRT, we use a plugin here.
+      // It is not supported well by TRT, we use a plugin here
       std::vector<int> input_shape_v;
       for (int i = 0; i < input_dims; i++) {
         input_shape_v.push_back(input_shape.d[i]);
       }
-      plugin::PoolPlugin *plugin =
-          new plugin::PoolPlugin(ceil_mode, plugin_pool_type, adaptive, ksize,
-                                 strides, paddings, input_shape_v);
+      plugin::PoolPlugin *plugin = new plugin::PoolPlugin(
+          ceil_mode, plugin_pool_type, adaptive, exclusive, ksize, strides,
+          paddings, input_shape_v, real_paddings);
       auto *pool_layer = engine_->AddPlugin(&input1, 1, plugin);
       PADDLE_ENFORCE_NOT_NULL(
           pool_layer,
...
...@@ -35,6 +35,36 @@ nvinfer1::Dims PoolPlugin::getOutputDimensions(int index,
   return output_dims;
 }
+
+size_t PoolPlugin::getSerializationSize() const TRT_NOEXCEPT {
+  return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
+         SerializedSize(pool_type_) + SerializedSize(adaptive_) +
+         SerializedSize(exclusive_) + SerializedSize(ksize_) +
+         SerializedSize(strides_) + SerializedSize(paddings_) +
+         SerializedSize(real_paddings_) + SerializedSize(input_shape_) +
+         SerializedSize(output_shape_);
+}
+
+// TRT will call this func when we need to serialize the configuration of
+// tensorrt.
+void PoolPlugin::serialize(void *buffer) const TRT_NOEXCEPT {
+  serializeBase(buffer);
+  SerializeValue(&buffer, ceil_mode_);
+  SerializeValue(&buffer, pool_type_);
+  SerializeValue(&buffer, adaptive_);
+  SerializeValue(&buffer, exclusive_);
+  SerializeValue(&buffer, ksize_);
+  SerializeValue(&buffer, strides_);
+  SerializeValue(&buffer, paddings_);
+  SerializeValue(&buffer, real_paddings_);
+  SerializeValue(&buffer, input_shape_);
+  SerializeValue(&buffer, output_shape_);
+}
+
+PoolPlugin *PoolPlugin::clone() const TRT_NOEXCEPT {
+  return new PoolPlugin(ceil_mode_, pool_type_, adaptive_, exclusive_, ksize_,
+                        strides_, paddings_, input_shape_, real_paddings_);
+}
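[Editor's note] getSerializationSize(), serialize(), and the deserializing constructor must cover the same fields in the same order, which is why the new exclusive_ and real_paddings_ fields are threaded through all three. A self-contained sketch of that invariant, using hypothetical Write/Read helpers standing in for Paddle's SerializeValue/DeserializeValue (illustration only):

    #include <cassert>
    #include <cstring>
    #include <vector>

    template <typename T>
    void Write(char** buf, const T& v) {   // stand-in for SerializeValue
      std::memcpy(*buf, &v, sizeof(T));
      *buf += sizeof(T);
    }
    template <typename T>
    void Read(const char** buf, T* v) {    // stand-in for DeserializeValue
      std::memcpy(v, *buf, sizeof(T));
      *buf += sizeof(T);
    }

    int main() {
      bool adaptive = true, exclusive = false;   // fields in a fixed order
      std::vector<char> storage(2 * sizeof(bool));
      char* w = storage.data();
      Write(&w, adaptive);                       // write order: adaptive, exclusive
      Write(&w, exclusive);

      bool a = false, e = true;
      const char* r = storage.data();
      Read(&r, &a);                              // read order must match
      Read(&r, &e);
      assert(a == adaptive && e == exclusive);
      return 0;
    }

The assert only holds because the read order mirrors the write order; skipping or reordering one field silently shifts every later read.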
 int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
 #if IS_TRT_VERSION_LT(8000)
                         void **outputs, void *workspace,
...@@ -59,14 +89,15 @@ int PoolPlugin::enqueue(int batchSize, const void *const *inputs,
         paddle::operators::math::MaxPool<float>, float>
         pool2d_forward;
     pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
-                   paddings_, true, adaptive_, odatas[0], stream, pool_process);
+                   paddings_, true, false, odatas[0], stream, pool_process);
   } else if (pool_type_ == PoolType::avg) {
     paddle::operators::math::AvgPool<float> pool_process;
     paddle::operators::math::Pool2dDirectCUDAFunctor<
         paddle::operators::math::AvgPool<float>, float>
         pool2d_forward;
     pool2d_forward(idata, input_shape, output_shape, ksize_, strides_,
-                   paddings_, true, adaptive_, odatas[0], stream, pool_process);
+                   paddings_, exclusive_, adaptive_, odatas[0], stream,
+                   pool_process);
   }
   return cudaGetLastError() != cudaSuccess;
...@@ -82,6 +113,7 @@ PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
   DeserializeValue(&serialData, &serialLength, &pool_type);
   pool_type_ = std::string(pool_type);
   DeserializeValue(&serialData, &serialLength, &adaptive_);
+  DeserializeValue(&serialData, &serialLength, &exclusive_);
   DeserializeValue(&serialData, &serialLength, &ksize_);
   DeserializeValue(&serialData, &serialLength, &strides_);
   DeserializeValue(&serialData, &serialLength, &paddings_);
...@@ -90,21 +122,27 @@ PoolPluginDynamic::PoolPluginDynamic(void const *serialData,
 size_t PoolPluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
   return SerializedSize(ceil_mode_) + SerializedSize(pool_type_.c_str()) +
-         SerializedSize(adaptive_) + SerializedSize(ksize_) +
-         SerializedSize(strides_) + SerializedSize(paddings_) +
-         SerializedSize(is_global_);
+         SerializedSize(adaptive_) + SerializedSize(exclusive_) +
+         SerializedSize(ksize_) + SerializedSize(strides_) +
+         SerializedSize(paddings_) + SerializedSize(is_global_);
 }

 void PoolPluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
   SerializeValue(&buffer, ceil_mode_);
   SerializeValue(&buffer, pool_type_.c_str());
   SerializeValue(&buffer, adaptive_);
+  SerializeValue(&buffer, exclusive_);
   SerializeValue(&buffer, ksize_);
   SerializeValue(&buffer, strides_);
   SerializeValue(&buffer, paddings_);
   SerializeValue(&buffer, is_global_);
 }
+
+nvinfer1::IPluginV2DynamicExt *PoolPluginDynamic::clone() const TRT_NOEXCEPT {
+  return new PoolPluginDynamic(ceil_mode_, pool_type_, adaptive_, exclusive_,
+                               ksize_, strides_, paddings_, is_global_);
+}
 nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
     int output_index, const nvinfer1::DimsExprs *inputs, int nb_inputs,
     nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
...@@ -117,11 +155,14 @@ nvinfer1::DimsExprs PoolPluginDynamic::getOutputDimensions(
       platform::errors::InvalidArgument("The channel dimension should be "
                                         "static, but we found it's dynamic."));
   nvinfer1::DimsExprs output(inputs[0]);
-  if (is_global_) {
+  if (is_global_ && !adaptive_) {
     output.d[2] = expr_builder.constant(1);
     output.d[3] = expr_builder.constant(1);
     return output;
   }
+  if (is_global_ && adaptive_) {
+    return inputs[0];
+  }
   if (adaptive_) {
     output.d[2] = expr_builder.constant(ksize_[0]);
     output.d[3] = expr_builder.constant(ksize_[1]);
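[Editor's note] The branches above encode three output-shape rules: global non-adaptive pooling collapses H/W to 1x1, global adaptive pooling degenerates to an identity on the spatial dims, and plain adaptive pooling makes ksize itself the output size. A compact sketch with plain ints in place of TensorRT's IDimensionExpr (illustration only):

    #include <utility>

    // Output {H, W} of the pool plugin for the cases handled above.
    std::pair<int, int> PoolOutputHW(bool is_global, bool adaptive,
                                     int in_h, int in_w,
                                     int ksize_h, int ksize_w) {
      if (is_global && !adaptive) return {1, 1};       // global pooling collapses H/W
      if (is_global && adaptive) return {in_h, in_w};  // degenerate case: identity
      if (adaptive) return {ksize_h, ksize_w};         // adaptive: ksize *is* the output size
      // non-adaptive, non-global: the usual sliding-window formula,
      // handled further down in the real function and not reproduced here.
      return {-1, -1};
    }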
...@@ -245,6 +286,10 @@ int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
     output_shape[2] = data_dim[0];
     output_shape[3] = data_dim[1];
   }
+  if (adaptive_) {
+    output_shape[2] = h;
+    output_shape[3] = w;
+  }

   if (pool_type_ == "max") {
     paddle::operators::math::MaxPool<float> pool_process;
...@@ -252,14 +297,14 @@ int PoolPluginDynamic::enqueue(const nvinfer1::PluginTensorDesc *input_desc,
         paddle::operators::math::MaxPool<float>, float>
         pool2d_forward;
     pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
-                   true, adaptive_, output, stream, pool_process);
+                   true, false, output, stream, pool_process);
   } else if (pool_type_ == "avg") {
     paddle::operators::math::AvgPool<float> pool_process;
     paddle::operators::math::Pool2dDirectCUDAFunctor<
         paddle::operators::math::AvgPool<float>, float>
         pool2d_forward;
     pool2d_forward(input, input_shape, output_shape, ksize, strides_, paddings,
-                   true, adaptive_, output, stream, pool_process);
+                   exclusive_, adaptive_, output, stream, pool_process);
   }
   return cudaGetLastError() != cudaSuccess;
...
...@@ -29,26 +29,32 @@ static std::vector<int> CalcOutputSize(const std::vector<int>& input_shape,
                                        const bool& adaptive,
                                        const std::vector<int>& ksize,
                                        const std::vector<int>& strides,
-                                       const std::vector<int>& paddings) {
+                                       const std::vector<int>& real_paddings) {
   std::vector<int> output_shape = input_shape;
   if (adaptive) {
     output_shape[0] = ksize[0];
     output_shape[1] = ksize[1];
   } else {
-    int output_h, output_w;
-    if (!ceil_mode) {
-      output_h = (input_shape[0] - ksize[0] + 2 * paddings[0]) / strides[0] + 1;
-      output_w = (input_shape[1] - ksize[1] + 2 * paddings[1]) / strides[1] + 1;
-    } else {
-      output_h =
-          (input_shape[0] - ksize[0] + 2 * paddings[0] + strides[0] - 1) /
-              strides[0] +
-          1;
-      output_w =
-          (input_shape[1] - ksize[1] + 2 * paddings[1] + strides[1] - 1) /
-              strides[1] +
-          1;
-    }
+    int output_h = 0, output_w = 0;
+    if (ceil_mode) {
+      output_h = (input_shape[0] - ksize[0] + real_paddings[0] +
+                  real_paddings[1] + strides[0] - 1) /
+                     strides[0] +
+                 1;
+      output_w = (input_shape[1] - ksize[1] + real_paddings[2] +
+                  real_paddings[3] + strides[1] - 1) /
+                     strides[1] +
+                 1;
+    }
+    // TRT will use the native layer when ceil_mode = false
+    /*
+    else {
+      output_h = (input_shape[0] - ksize[0] + real_paddings[0] +
+                  real_paddings[1]) / strides[0] + 1;
+      output_w = (input_shape[1] - ksize[1] + real_paddings[2] +
+                  real_paddings[3]) / strides[1] + 1;
+    }
+    */
     output_shape[0] = output_h;
     output_shape[1] = output_w;
   }
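[Editor's note] The ceil_mode formula above generalizes the usual output-size rule to asymmetric padding, taking front and back pads from real_paddings instead of assuming a symmetric 2 * pad. One dimension of it, as a standalone function with a worked example (illustration only, not part of the commit):

    // ceil((in - ksize + pad_front + pad_back) / stride) + 1, via integer arithmetic.
    int CeilModeOutputSize(int in, int ksize, int pad_front, int pad_back,
                           int stride) {
      return (in - ksize + pad_front + pad_back + stride - 1) / stride + 1;
    }
    // e.g. in = 5, ksize = 2, pads = {0, 0}, stride = 2:
    //   floor mode: (5 - 2 + 0 + 0) / 2 + 1 = 2
    //   ceil mode:  (5 - 2 + 0 + 0 + 2 - 1) / 2 + 1 = 3  (keeps the final partial window)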
...@@ -57,47 +63,32 @@ static std::vector<int> CalcOutputSize(const std::vector<int>& input_shape,
 class PoolPlugin : public PluginTensorRT {
  public:
-  size_t getSerializationSize() const TRT_NOEXCEPT override {
-    return getBaseSerializationSize() + SerializedSize(ceil_mode_) +
-           SerializedSize(pool_type_) + SerializedSize(adaptive_) +
-           SerializedSize(ksize_) + SerializedSize(strides_) +
-           SerializedSize(paddings_) + SerializedSize(input_shape_) +
-           SerializedSize(output_shape_);
-  }
+  size_t getSerializationSize() const TRT_NOEXCEPT override;

-  // TRT will call this func when we need to serialize the configuration of
-  // tensorrt.
-  void serialize(void* buffer) const TRT_NOEXCEPT override {
-    serializeBase(buffer);
-    SerializeValue(&buffer, ceil_mode_);
-    SerializeValue(&buffer, pool_type_);
-    SerializeValue(&buffer, adaptive_);
-    SerializeValue(&buffer, ksize_);
-    SerializeValue(&buffer, strides_);
-    SerializeValue(&buffer, paddings_);
-    SerializeValue(&buffer, input_shape_);
-    SerializeValue(&buffer, output_shape_);
-  }
+  void serialize(void* buffer) const TRT_NOEXCEPT override;

   enum class PoolType {
     max = 0,
     avg,
   };

   PoolPlugin() {}
-  PoolPlugin(bool ceil_mode, PoolType pool_type, bool adaptive,
-             std::vector<int> ksize, std::vector<int> strides,
-             std::vector<int> paddings, std::vector<int> input_shape)
+  PoolPlugin(bool ceil_mode, PoolType pool_type, bool adaptive, bool exclusive,
+             std::vector<int> ksize, std::vector<int> strides,
+             std::vector<int> paddings, std::vector<int> input_shape,
+             std::vector<int> real_paddings)
       : ceil_mode_(ceil_mode),
         pool_type_(pool_type),
         adaptive_(adaptive),
+        exclusive_(exclusive),
         ksize_(ksize),
         strides_(strides),
         paddings_(paddings),
+        real_paddings_(real_paddings),
         input_shape_(input_shape) {
     output_shape_ = input_shape_;
     std::vector<int> output_shape =
         CalcOutputSize({input_shape_[1], input_shape_[2]}, ceil_mode_,
-                       adaptive_, ksize_, strides_, paddings_);
+                       adaptive_, ksize_, strides_, real_paddings_);
     output_shape_[1] = output_shape[0];
     output_shape_[2] = output_shape[1];
   }
...@@ -109,17 +100,16 @@ class PoolPlugin : public PluginTensorRT {
     DeserializeValue(&serialData, &serialLength, &ceil_mode_);
     DeserializeValue(&serialData, &serialLength, &pool_type_);
     DeserializeValue(&serialData, &serialLength, &adaptive_);
+    DeserializeValue(&serialData, &serialLength, &exclusive_);
     DeserializeValue(&serialData, &serialLength, &ksize_);
     DeserializeValue(&serialData, &serialLength, &strides_);
     DeserializeValue(&serialData, &serialLength, &paddings_);
+    DeserializeValue(&serialData, &serialLength, &real_paddings_);
     DeserializeValue(&serialData, &serialLength, &input_shape_);
     DeserializeValue(&serialData, &serialLength, &output_shape_);
   }

-  PoolPlugin* clone() const TRT_NOEXCEPT override {
-    return new PoolPlugin(ceil_mode_, pool_type_, adaptive_, ksize_, strides_,
-                          paddings_, input_shape_);
-  }
+  PoolPlugin* clone() const TRT_NOEXCEPT override;

   const char* getPluginType() const TRT_NOEXCEPT override {
     return "pool_plugin";
...@@ -139,9 +129,11 @@ class PoolPlugin : public PluginTensorRT {
   bool ceil_mode_;
   PoolType pool_type_;
   bool adaptive_;
+  bool exclusive_;
   std::vector<int> ksize_;
   std::vector<int> strides_;
   std::vector<int> paddings_;
+  std::vector<int> real_paddings_;
   std::vector<int> input_shape_;
   std::vector<int> output_shape_;
 };
...@@ -167,12 +159,14 @@ class PoolPluginDynamic : public DynamicPluginTensorRT {
  public:
   PoolPluginDynamic() {}
   PoolPluginDynamic(const bool& ceil_mode, const std::string& pool_type,
-                    const bool& adaptive, const std::vector<int>& ksize,
+                    const bool& adaptive, bool exclusive,
+                    const std::vector<int>& ksize,
                     const std::vector<int>& strides,
                     const std::vector<int>& paddings, const bool& is_global)
       : ceil_mode_(ceil_mode),
         pool_type_(pool_type),
         adaptive_(adaptive),
+        exclusive_(exclusive),
         ksize_(ksize),
         strides_(strides),
         paddings_(paddings),
...@@ -180,10 +174,7 @@ class PoolPluginDynamic : public DynamicPluginTensorRT {
   PoolPluginDynamic(void const* serialData, size_t serialLength);
   ~PoolPluginDynamic() {}
-  nvinfer1::IPluginV2DynamicExt* clone() const TRT_NOEXCEPT override {
-    return new PoolPluginDynamic(ceil_mode_, pool_type_, adaptive_, ksize_,
-                                 strides_, paddings_, is_global_);
-  }
+  nvinfer1::IPluginV2DynamicExt* clone() const TRT_NOEXCEPT override;

   const char* getPluginType() const TRT_NOEXCEPT override {
     return "pool_plugin_dynamic";
...@@ -229,6 +220,7 @@ class PoolPluginDynamic : public DynamicPluginTensorRT {
   bool ceil_mode_;
   std::string pool_type_;
   bool adaptive_;
+  bool exclusive_;
   std::vector<int> ksize_;
   std::vector<int> strides_;
   std::vector<int> paddings_;
...
...@@ -52,7 +52,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
             return np.random.random([24, 3, 3, 3]).astype(np.float32)

         for strides in [[1, 1], [1, 2], [2, 2]]:
-            for paddings in [[0, 2], [0, 3], [0, 1, 2, 3]]:
+            for paddings in [[0, 2], [0, 3]]:
                 for pooling_type in ['max', 'avg']:
                     for padding_algotithm in ['EXPLICIT', 'SAME', 'VAILD']:
                         for ksize in [[2, 3], [3, 3]]:
...@@ -145,44 +145,18 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
                 True), 1e-5

     def add_skip_trt_case(self):
-        def teller1(program_config, predictor_config):
-            if len(program_config.ops[0].attrs['paddings']) == 4:
-                return True
-            return False
-
-        self.add_skip_case(teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
-                           "4-dims paddings are not support for trt now.")
-
-        def teller2(program_config, predictor_config):
-            if program_config.ops[0].attrs['global_pooling'] == True:
-                return True
-            return False
-
-        self.add_skip_case(
-            teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "It is not support that global_pooling is true for trt now.")
-
-        def teller3(program_config, predictor_config):
-            if self.dynamic_shape.min_input_shape == {} and program_config.ops[
-                    0].attrs['ceil_mode'] == True:
-                return True
-            return False
-
-        self.add_skip_case(
-            teller3, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "It is not support that ceil_mode is true in static mode for trt now."
-        )
-
-        def teller4(program_config, predictor_config):
-            if self.dynamic_shape.min_input_shape != {} and (
-                    program_config.ops[0].attrs['strides'] == [1, 2] or
-                    program_config.ops[0].attrs['strides'] == [2, 2]):
-                return True
-            return False
-
-        self.add_skip_case(
-            teller4, SkipReasons.TRT_NOT_IMPLEMENTED,
-            "It is not support that strides is not equal [1, 1] in dynamic mode for trt now."
-        )
+        def teller(program_config, predictor_config):
+            if program_config.ops[0].attrs['pooling_type'] == 'avg' and \
+                    program_config.ops[0].attrs['global_pooling'] == False and \
+                    program_config.ops[0].attrs['exclusive'] == True and \
+                    program_config.ops[0].attrs['adaptive'] == False and \
+                    program_config.ops[0].attrs['ceil_mode'] == True:
+                return True
+            return False
+
+        self.add_skip_case(
+            teller, SkipReasons.TRT_NOT_IMPLEMENTED,
+            "The results of some cases are Nan, but the results of TensorRT and GPU are the same."
+        )

     def test(self):
...
...@@ -119,6 +119,17 @@ class TensorRTAvgPoolTest(TensorRTPoolTest):
         self.exclusive = False

+class TensorRTAvgCeilPoolTest(TensorRTPoolTest):
+    def set_extra_config(self):
+        self.pool_size = 2
+        self.pool_type = 'avg'
+        self.pool_stride = 1
+        self.pool_padding = 0
+        self.global_pooling = False
+        self.ceil_mode = True
+        self.exclusive = False
+
 class TensorRTGlobalPoolTest(TensorRTPoolTest):
     def set_extra_config(self):
         self.pool_size = 2
...