From 3df6aa7e3eef6333636f650e3a9ee67a555eb374 Mon Sep 17 00:00:00 2001
From: chenjiaoAngel
Date: Thu, 14 Nov 2019 19:41:38 +0800
Subject: [PATCH] update padding type in other devices, test=develop

---
 lite/backends/fpga/KD/pes/pooling_pe.hpp | 10 ++++++----
 lite/kernels/cuda/pool_compute.cu        |  5 +++--
 lite/kernels/cuda/pool_compute_test.cc   | 15 +++++++++------
 lite/kernels/npu/bridges/pool_op.cc      |  3 ++-
 lite/kernels/npu/bridges/pool_op_test.cc |  8 +++++---
 lite/kernels/opencl/pool_compute.cc      |  2 +-
 lite/kernels/opencl/pool_compute_test.cc |  3 ++-
 lite/kernels/x86/pool_compute.h          |  5 ++---
 lite/kernels/x86/pool_compute_test.cc    |  2 +-
 lite/kernels/xpu/bridges/pool_op.cc      |  7 ++++---
 lite/kernels/xpu/bridges/pool_op_test.cc |  8 +++++---
 lite/operators/pool_op.cc                |  2 +-
 12 files changed, 41 insertions(+), 29 deletions(-)

diff --git a/lite/backends/fpga/KD/pes/pooling_pe.hpp b/lite/backends/fpga/KD/pes/pooling_pe.hpp
index 40c6f28412..5bb4f5285a 100644
--- a/lite/backends/fpga/KD/pes/pooling_pe.hpp
+++ b/lite/backends/fpga/KD/pes/pooling_pe.hpp
@@ -45,13 +45,14 @@ class PoolingPE : public PE {
 
     PoolingArgs args = {0};
     args.mode = param_.type;
+    auto paddings = *param_.paddings;
     args.kernel_reciprocal = fp32_2_fp16(1.0f / (k_width * k_height));
     args.image.address = input->data<float16>();
     args.image.channels = input->shape().channel();
     args.image.height = input->shape().height();
     args.image.width = input->shape().width();
-    args.image.pad_height = param_.paddings[0];
-    args.image.pad_width = param_.paddings[2];
+    args.image.pad_height = paddings[0];
+    args.image.pad_width = paddings[2];
     args.image.scale_address = input->scale();
     args.output.address = output->mutableData<float16>();
     args.output.scale_address = output->scale();
@@ -76,12 +77,13 @@ class PoolingPE : public PE {
     float* image_addr = float_input.mutableData<float>(FP32, input->shape());
     float_input.copyFrom(input);
     float16* data_out = output->data<float16>();
+    auto paddings = *param_.paddings;
 
     int image_height = input->shape().height();
     int image_width = input->shape().width();
     int image_channels = input->shape().channel();
-    int image_pad_h = param_.paddings[0];
-    int image_pad_w = param_.paddings[2];
+    int image_pad_h = paddings[0];
+    int image_pad_w = paddings[2];
     int kernel_height = param_.kernelSize[1];
     int kernel_width = param_.kernelSize[0];
     int kernel_step_h = param_.strides[0];
diff --git a/lite/kernels/cuda/pool_compute.cu b/lite/kernels/cuda/pool_compute.cu
index e980186bba..456a2ce911 100644
--- a/lite/kernels/cuda/pool_compute.cu
+++ b/lite/kernels/cuda/pool_compute.cu
@@ -256,6 +256,7 @@ void PoolCompute::Run() {
   bool adaptive = param.adaptive;
   auto x_dims = param.x->dims();
   auto out_dims = param.output->dims();
+  auto paddings = *param.paddings;
   const int in_h = x_dims[2];
   const int in_w = x_dims[3];
   const int out_h = out_dims[2];
   const int out_w = out_dims[3];
@@ -266,8 +267,8 @@ void PoolCompute::Run() {
   const int win_h = param.ksize[0];
   const int win_w = param.ksize[1];
   const int stride_h = param.strides[0];
   const int stride_w = param.strides[1];
-  const int pad_h = param.paddings[0];
-  const int pad_w = param.paddings[2];
+  const int pad_h = paddings[0];
+  const int pad_w = paddings[2];
   const int total_threads = out_dims.production();
   const int threads = 512;
   const int blocks = (total_threads + threads - 1) / threads;
diff --git a/lite/kernels/cuda/pool_compute_test.cc b/lite/kernels/cuda/pool_compute_test.cc
index 0d3ec20aab..308905c1d0 100644
--- a/lite/kernels/cuda/pool_compute_test.cc
+++ b/lite/kernels/cuda/pool_compute_test.cc
@@ -51,9 +51,10 @@ static std::vector<int64_t> compute_output_shape(operators::PoolParam* param_) {
   std::vector<int>& ksize = param_->ksize;
   if (param_->global_pooling) {
     ksize.resize(static_cast<size_t>(x_dims.size()) - 2);
+    auto paddings = *param_->paddings;
     for (size_t i = 0; i < ksize.size(); ++i) {
-      param_->paddings[2 * i] = 0;
-      param_->paddings[2 * i + 1] = 0;
+      paddings[2 * i] = 0;
+      paddings[2 * i + 1] = 0;
       ksize[i] = static_cast<int>(x_dims[i + 2]);
     }
   }
@@ -66,8 +67,8 @@ static std::vector<int64_t> compute_output_shape(operators::PoolParam* param_) {
   for (size_t i = 0; i < param_->ksize.size(); ++i) {
     output_shape.push_back(PoolOutputSize(x_dims[i + 2],
                                           param_->ksize[i],
-                                          param_->paddings[2 * i],
-                                          param_->paddings[2 * i + 1],
+                                          paddings[2 * i],
+                                          paddings[2 * i + 1],
                                           param_->strides[i],
                                           param_->ceil_mode));
   }
@@ -84,7 +85,7 @@ static void pool_compute_ref(const operators::PoolParam& param) {
 
   std::vector<int> ksize = param.ksize;
   std::vector<int> strides = param.strides;
-  std::vector<int> paddings = param.paddings;
+  std::vector<int> paddings = *param.paddings;
   std::string pooling_type = param.pooling_type;
   bool global_pooling = param.global_pooling;
@@ -235,7 +236,9 @@ TEST(pool_cuda, compute) {
           }
           param.global_pooling = global_pooling;
           param.strides = {stride, stride};
-          param.paddings = {pad, pad, pad, pad};
+          std::vector<int> paddings = {pad, pad, pad, pad};
+          param.paddings =
+              std::make_shared<std::vector<int>>(paddings);
           param.exclusive = exclusive;
           param.ceil_mode = ceil_mode;
           param.adaptive = false;
diff --git a/lite/kernels/npu/bridges/pool_op.cc b/lite/kernels/npu/bridges/pool_op.cc
index 87fe705705..1738b8467f 100644
--- a/lite/kernels/npu/bridges/pool_op.cc
+++ b/lite/kernels/npu/bridges/pool_op.cc
@@ -47,7 +47,8 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
   auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
   auto npu_window = ge::AttrValue::LIST_INT(ksize.begin(), ksize.end());
-  auto padding = op_info->GetAttr<std::vector<int>>("paddings");
+  auto padding =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool pads_equal = (padding[0] == padding[1]) && (padding[2] == padding[3]);
   if (!pads_equal) {
     LOG(FATAL)
diff --git a/lite/kernels/npu/bridges/pool_op_test.cc b/lite/kernels/npu/bridges/pool_op_test.cc
index 298e065547..3e3ef5862c 100644
--- a/lite/kernels/npu/bridges/pool_op_test.cc
+++ b/lite/kernels/npu/bridges/pool_op_test.cc
@@ -39,7 +39,8 @@ void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
   std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
   std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
-  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  std::vector<int> paddings =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool exclusive = op_info->GetAttr<bool>("exclusive");
   std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
   bool global_pooling = op_info->GetAttr<bool>("global_pooling");
@@ -163,8 +164,9 @@ void test_pool(int bs,
   opdesc.SetAttr("global_pooling", global_pooling);
   opdesc.SetAttr("exclusive", exclusive);
   opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
-  opdesc.SetAttr("paddings",
-                 std::vector<int>({padding, padding, padding, padding}));
+  opdesc.SetAttr(
+      "paddings",
+      std::shared_ptr<std::vector<int>>({padding, padding, padding, padding}));
 
   // create and convert op to NPU model, then run it on NPU
   auto op = CreateOp<operators::PoolOpLite>(opdesc, &scope);
diff --git a/lite/kernels/opencl/pool_compute.cc b/lite/kernels/opencl/pool_compute.cc
index afd67d038f..d275b312d6 100644
--- a/lite/kernels/opencl/pool_compute.cc
+++ b/lite/kernels/opencl/pool_compute.cc
@@ -44,7 +44,7 @@ class PoolCompute
     const auto& out_dims = param.output->dims();
     const std::string pooling_type = param.pooling_type;
     const bool global_pooling = param.global_pooling;
-    std::vector<int> paddings = param.paddings;
+    std::vector<int> paddings = *param.paddings;
     std::vector<int> strides = param.strides;
     std::vector<int> ksize = param.ksize;
     if (global_pooling) {
diff --git a/lite/kernels/opencl/pool_compute_test.cc b/lite/kernels/opencl/pool_compute_test.cc
index 827c449d92..354f31a68b 100644
--- a/lite/kernels/opencl/pool_compute_test.cc
+++ b/lite/kernels/opencl/pool_compute_test.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include <gtest/gtest.h>
+#include <memory>
 #include <random>
 #include "lite/backends/opencl/target_wrapper.h"
 #include "lite/core/op_registry.h"
@@ -88,7 +89,7 @@ TEST(pool2d, compute) {
   param.output = &out;
   param.global_pooling = true;
   param.pooling_type = "avg";
-  param.paddings = std::vector<int>{0, 0, 0, 0};
+  param.paddings = std::make_shared<std::vector<int>>({0, 0, 0, 0});
   param.strides = std::vector<int>{1, 1};
   param.ksize = std::vector<int>{7, 7};
diff --git a/lite/kernels/x86/pool_compute.h b/lite/kernels/x86/pool_compute.h
index 57bcddcec9..0dccb245b1 100644
--- a/lite/kernels/x86/pool_compute.h
+++ b/lite/kernels/x86/pool_compute.h
@@ -35,7 +35,6 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     auto& param = *param_.get_mutable<param_t>();
     if (param.global_pooling) {
       for (size_t i = 0; i < param.ksize.size(); ++i) {
-        param.paddings[i] = 0;
         param.ksize[i] = static_cast<int>(param.x->dims()[i + 2]);
       }
     }
@@ -52,7 +51,7 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
             param.x,
             param.ksize,
             param.strides,
-            param.paddings,
+            *param.paddings,
             pool_process,
             true,
             false,
@@ -68,7 +67,7 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
             param.x,
             param.ksize,
             param.strides,
-            param.paddings,
+            *param.paddings,
             pool_process,
             param.exclusive,
             param.adaptive,
diff --git a/lite/kernels/x86/pool_compute_test.cc b/lite/kernels/x86/pool_compute_test.cc
index 1382c945a2..be17a3deed 100644
--- a/lite/kernels/x86/pool_compute_test.cc
+++ b/lite/kernels/x86/pool_compute_test.cc
@@ -60,7 +60,7 @@ TEST(pool2d_x86, run_test) {
   param.x = &x;
   param.output = &out;
   param.strides = {2, 2};
-  param.paddings = {0, 0, 0, 0};
+  param.paddings = std::make_shared<std::vector<int>>({0, 0, 0, 0});
   param.ksize = {2, 2};
   param.pooling_type = "max";
   std::unique_ptr<KernelContext> ctx(new KernelContext);
diff --git a/lite/kernels/xpu/bridges/pool_op.cc b/lite/kernels/xpu/bridges/pool_op.cc
index fbc6a9919c..4ecad2f036 100644
--- a/lite/kernels/xpu/bridges/pool_op.cc
+++ b/lite/kernels/xpu/bridges/pool_op.cc
@@ -38,7 +38,8 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
   auto x_var_name = op_info->Input("X").front();
   auto pooling_type = op_info->GetAttr<std::string>("pooling_type");
   auto ceil_mode = op_info->GetAttr<bool>("ceil_mode");
-  auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  auto paddings =
+      op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings");
   auto global_pooling = op_info->GetAttr<bool>("global_pooling");
   auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
   auto strides = op_info->GetAttr<std::vector<int>>("strides");
@@ -57,7 +58,7 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
         graph_ctx->builder->CreateMaxPool2D(*input_nodes.at(x_var_name),
                                             lite::xpu::CvtShape(ksize),
                                             lite::xpu::CvtShape(strides),
-                                            lite::xpu::CvtShape(paddings),
+                                            lite::xpu::CvtShape(*paddings),
                                             "NCHW",
                                             ceil_mode));
   }
@@ -72,7 +73,7 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
         graph_ctx->builder->CreateAvgPool2D(*input_nodes.at(x_var_name),
                                             lite::xpu::CvtShape(ksize),
                                             lite::xpu::CvtShape(strides),
-                                            lite::xpu::CvtShape(paddings),
+                                            lite::xpu::CvtShape(*paddings),
                                             "NCHW",
                                             ceil_mode,
                                             !exclusive));
diff --git a/lite/kernels/xpu/bridges/pool_op_test.cc b/lite/kernels/xpu/bridges/pool_op_test.cc
index 7efc6b464c..bc1ecad2cf 100644
--- a/lite/kernels/xpu/bridges/pool_op_test.cc
+++ b/lite/kernels/xpu/bridges/pool_op_test.cc
@@ -38,7 +38,8 @@ void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
   std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
   std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
-  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  std::vector<int> paddings =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool exclusive = op_info->GetAttr<bool>("exclusive");
   std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
   bool global_pooling = op_info->GetAttr<bool>("global_pooling");
@@ -162,8 +163,9 @@ void test_pool(int bs,
   opdesc.SetAttr("global_pooling", global_pooling);
   opdesc.SetAttr("exclusive", exclusive);
   opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
-  opdesc.SetAttr("paddings",
-                 std::vector<int>({padding, padding, padding, padding}));
+  opdesc.SetAttr(
+      "paddings",
+      std::shared_ptr<std::vector<int>>({padding, padding, padding, padding}));
   opdesc.SetAttr("ceil_mode", ceil_mode);
 
   // create and convert op to XPU model, then run it on XPU
diff --git a/lite/operators/pool_op.cc b/lite/operators/pool_op.cc
index 49f336e402..7f2d2ccd9a 100644
--- a/lite/operators/pool_op.cc
+++ b/lite/operators/pool_op.cc
@@ -35,7 +35,7 @@ bool PoolOpLite::CheckShape() const {
   CHECK_OR_FALSE(x_dims.size() - ksize.size() == 2U);
   // Strides size and pooling size should be the same.
   CHECK_OR_FALSE(ksize.size() == strides.size());
-  // Paddings size and pooling size should be the same.
+  // Paddings size must be 4.
   CHECK_OR_FALSE(paddings.size() == 4L);
 
   return true;
-- 
GitLab
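
For context on the pattern this patch repeats in every backend: paddings moves from a plain std::vector<int> on the param struct to a std::shared_ptr<std::vector<int>> holding four entries, where index 0 carries the height (top) pad and index 2 the width (left) pad, and each kernel dereferences the pointer once into a local vector before indexing. The standalone sketch below distills that access pattern; PoolParam here is a deliberately simplified stand-in for Lite's real operators::PoolParam, not the actual type.

#include <cassert>
#include <iostream>
#include <memory>
#include <vector>

// Hypothetical, trimmed-down stand-in for the real param struct: the only
// point of interest is that paddings is a shared_ptr to a 4-element
// {top, bottom, left, right} vector.
struct PoolParam {
  std::vector<int> ksize;
  std::vector<int> strides;
  std::shared_ptr<std::vector<int>> paddings;
};

// Mirrors the check this patch puts into PoolOpLite::CheckShape():
// the paddings vector must hold exactly 4 entries.
bool CheckShape(const PoolParam& param) {
  return param.paddings != nullptr && param.paddings->size() == 4;
}

// Mirrors the kernel-side idiom used across the backends: dereference the
// shared_ptr once, then read pad_h from slot 0 and pad_w from slot 2.
void RunKernel(const PoolParam& param) {
  auto paddings = *param.paddings;  // local copy; plain indexing from here on
  const int pad_h = paddings[0];
  const int pad_w = paddings[2];
  std::cout << "pad_h=" << pad_h << " pad_w=" << pad_w << "\n";
}

int main() {
  PoolParam param;
  param.ksize = {2, 2};
  param.strides = {2, 2};
  // The CUDA test in the patch builds the attribute the same way:
  std::vector<int> paddings = {1, 1, 0, 0};
  param.paddings = std::make_shared<std::vector<int>>(paddings);
  assert(CheckShape(param));
  RunKernel(param);
  return 0;
}

Presumably the shared_ptr lets the op and any kernel that captures the pointer observe one padding vector without copying it through every param struct; the sketch only shows the access pattern the diff repeats, not the real Lite API.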