Commit 3df6aa7e authored by chenjiaoAngel

update padding type in other devices, test=develop

Parent 48ac4ca5
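The change is mechanical but wide: `paddings` moves from a plain `std::vector<int>` to a `std::shared_ptr<std::vector<int>>` shared between the op and its kernels, so every backend now dereferences the pointer before indexing. A minimal sketch of the before/after access pattern, assuming a simplified `PoolParam` (the real struct lives in `lite/operators/op_params.h` and carries many more fields):

```cpp
#include <memory>
#include <vector>

// Simplified stand-in for the real PoolParam; only the field this
// commit touches is shown.
struct PoolParam {
  // Before this commit: std::vector<int> paddings;
  std::shared_ptr<std::vector<int>> paddings;
};

int main() {
  PoolParam param;
  // 4-element layout the kernels index below: {top, bottom, left, right},
  // i.e. paddings[0] is the height pad and paddings[2] the width pad.
  param.paddings = std::make_shared<std::vector<int>>(
      std::vector<int>({1, 1, 2, 2}));

  // Typical kernel-side pattern after this commit: dereference once,
  // then index a plain vector.
  auto paddings = *param.paddings;
  int pad_h = paddings[0];
  int pad_w = paddings[2];
  return (pad_h == 1 && pad_w == 2) ? 0 : 1;
}
```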
......@@ -45,13 +45,14 @@ class PoolingPE : public PE {
     PoolingArgs args = {0};
     args.mode = param_.type;
+    auto paddings = *param_.paddings;
     args.kernel_reciprocal = fp32_2_fp16(1.0f / (k_width * k_height));
     args.image.address = input->data<float16>();
     args.image.channels = input->shape().channel();
     args.image.height = input->shape().height();
     args.image.width = input->shape().width();
-    args.image.pad_height = param_.paddings[0];
-    args.image.pad_width = param_.paddings[2];
+    args.image.pad_height = paddings[0];
+    args.image.pad_width = paddings[2];
     args.image.scale_address = input->scale();
     args.output.address = output->mutableData<float16>();
     args.output.scale_address = output->scale();
......@@ -76,12 +77,13 @@ class PoolingPE : public PE {
     float* image_addr = float_input.mutableData<float>(FP32, input->shape());
     float_input.copyFrom(input);
     float16* data_out = output->data<float16>();
+    auto paddings = *param_.paddings;
     int image_height = input->shape().height();
     int image_width = input->shape().width();
     int image_channels = input->shape().channel();
-    int image_pad_h = param_.paddings[0];
-    int image_pad_w = param_.paddings[2];
+    int image_pad_h = paddings[0];
+    int image_pad_w = paddings[2];
     int kernel_height = param_.kernelSize[1];
     int kernel_width = param_.kernelSize[0];
     int kernel_step_h = param_.strides[0];
......
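Unrelated to the pointer change but visible in the first hunk: the FPGA path precomputes `1.0f / (k_width * k_height)` and ships it as a fp16 `kernel_reciprocal`, turning average pooling into one multiply per window instead of a divide. A plain-float sketch of that trick (the real `fp32_2_fp16` converts to the device's half format):

```cpp
#include <vector>

// Average pooling via a precomputed reciprocal: one divide per kernel,
// one multiply per window.
float AvgPoolWindow(const std::vector<float>& window,
                    float kernel_reciprocal) {
  float sum = 0.0f;
  for (float v : window) sum += v;
  return sum * kernel_reciprocal;  // multiply is cheaper than divide
}

int main() {
  const int k_width = 2, k_height = 2;
  const float kernel_reciprocal = 1.0f / (k_width * k_height);
  std::vector<float> window = {1.0f, 2.0f, 3.0f, 4.0f};
  // (1 + 2 + 3 + 4) * 0.25 == 2.5
  return AvgPoolWindow(window, kernel_reciprocal) == 2.5f ? 0 : 1;
}
```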
......@@ -256,6 +256,7 @@ void PoolCompute::Run() {
   bool adaptive = param.adaptive;
   auto x_dims = param.x->dims();
   auto out_dims = param.output->dims();
+  auto paddings = *param.paddings;
   const int in_h = x_dims[2];
   const int in_w = x_dims[3];
   const int out_h = out_dims[2];
......@@ -266,8 +267,8 @@ void PoolCompute::Run() {
   const int win_w = param.ksize[1];
   const int stride_h = param.strides[0];
   const int stride_w = param.strides[1];
-  const int pad_h = param.paddings[0];
-  const int pad_w = param.paddings[2];
+  const int pad_h = paddings[0];
+  const int pad_w = paddings[2];
   const int total_threads = out_dims.production();
   const int threads = 512;
   const int blocks = (total_threads + threads - 1) / threads;
......
......@@ -51,9 +51,10 @@ static std::vector<int64_t> compute_output_shape(operators::PoolParam* param_) {
   std::vector<int>& ksize = param_->ksize;
+  auto& paddings = *param_->paddings;
   if (param_->global_pooling) {
     ksize.resize(static_cast<size_t>(x_dims.size()) - 2);
     for (size_t i = 0; i < ksize.size(); ++i) {
-      param_->paddings[2 * i] = 0;
-      param_->paddings[2 * i + 1] = 0;
+      paddings[2 * i] = 0;
+      paddings[2 * i + 1] = 0;
       ksize[i] = static_cast<int>(x_dims[i + 2]);
     }
   }
......@@ -66,8 +67,8 @@ static std::vector<int64_t> compute_output_shape(operators::PoolParam* param_) {
     for (size_t i = 0; i < param_->ksize.size(); ++i) {
       output_shape.push_back(PoolOutputSize(x_dims[i + 2],
                                             param_->ksize[i],
-                                            param_->paddings[2 * i],
-                                            param_->paddings[2 * i + 1],
+                                            paddings[2 * i],
+                                            paddings[2 * i + 1],
                                             param_->strides[i],
                                             param_->ceil_mode));
     }
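Two details about the dereference in `compute_output_shape`: it sits before the `if` so the later `PoolOutputSize` loop can still see it, and it binds `auto&` rather than `auto`. The zeroing under `global_pooling` used to write straight into `param_->paddings`, so downstream kernels saw the cleared pads; only a reference preserves that, while a plain `auto` copy would silently localize the writes. A minimal sketch of the difference, assuming a simplified param struct:

```cpp
#include <cassert>
#include <memory>
#include <vector>

// Hypothetical cut-down param; only the shared paddings field matters here.
struct Param {
  std::shared_ptr<std::vector<int>> paddings;
};

int main() {
  Param p;
  p.paddings =
      std::make_shared<std::vector<int>>(std::vector<int>({3, 3, 3, 3}));

  auto copy = *p.paddings;  // independent copy of the vector
  copy[0] = 0;              // invisible to other holders of the shared_ptr

  auto& ref = *p.paddings;  // alias of the shared vector
  ref[0] = 0;               // every holder of the shared_ptr sees this

  assert((*p.paddings)[0] == 0 && (*p.paddings)[1] == 3);
  return 0;
}
```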
......@@ -84,7 +85,7 @@ static void pool_compute_ref(const operators::PoolParam& param) {
   std::vector<int> ksize = param.ksize;
   std::vector<int> strides = param.strides;
-  std::vector<int> paddings = param.paddings;
+  std::vector<int> paddings = *param.paddings;
   std::string pooling_type = param.pooling_type;
   bool global_pooling = param.global_pooling;
......@@ -235,7 +236,9 @@ TEST(pool_cuda, compute) {
   }
   param.global_pooling = global_pooling;
   param.strides = {stride, stride};
-  param.paddings = {pad, pad, pad, pad};
+  std::vector<int> paddings = {pad, pad, pad, pad};
+  param.paddings =
+      std::make_shared<std::vector<int>>(paddings);
   param.exclusive = exclusive;
   param.ceil_mode = ceil_mode;
   param.adaptive = false;
......
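The CUDA test builds the shared pads in two statements for a reason: `std::make_shared` forwards its arguments through a deduced parameter pack, and a bare braced list has no type to deduce. A short sketch of the pattern that compiles and the one that does not:

```cpp
#include <memory>
#include <vector>

int main() {
  int pad = 1;

  // Won't compile: a braced-init-list can't be deduced by make_shared.
  // auto bad = std::make_shared<std::vector<int>>({pad, pad, pad, pad});

  // Name the vector first, as the CUDA test above does...
  std::vector<int> paddings = {pad, pad, pad, pad};
  auto p1 = std::make_shared<std::vector<int>>(paddings);

  // ...or spell out the element type so an rvalue vector is moved in.
  auto p2 = std::make_shared<std::vector<int>>(
      std::vector<int>({pad, pad, pad, pad}));

  return (p1->size() == 4 && p2->size() == 4) ? 0 : 1;
}
```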
......@@ -47,7 +47,8 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
   auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
   auto npu_window = ge::AttrValue::LIST_INT(ksize.begin(), ksize.end());
-  auto padding = op_info->GetAttr<std::vector<int>>("paddings");
+  auto padding =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool pads_equal = (padding[0] == padding[1]) && (padding[2] == padding[3]);
   if (!pads_equal) {
     LOG(FATAL)
......
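The NPU converter dereferences the shared attribute and then insists on symmetric pads, because the ge pooling op takes a single pad per spatial dimension. A sketch of that check in isolation; `PadsEqual` is a hypothetical helper, not part of the converter:

```cpp
#include <memory>
#include <vector>

// {top, bottom, left, right}: the NPU op wants one pad per spatial dim,
// so top/bottom and left/right must match. Hypothetical helper.
bool PadsEqual(const std::vector<int>& padding) {
  return padding[0] == padding[1] && padding[2] == padding[3];
}

int main() {
  auto attr =
      std::make_shared<std::vector<int>>(std::vector<int>({1, 1, 2, 2}));
  auto padding = *attr;  // same dereference the converter performs
  return PadsEqual(padding) ? 0 : 1;  // {1, 1, 2, 2} is symmetric -> ok
}
```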
......@@ -39,7 +39,8 @@ void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
   std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
   std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
-  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  std::vector<int> paddings =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool exclusive = op_info->GetAttr<bool>("exclusive");
   std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
   bool global_pooling = op_info->GetAttr<bool>("global_pooling");
......@@ -163,8 +164,9 @@ void test_pool(int bs,
opdesc.SetAttr("global_pooling", global_pooling);
opdesc.SetAttr("exclusive", exclusive);
opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
opdesc.SetAttr("paddings",
std::vector<int>({padding, padding, padding, padding}));
opdesc.SetAttr(
"paddings",
std::shared_ptr<std::vector<int>>({padding, padding, padding, padding}));
// create and convert op to NPU model, then run it on NPU
auto op = CreateOp<operators::PoolOpLite>(opdesc, &scope);
......
......@@ -44,7 +44,7 @@ class PoolCompute
     const auto& out_dims = param.output->dims();
     const std::string pooling_type = param.pooling_type;
     const bool global_pooling = param.global_pooling;
-    std::vector<int> paddings = param.paddings;
+    std::vector<int> paddings = *param.paddings;
     std::vector<int> strides = param.strides;
     std::vector<int> ksize = param.ksize;
     if (global_pooling) {
......
......@@ -13,6 +13,7 @@
 // limitations under the License.
 #include <gtest/gtest.h>
 #include <memory>
+#include <random>
 #include "lite/backends/opencl/target_wrapper.h"
 #include "lite/core/op_registry.h"
......@@ -88,7 +89,7 @@ TEST(pool2d, compute) {
   param.output = &out;
   param.global_pooling = true;
   param.pooling_type = "avg";
-  param.paddings = std::vector<int>{0, 0, 0, 0};
+  param.paddings =
+      std::make_shared<std::vector<int>>(std::vector<int>{0, 0, 0, 0});
   param.strides = std::vector<int>{1, 1};
   param.ksize = std::vector<int>{7, 7};
......
......@@ -35,7 +35,6 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
     auto& param = *param_.get_mutable<param_t>();
     if (param.global_pooling) {
       for (size_t i = 0; i < param.ksize.size(); ++i) {
-        param.paddings[i] = 0;
         param.ksize[i] = static_cast<int>(param.x->dims()[i + 2]);
       }
     }
......@@ -52,7 +51,7 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
         param.x,
         param.ksize,
         param.strides,
-        param.paddings,
+        *param.paddings,
         pool_process,
         true,
         false,
......@@ -68,7 +67,7 @@ class PoolCompute : public KernelLite<TARGET(kX86), PRECISION(kFloat)> {
         param.x,
         param.ksize,
         param.strides,
-        param.paddings,
+        *param.paddings,
         pool_process,
         param.exclusive,
         param.adaptive,
......
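On x86 the pooling math keeps its `const std::vector<int>&` parameter, so the call sites simply dereference in place: `*param.paddings` is an lvalue and the reference binds without copying. A sketch under that assumption; `SumPads` is a hypothetical stand-in for the real pooling functor:

```cpp
#include <memory>
#include <numeric>
#include <vector>

// Stand-in for a math routine that takes pads by const reference,
// like the pooling functor the x86 kernel calls.
int SumPads(const std::vector<int>& paddings) {
  return std::accumulate(paddings.begin(), paddings.end(), 0);
}

int main() {
  auto paddings =
      std::make_shared<std::vector<int>>(std::vector<int>({0, 0, 1, 1}));
  // Dereference at the call site: binds the reference, no vector copy.
  return SumPads(*paddings) == 2 ? 0 : 1;
}
```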
......@@ -60,7 +60,7 @@ TEST(pool2d_x86, run_test) {
   param.x = &x;
   param.output = &out;
   param.strides = {2, 2};
-  param.paddings = {0, 0, 0, 0};
+  param.paddings =
+      std::make_shared<std::vector<int>>(std::vector<int>{0, 0, 0, 0});
   param.ksize = {2, 2};
   param.pooling_type = "max";
   std::unique_ptr<KernelContext> ctx(new KernelContext);
......
......@@ -38,7 +38,8 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
   auto x_var_name = op_info->Input("X").front();
   auto pooling_type = op_info->GetAttr<std::string>("pooling_type");
   auto ceil_mode = op_info->GetAttr<bool>("ceil_mode");
-  auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  auto paddings =
+      op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings");
   auto global_pooling = op_info->GetAttr<bool>("global_pooling");
   auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
   auto strides = op_info->GetAttr<std::vector<int>>("strides");
......@@ -57,7 +58,7 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
         graph_ctx->builder->CreateMaxPool2D(*input_nodes.at(x_var_name),
                                             lite::xpu::CvtShape(ksize),
                                             lite::xpu::CvtShape(strides),
-                                            lite::xpu::CvtShape(paddings),
+                                            lite::xpu::CvtShape(*paddings),
                                             "NCHW",
                                             ceil_mode));
   }
......@@ -72,7 +73,7 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> op,
         graph_ctx->builder->CreateAvgPool2D(*input_nodes.at(x_var_name),
                                             lite::xpu::CvtShape(ksize),
                                             lite::xpu::CvtShape(strides),
-                                            lite::xpu::CvtShape(paddings),
+                                            lite::xpu::CvtShape(*paddings),
                                             "NCHW",
                                             ceil_mode,
                                             !exclusive));
......
......@@ -38,7 +38,8 @@ void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
   std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
   std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
-  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
+  std::vector<int> paddings =
+      *(op_info->GetAttr<std::shared_ptr<std::vector<int>>>("paddings"));
   bool exclusive = op_info->GetAttr<bool>("exclusive");
   std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
   bool global_pooling = op_info->GetAttr<bool>("global_pooling");
......@@ -162,8 +163,9 @@ void test_pool(int bs,
opdesc.SetAttr("global_pooling", global_pooling);
opdesc.SetAttr("exclusive", exclusive);
opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
opdesc.SetAttr("paddings",
std::vector<int>({padding, padding, padding, padding}));
opdesc.SetAttr(
"paddings",
std::shared_ptr<std::vector<int>>({padding, padding, padding, padding}));
opdesc.SetAttr("ceil_mode", ceil_mode);
// create and convert op to XPU model, then run it on XPU
......
......@@ -35,7 +35,7 @@ bool PoolOpLite::CheckShape() const {
   CHECK_OR_FALSE(x_dims.size() - ksize.size() == 2U);
   // Strides size and pooling size should be the same.
   CHECK_OR_FALSE(ksize.size() == strides.size());
-  // Paddings size and pooling size should be the same.
+  // Paddings size must be 4.
   CHECK_OR_FALSE(paddings.size() == 4L);
   return true;
......
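The updated comment states the new invariant plainly: with per-side pads, `paddings` always has four entries regardless of kernel rank. A sketch of the checks, assuming a simplified signature (the real `CheckShape` reads everything from `PoolParam` and uses `CHECK_OR_FALSE`):

```cpp
#include <memory>
#include <vector>

// Simplified version of the shape checks.
bool CheckShape(size_t x_rank,
                const std::vector<int>& ksize,
                const std::vector<int>& strides,
                const std::vector<int>& paddings) {
  if (x_rank - ksize.size() != 2U) return false;  // N and C plus spatial dims
  if (ksize.size() != strides.size()) return false;
  return paddings.size() == 4U;  // {top, bottom, left, right}
}

int main() {
  auto pads =
      std::make_shared<std::vector<int>>(std::vector<int>({0, 0, 0, 0}));
  return CheckShape(4, {2, 2}, {2, 2}, *pads) ? 0 : 1;
}
```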