未验证 提交 40137111 编写于 作者: Z zhupengyang 提交者: GitHub

[NPU] fix pool padding type (#2524)

test=develop
上级 c809321d
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/operators/pool_op.h"
#include "lite/backends/npu/builder.h"
#include "lite/kernels/npu/bridges/registry.h"
......@@ -32,49 +33,78 @@ node_map_type PoolConverter(const std::shared_ptr<lite::OpLite> pool_op,
std::shared_ptr<ge::op::Pooling> pool_node =
std::make_shared<ge::op::Pooling>(unique_op_type);
auto x_var_name = op_info->Input("X").front();
auto x = scope->FindTensor(x_var_name);
pool_node->set_input_x(*inputs_map.at(x_var_name));
lite::npu::OpList::Global().add(inputs_map.at(x_var_name));
lite::npu::OpList::Global().add(pool_node);
int mode = 0;
auto pooling_type = op_info->GetAttr<std::string>("pooling_type");
int npu_mode = 0;
if (pooling_type == "max") {
npu_mode = 0;
mode = 0;
} else if (pooling_type == "avg") {
npu_mode = 1;
mode = 1;
CHECK(op_info->GetAttr<bool>("exclusive"))
<< "[NPU] exclusive must be true in HiAI DDK";
} else {
LOG(FATAL) << "[NPU] Unsupported pooling type: " << pooling_type;
}
bool npu_global_pooling = op_info->GetAttr<bool>("global_pooling");
pool_node->set_attr_mode(mode);
int pad_mode = 0;
std::string padding_algorithm("");
if (op_info->HasAttr("padding_algorithm")) {
padding_algorithm = op_info->GetAttr<std::string>("padding_algorithm");
}
if (padding_algorithm == "SAME") {
pad_mode = 6;
} else if (padding_algorithm == "VALID") {
pad_mode = 5;
}
pool_node->set_attr_pad_mode(pad_mode);
bool global_pooling = op_info->GetAttr<bool>("global_pooling");
pool_node->set_attr_global_pooling(global_pooling);
auto ksize = op_info->GetAttr<std::vector<int>>("ksize");
auto npu_window = ge::AttrValue::LIST_INT(ksize.begin(), ksize.end());
auto window = ge::AttrValue::LIST_INT(ksize.begin(), ksize.end());
pool_node->set_attr_window(window);
auto padding = op_info->GetAttr<std::vector<int>>("paddings");
bool pads_equal = (padding[0] == padding[1]) && (padding[2] == padding[3]);
if (!pads_equal) {
LOG(FATAL)
<< "padding requires pad_left == pad_right, pad_top == pad_bottom";
auto paddings = op_info->GetAttr<std::vector<int>>("paddings");
if (paddings.size() == 2L) {
for (size_t i = 0; i < 2L; ++i) {
int copy_pad = *(paddings.begin() + 2 * i);
paddings.insert(paddings.begin() + 2 * i + 1, copy_pad);
}
}
CHECK_EQ(paddings.size(), 4L)
<< "Paddings size should be the same or twice as the inputs size.";
bool adaptive = false;
if (op_info->HasAttr("adaptive")) {
adaptive = op_info->GetAttr<bool>("adaptive");
}
auto npu_pad =
ge::AttrValue::LIST_INT{padding[0], padding[1], padding[2], padding[3]};
auto strides = op_info->GetAttr<std::vector<int>>("strides");
operators::UpdatePadding(&paddings,
global_pooling,
adaptive,
padding_algorithm,
x->dims(),
strides,
ksize);
auto npu_pad = ge::AttrValue::LIST_INT{
paddings[0], paddings[1], paddings[2], paddings[3]};
pool_node->set_attr_pad(npu_pad);
auto npu_stride = ge::AttrValue::LIST_INT(strides.begin(), strides.end());
int npu_ceil_mode = 0;
pool_node->set_attr_stride(npu_stride);
int ceil_mode = 0;
if (op_info->HasAttr("ceil_mode")) {
npu_ceil_mode = op_info->GetAttr<bool>("ceil_mode") ? 1 : 0;
ceil_mode = op_info->GetAttr<bool>("ceil_mode") ? 1 : 0;
}
pool_node->set_input_x(*inputs_map.at(x_var_name));
pool_node->set_attr_mode(npu_mode);
pool_node->set_attr_pad_mode(0);
pool_node->set_attr_global_pooling(npu_global_pooling);
pool_node->set_attr_window(npu_window);
pool_node->set_attr_pad(npu_pad);
pool_node->set_attr_stride(npu_stride);
pool_node->set_attr_ceil_mode(npu_ceil_mode);
pool_node->set_attr_ceil_mode(ceil_mode);
// output_node->set_attr_data_mode(npu_data_mode);
lite::npu::OpList::Global().add(inputs_map.at(x_var_name));
lite::npu::OpList::Global().add(pool_node);
node_map_type outputs_map;
outputs_map[op_info->Output("Out").front()] = pool_node;
return outputs_map;
......
......@@ -41,39 +41,6 @@ bool PoolOpLite::CheckShape() const {
return true;
}
// Rewrites `paddings` in place to match the requested padding algorithm.
//
// "SAME":  pads so the output spatial size is ceil(input / stride); when the
//          total pad is odd, the extra cell goes on the second (bottom/right)
//          side.
// "VALID": all paddings are forced to zero.
// Any other value leaves the caller-supplied paddings untouched, except that
// when global_pooling or adaptive is true the kernel ignores paddings, so
// they are reset to zero unconditionally.
//
// @param paddings          in/out, expanded per-side pads; assumes layout
//                          [top, bottom, left, right] — TODO confirm with caller
// @param global_pooling    pooling over the whole spatial extent
// @param adaptive          adaptive pooling enabled
// @param padding_algorithm "SAME", "VALID", or "" (explicit paddings)
// @param data_dims         input dims; spatial dims start at index 2 (NCHW)
// @param strides           per-spatial-dim strides
// @param ksize             per-spatial-dim kernel sizes
inline void UpdatePadding(std::vector<int>* paddings,
                          const bool global_pooling,
                          const bool adaptive,
                          const std::string padding_algorithm,
                          const lite::DDim data_dims,
                          const std::vector<int>& strides,
                          const std::vector<int>& ksize) {
  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    // size_t loop index avoids the signed/unsigned comparison warning.
    for (size_t i = 0; i < strides.size(); ++i) {
      const int64_t data_dim = data_dims[i + 2];
      const int64_t out_size = (data_dim + strides[i] - 1) / strides[i];
      const int64_t pad_sum = std::max(
          (out_size - 1) * strides[i] + ksize[i] - data_dim,
          static_cast<int64_t>(0));
      // Smaller half on the first side, remainder on the second side.
      const int pad_0 = static_cast<int>(pad_sum / 2);
      const int pad_1 = static_cast<int>(pad_sum) - pad_0;
      (*paddings)[i * 2] = pad_0;
      (*paddings)[i * 2 + 1] = pad_1;
    }
  } else if (padding_algorithm == "VALID") {
    std::fill(paddings->begin(), paddings->end(), 0);
  }
  // If global_pooling or adaptive is true, paddings will be ignored by the
  // kernel, so zero them out for consistency.
  if (global_pooling || adaptive) {
    std::fill(paddings->begin(), paddings->end(), 0);
  }
}
int PoolOutputSize(int input_size,
int filter_size,
int pad_left,
......
......@@ -14,6 +14,7 @@
#pragma once
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
......@@ -95,6 +96,39 @@ class PoolOpLite : public OpLite {
std::string padding_algorithm_{""};
};
// Rewrites `paddings` in place to match the requested padding algorithm.
//
// "SAME":  pads so the output spatial size is ceil(input / stride); when the
//          total pad is odd, the extra cell goes on the second (bottom/right)
//          side.
// "VALID": all paddings are forced to zero.
// Any other value leaves the caller-supplied paddings untouched, except that
// when global_pooling or adaptive is true the kernel ignores paddings, so
// they are reset to zero unconditionally.
//
// @param paddings          in/out, expanded per-side pads; assumes layout
//                          [top, bottom, left, right] — TODO confirm with caller
// @param global_pooling    pooling over the whole spatial extent
// @param adaptive          adaptive pooling enabled
// @param padding_algorithm "SAME", "VALID", or "" (explicit paddings)
// @param data_dims         input dims; spatial dims start at index 2 (NCHW)
// @param strides           per-spatial-dim strides
// @param ksize             per-spatial-dim kernel sizes
inline void UpdatePadding(std::vector<int> *paddings,
                          const bool global_pooling,
                          const bool adaptive,
                          const std::string padding_algorithm,
                          const lite::DDim data_dims,
                          const std::vector<int> &strides,
                          const std::vector<int> &ksize) {
  // when padding_algorithm is "VALID" or "SAME"
  if (padding_algorithm == "SAME") {
    // size_t loop index avoids the signed/unsigned comparison warning.
    for (size_t i = 0; i < strides.size(); ++i) {
      const int64_t data_dim = data_dims[i + 2];
      const int64_t out_size = (data_dim + strides[i] - 1) / strides[i];
      const int64_t pad_sum = std::max(
          (out_size - 1) * strides[i] + ksize[i] - data_dim,
          static_cast<int64_t>(0));
      // Smaller half on the first side, remainder on the second side.
      const int pad_0 = static_cast<int>(pad_sum / 2);
      const int pad_1 = static_cast<int>(pad_sum) - pad_0;
      (*paddings)[i * 2] = pad_0;
      (*paddings)[i * 2 + 1] = pad_1;
    }
  } else if (padding_algorithm == "VALID") {
    std::fill(paddings->begin(), paddings->end(), 0);
  }
  // If global_pooling or adaptive is true, paddings will be ignored by the
  // kernel, so zero them out for consistency.
  if (global_pooling || adaptive) {
    std::fill(paddings->begin(), paddings->end(), 0);
  }
}
} // namespace operators
} // namespace lite
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册