// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "lite/operators/pool_op.h"
#include <gtest/gtest.h>
#include <memory>
#include "lite/core/op_registry.h"
#include "lite/kernels/npu/bridges/registry.h"
#include "lite/kernels/npu/bridges/test_helper.h"

namespace paddle {
namespace lite {
namespace kernels {
namespace npu {
namespace bridges {

// Reference CPU implementation of pool2d, used to validate the NPU bridge
// output against a straightforward float computation.
void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
  Scope* scope = op->scope();
  const OpInfo* op_info = op->op_info();
  auto x = scope->FindVar(op_info->Input("X").front())->GetMutable<Tensor>();
  auto out =
      scope->FindVar(op_info->Output("Out").front())->GetMutable<Tensor>();

  auto& in_dims = x->dims();
  auto& out_dims = out->dims();

  const float* src_ptr = x->data<float>();
  float* dst_ptr = out->mutable_data<float>();

  std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
  std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
  bool exclusive = op_info->GetAttr<bool>("exclusive");
  std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
  bool global_pooling = op_info->GetAttr<bool>("global_pooling");

  int in_n = in_dims[0];
  int in_c = in_dims[1];
  int in_h = in_dims[2];
  int in_w = in_dims[3];
  int size_in_n = in_c * in_h * in_w;
  int size_in_c = in_h * in_w;

  int out_h = out_dims[2];
  int out_w = out_dims[3];
  int size_out_n = in_c * out_h * out_w;
  int size_out_c = out_h * out_w;

  int window_h = ksize[0];
  int window_w = ksize[1];
  int stride_h = strides[0];
  int stride_w = strides[1];
  int pad_h = paddings[0];
  int pad_w = paddings[1];

  if (global_pooling) {
    // Global pooling reduces each input channel to a single value.
    for (int n = 0; n < in_n; ++n) {
      for (int c = 0; c < in_c; ++c) {
        const float* src = src_ptr + n * size_in_n + c * size_in_c;
        float res = src[0];
        if (pooling_type == "max") {
          for (int i = 1; i < size_in_c; ++i) {
            float cur_val = src[i];
            res = cur_val > res ? cur_val : res;
          }
        } else if (pooling_type == "avg") {
          for (int i = 1; i < size_in_c; ++i) {
            float cur_val = src[i];
            res += cur_val;
          }
          res /= size_in_c;
        }
        dst_ptr[n * size_out_n + c] = res;
      }
    }
  } else {
    for (int n = 0; n < in_n; ++n) {
      for (int c = 0; c < in_c; ++c) {
        for (int h = 0; h < out_h; ++h) {
          // Clamp the pooling window to the input bounds in height.
          int sh = h * stride_h;
          int eh = sh + window_h;
          sh = (sh - pad_h) < 0 ? 0 : sh - pad_h;
          eh = (eh - pad_h) > in_h ? in_h : eh - pad_h;
          for (int w = 0; w < out_w; ++w) {
            // Clamp the pooling window to the input bounds in width.
            int sw = w * stride_w;
            int ew = sw + window_w;
            sw = (sw - pad_w) < 0 ? 0 : sw - pad_w;
            ew = (ew - pad_w) > in_w ? in_w : ew - pad_w;
            int pooling_size = (ew - sw) * (eh - sh);
            if (pooling_size == 0) continue;
            float res = 0.f;
            for (int kh = sh; kh < eh; ++kh) {
              for (int kw = sw; kw < ew; ++kw) {
                int src_idx = n * size_in_n + c * size_in_c + kh * in_w + kw;
                if (kh == sh && kw == sw) {
                  res = src_ptr[src_idx];
                } else {
                  if (pooling_type == "max") {
                    res = res >= src_ptr[src_idx] ? res : src_ptr[src_idx];
                  }
                  if (pooling_type == "avg") {
                    res += src_ptr[src_idx];
                  }
                }
              }
            }
            if (pooling_type == "avg") {
              // exclusive: divide by the valid (clamped) window size;
              // otherwise divide by the full window area.
              if (exclusive) {
                res /= pooling_size;
              } else {
                res /= window_h * window_w;
              }
            }
            dst_ptr[n * size_out_n + c * size_out_c + h * out_w + w] = res;
          }
        }
      }
    }
  }
}

void test_pool(int bs,
               int ic,
               int ih,
               int iw,
               std::string pooling_type,
               bool ceil_mode,
               bool global_pooling,
               bool exclusive,
               int ksize,
               int stride,
               int padding) {
  // prepare input & output variables
  Scope scope;
  std::string x_var_name = "x";
  std::string out_var_name = "out";
  std::string out_ref_var_name = "out_ref";
  auto* x = scope.Var(x_var_name)->GetMutable<Tensor>();
  auto* out = scope.Var(out_var_name)->GetMutable<Tensor>();
  auto* out_ref = scope.Var(out_ref_var_name)->GetMutable<Tensor>();
  x->Resize({bs, ic, ih, iw});

  // initialize input data
  FillTensor<float>(x);

  // initialize op desc
  cpp::OpDesc opdesc;
  opdesc.SetType("pool2d");
  opdesc.SetInput("X", {x_var_name});
  opdesc.SetOutput("Out", {out_var_name});
  opdesc.SetAttr("pooling_type", pooling_type);
  opdesc.SetAttr("ksize", std::vector<int>({ksize, ksize}));
  opdesc.SetAttr("global_pooling", global_pooling);
  opdesc.SetAttr("exclusive", exclusive);
  opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
  opdesc.SetAttr("paddings", std::vector<int>({padding, padding}));

  // create and convert op to NPU model, then run it on NPU
  auto op = CreateOp<operators::PoolOpLite>(opdesc, &scope);
  LauchOp(op, {x_var_name}, {out_var_name});
  out_ref->CopyDataFrom(*out);

  // execute reference implementation and save to output tensor
  pool_ref(op);

  // compare results
  auto* out_data = out->mutable_data<float>();
  auto* out_ref_data = out_ref->mutable_data<float>();
  for (int i = 0; i < out->dims().production(); i++) {
    EXPECT_NEAR(out_data[i], out_ref_data[i], 1e-2);
  }
}

TEST(NPUBridges, pool) {
  for (auto pooling_type : {"max", "avg"}) {
    for (auto ceil_mode : {true, false}) {
      for (auto global_pooling : {/*true, */ false}) {
        for (auto exclusive : {true /*, false*/}) {
          for (auto ksize : {2, 3}) {
            for (auto stride : {1, 2}) {
              for (auto padding : {0, 1}) {
                for (auto bs : {1, 3}) {
                  for (auto ic : {1, 3}) {
                    for (auto ih : {3, 7}) {
                      for (auto iw : {3, 7}) {
                        test_pool(bs,
                                  ic,
                                  ih,
                                  iw,
                                  pooling_type,
                                  ceil_mode,
                                  global_pooling,
                                  exclusive,
                                  ksize,
                                  stride,
                                  padding);
                      }
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  // separately cover the global pooling path with a fixed configuration
  for (auto pooling_type : {"max", "avg"}) {
    for (auto ceil_mode : {true, false}) {
      bool global_pooling = true;
      bool exclusive = true;
      int ksize = 2;
      int stride = 1;
      int padding = 0;
      int bs = 6;
      int ic = 6;
      int ih = 6;
      int iw = 6;
      test_pool(bs,
                ic,
                ih,
                iw,
                pooling_type,
                ceil_mode,
                global_pooling,
                exclusive,
                ksize,
                stride,
                padding);
    }
  }
}

}  // namespace bridges
}  // namespace npu
}  // namespace kernels
}  // namespace lite
}  // namespace paddle

USE_LITE_OP(pool2d);
USE_NPU_BRIDGE(pool2d);