From 5dac279a4d6cf605385d229079c93b4a676529ba Mon Sep 17 00:00:00 2001
From: zhupengyang <1165938320@qq.com>
Date: Tue, 3 Mar 2020 16:03:35 +0800
Subject: [PATCH] [XPU] move elementwise and pool uts (#3060)

---
 .../xpu/bridges/elementwise_ops_test.cc       | 188 ------------
 lite/kernels/xpu/bridges/pool_op_test.cc      | 268 ------------------
 .../tests/kernels/elementwise_compute_test.cc |   2 +-
 lite/tests/kernels/pool_compute_test.cc       |   2 ++
 4 files changed, 3 insertions(+), 457 deletions(-)
 delete mode 100644 lite/kernels/xpu/bridges/elementwise_ops_test.cc
 delete mode 100644 lite/kernels/xpu/bridges/pool_op_test.cc

diff --git a/lite/kernels/xpu/bridges/elementwise_ops_test.cc b/lite/kernels/xpu/bridges/elementwise_ops_test.cc
deleted file mode 100644
index 2abda822e3..0000000000
--- a/lite/kernels/xpu/bridges/elementwise_ops_test.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "lite/operators/elementwise_ops.h"
-#include <gtest/gtest.h>
-#include "lite/core/op_registry.h"
-#include "lite/kernels/xpu/bridges/registry.h"
-#include "lite/kernels/xpu/bridges/test_helper.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace xpu {
-namespace bridges {
-
-template <typename dtype>
-void elementwise_add_ref(const std::shared_ptr<operators::ElementwiseOp> op) {
-  Scope* scope = op->scope();
-  const OpInfo* op_info = op->op_info();
-  auto x = scope->FindVar(op_info->Input("X").front())->GetMutable<Tensor>();
-  auto y = scope->FindVar(op_info->Input("Y").front())->GetMutable<Tensor>();
-  auto out =
-      scope->FindVar(op_info->Output("Out").front())->GetMutable<Tensor>();
-
-  auto x_data = x->data<dtype>();
-  auto y_data = y->data<dtype>();
-  dtype* out_data = out->mutable_data<dtype>();
-
-  auto x_dims = x->dims();
-  auto y_dims = y->dims();
-  int axis = op_info->GetAttr<int>("axis");
-
-  if (axis < 0) {
-    axis = x_dims.size() - y_dims.size();
-  }
-  int batch = 1;
-  int channels = 1;
-  int num = 1;
-  for (int i = 0; i < axis; ++i) {
-    batch *= x_dims[i];
-  }
-  for (int i = 0; i < y_dims.size(); ++i) {
-    channels *= y_dims[i];
-  }
-  for (int i = y_dims.size() + axis; i < x_dims.size(); ++i) {
-    num *= x_dims[i];
-  }
-  // do elementwise add/sub/max...
-  std::string elt_type = "add";
-  if (elt_type == "add") {
-    for (int i = 0; i < batch; ++i) {
-      for (int j = 0; j < channels; ++j) {
-        int offset = (i * channels + j) * num;
-        const dtype* din_ptr = x_data + offset;
-        const dtype diny_data = y_data[j];
-        dtype* dout_ptr = out_data + offset;
-        for (int k = 0; k < num; ++k) {
-          *dout_ptr = *din_ptr + diny_data;
-          dout_ptr++;
-          din_ptr++;
-        }
-      }
-    }
-  } else if (elt_type == "sub") {
-    for (int i = 0; i < batch; ++i) {
-      for (int j = 0; j < channels; ++j) {
-        int offset = (i * channels + j) * num;
-        const dtype* din_ptr = x_data + offset;
-        const dtype diny_data = y_data[j];
-        dtype* dout_ptr = out_data + offset;
-        for (int k = 0; k < num; ++k) {
-          *dout_ptr = *din_ptr - diny_data;
-          dout_ptr++;
-          din_ptr++;
-        }
-      }
-    }
-  } else if (elt_type == "mul") {
-    for (int i = 0; i < batch; ++i) {
-      for (int j = 0; j < channels; ++j) {
-        int offset = (i * channels + j) * num;
-        const dtype* din_ptr = x_data + offset;
-        const dtype diny_data = y_data[j];
-        dtype* dout_ptr = out_data + offset;
-        for (int k = 0; k < num; ++k) {
-          *dout_ptr = *din_ptr * diny_data;
-          dout_ptr++;
-          din_ptr++;
-        }
-      }
-    }
-  } else if (elt_type == "max") {
-    for (int i = 0; i < batch; ++i) {
-      for (int j = 0; j < channels; ++j) {
-        int offset = (i * channels + j) * num;
-        const dtype* din_ptr = x_data + offset;
-        const dtype diny_data = y_data[j];
-        dtype* dout_ptr = out_data + offset;
-        for (int k = 0; k < num; ++k) {
-          *dout_ptr = std::max(*din_ptr, diny_data);
-          dout_ptr++;
-          din_ptr++;
-        }
-      }
-    }
-  } else {
-    LOG(FATAL) << "unsupported Elementwise type: " << elt_type;
-  }
-}
-
-void test_elementwise_add(std::vector<int64_t> x_dims,
-                          std::vector<int64_t> y_dims,
-                          int axis) {
-  // prepare input&output variables
-  Scope scope;
-  std::string x_var_name = "x";
-  std::string y_var_name = "y";
-  std::string out_var_name = "out";
-  std::string out_ref_var_name = "out_ref";
-  auto* x = scope.Var(x_var_name)->GetMutable<Tensor>();
-  auto* y = scope.Var(y_var_name)->GetMutable<Tensor>();
-  auto* out = scope.Var(out_var_name)->GetMutable<Tensor>();
-  auto* out_ref = scope.Var(out_ref_var_name)->GetMutable<Tensor>();
-  x->Resize(x_dims);
-  if (y_dims.size() == 0) {
-    y->Resize(x_dims);
-  } else {
-    y->Resize(y_dims);
-  }
-
-  // initialize input&output data
-  FillTensor<float>(x);
-  FillTensor<float>(y);
-
-  // initialize op desc
-  cpp::OpDesc opdesc;
-  opdesc.SetType("elementwise_add");
-  opdesc.SetInput("X", {x_var_name});
-  opdesc.SetInput("Y", {y_var_name});
-  opdesc.SetOutput("Out", {out_var_name});
-  opdesc.SetAttr("axis", axis);
-
-  // create and convert op to XPU model, then run it on XPU
-  auto op = CreateOp<operators::ElementwiseOp>(opdesc, &scope);
-  LauchOp(op, {x_var_name, y_var_name}, {out_var_name});
-  out_ref->CopyDataFrom(*out);
-
-  // execute reference implementation and save to output tensor
-  elementwise_add_ref<float>(op);
-
-  // compare results
-  auto* out_data = out->mutable_data<float>();
-  auto* out_ref_data = out_ref->mutable_data<float>();
-  for (int i = 0; i < out->dims().production(); i++) {
-    EXPECT_NEAR(out_data[i], out_ref_data[i], 1e-5);
-  }
-}
-
-// xpu's bias_add only support y with one dimension
-TEST(XPUBridges, elementwise_add) {
-  test_elementwise_add({1, 2, 3, 4}, {1}, 0);
-  test_elementwise_add({1, 2, 3, 4}, {2}, 1);
-  test_elementwise_add({2, 2, 3, 4}, {3}, 2);
-  test_elementwise_add({2, 2, 3, 4}, {4}, 3);
-  test_elementwise_add({2, 2, 3, 4}, {4}, -1);
-  test_elementwise_add({2, 2, 3, 4}, {}, -1);
-}
-
-}  // namespace bridges
-}  // namespace xpu
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
-
-USE_LITE_OP(elementwise_add);
-USE_XPU_BRIDGE(elementwise_add);
diff --git a/lite/kernels/xpu/bridges/pool_op_test.cc b/lite/kernels/xpu/bridges/pool_op_test.cc
deleted file mode 100644
index 7efc6b464c..0000000000
--- a/lite/kernels/xpu/bridges/pool_op_test.cc
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include "lite/operators/pool_op.h"
-#include <gtest/gtest.h>
-#include "lite/core/op_registry.h"
-#include "lite/kernels/xpu/bridges/registry.h"
-#include "lite/kernels/xpu/bridges/test_helper.h"
-
-namespace paddle {
-namespace lite {
-namespace kernels {
-namespace xpu {
-namespace bridges {
-
-void pool_ref(const std::shared_ptr<operators::PoolOpLite> op) {
-  Scope* scope = op->scope();
-  const OpInfo* op_info = op->op_info();
-  auto x = scope->FindVar(op_info->Input("X").front())->GetMutable<Tensor>();
-  auto out =
-      scope->FindVar(op_info->Output("Out").front())->GetMutable<Tensor>();
-  auto& in_dims = x->dims();
-  auto& out_dims = out->dims();
-
-  const float* src_ptr = x->data<float>();
-  float* dst_ptr = out->mutable_data<float>();
-
-  std::vector<int> ksize = op_info->GetAttr<std::vector<int>>("ksize");
-  std::vector<int> strides = op_info->GetAttr<std::vector<int>>("strides");
-  std::vector<int> paddings = op_info->GetAttr<std::vector<int>>("paddings");
-  bool exclusive = op_info->GetAttr<bool>("exclusive");
-  std::string pooling_type = op_info->GetAttr<std::string>("pooling_type");
-  bool global_pooling = op_info->GetAttr<bool>("global_pooling");
-
-  int in_n = in_dims[0];
-  int in_c = in_dims[1];
-  int in_h = in_dims[2];
-  int in_w = in_dims[3];
-  int size_in_n = in_c * in_h * in_w;
-  int size_in_c = in_h * in_w;
-
-  int out_h = out_dims[2];
-  int out_w = out_dims[3];
-  int size_out_n = in_c * out_h * out_w;
-  int size_out_c = out_h * out_w;
-
-  int window_h = ksize[0];
-  int window_w = ksize[1];
-  int stride_h = strides[0];
-  int stride_w = strides[1];
-  int pad_h = paddings[0];
-  int pad_w = paddings[2];
-
-  if (global_pooling == true) {
-    for (int n = 0; n < in_n; ++n) {
-      for (int c = 0; c < in_c; ++c) {
-        const float* src = src_ptr + n * size_in_n + c * size_in_c;
-        float res = src[0];
-        if (pooling_type == "max") {
-          for (int i = 1; i < size_in_c; ++i) {
-            float cur_val = src[i];
-            res = cur_val > res ? cur_val : res;
-          }
-        } else if (pooling_type == "avg") {
-          for (int i = 1; i < size_in_c; ++i) {
-            float cur_val = src[i];
-            res += cur_val;
-          }
-          res /= size_in_c;
-        }
-        dst_ptr[n * size_out_n + c] = res;
-      }
-    }
-  } else {
-    for (int n = 0; n < in_n; ++n) {
-      for (int c = 0; c < in_c; ++c) {
-        for (int h = 0; h < out_h; ++h) {
-          int sh = h * stride_h;
-          int eh = sh + window_h;
-          sh = (sh - pad_h) < 0 ? 0 : sh - pad_h;
-          eh = (eh - pad_h) > in_h ? in_h : eh - pad_h;
-          for (int w = 0; w < out_w; ++w) {
-            int sw = w * stride_w;
-            int ew = sw + window_w;
-            sw = (sw - pad_w) < 0 ? 0 : sw - pad_w;
-            ew = (ew - pad_w) > in_w ? in_w : ew - pad_w;
-            int pooling_size = (ew - sw) * (eh - sh);
-            if (pooling_size == 0) continue;
-            float res = 0.f;
-            for (int kh = sh; kh < eh; ++kh) {
-              for (int kw = sw; kw < ew; ++kw) {
-                int src_idx = n * size_in_n + c * size_in_c + kh * in_w + kw;
-                if (kh == sh && kw == sw) {
-                  res = src_ptr[src_idx];
-                } else {
-                  if (pooling_type == "max") {
-                    res = res >= src_ptr[src_idx] ? res : src_ptr[src_idx];
-                  }
-                  if (pooling_type == "avg") {
-                    res += src_ptr[src_idx];
-                  }
-                }
-              }
-            }
-            if (pooling_type == "avg") {
-              if (exclusive) {
-                res /= pooling_size;
-              } else {
-                res /= window_h * window_w;
-              }
-            }
-            dst_ptr[n * size_out_n + c * size_out_c + h * out_w + w] = res;
-          }
-        }
-      }
-    }
-  }
-}
-
-void test_pool(int bs,
-               int ic,
-               int ih,
-               int iw,
-               std::string pooling_type,
-               bool ceil_mode,
-               bool global_pooling,
-               bool exclusive,
-               int ksize,
-               int stride,
-               int padding) {
-  // prepare input&output variables
-  Scope scope;
-  std::string x_var_name = "x";
-  std::string out_var_name = "out";
-  std::string out_ref_var_name = "out_ref";
-  auto* x = scope.Var(x_var_name)->GetMutable<Tensor>();
-  auto* out = scope.Var(out_var_name)->GetMutable<Tensor>();
-  auto* out_ref = scope.Var(out_ref_var_name)->GetMutable<Tensor>();
-  x->Resize({bs, ic, ih, iw});
-
-  // initialize input&output data
-  FillTensor<float>(x);
-
-  // initialize op desc
-  cpp::OpDesc opdesc;
-  opdesc.SetType("pool2d");
-  opdesc.SetInput("X", {x_var_name});
-  opdesc.SetOutput("Out", {out_var_name});
-  opdesc.SetAttr("pooling_type", pooling_type);
-  opdesc.SetAttr("ksize", std::vector<int>({ksize, ksize}));
-  opdesc.SetAttr("global_pooling", global_pooling);
-  opdesc.SetAttr("exclusive", exclusive);
-  opdesc.SetAttr("strides", std::vector<int>({stride, stride}));
-  opdesc.SetAttr("paddings",
-                 std::vector<int>({padding, padding, padding, padding}));
-  opdesc.SetAttr("ceil_mode", ceil_mode);
-
-  // create and convert op to XPU model, then run it on XPU
-  auto op = CreateOp<operators::PoolOpLite>(opdesc, &scope);
-  LauchOp(op, {x_var_name}, {out_var_name});
-  out_ref->CopyDataFrom(*out);
-
-  // execute reference implementation and save to output tensor
-  pool_ref(op);
-
-  // compare results
-  auto* out_data = out->mutable_data<float>();
-  auto* out_ref_data = out_ref->mutable_data<float>();
-  for (int i = 0; i < out->dims().production(); i++) {
-    EXPECT_NEAR(out_data[i], out_ref_data[i], 1e-5);
-  }
-}
-
-TEST(XPUBridges, pool) {
-  for (auto pooling_type : {"max", "avg"}) {
-    for (auto bs : {1, 3}) {
-      for (auto ic : {2}) {
-        for (auto ih : {3}) {
-          for (auto iw : {4}) {
-            test_pool(bs, ic, ih, iw, pooling_type, true, true, true, 0, 1, 0);
-          }
-        }
-      }
-    }
-  }
-
-  for (auto pooling_type : {"max"}) {
-    for (auto ceil_mode : {true, false}) {
-      for (auto ksize : {2, 3}) {
-        for (auto stride : {1, 2}) {
-          for (auto padding : {0, 1}) {
-            for (auto bs : {1, 3}) {
-              for (auto ic : {2}) {
-                for (auto ih : {3}) {
-                  for (auto iw : {4}) {
-                    test_pool(bs,
-                              ic,
-                              ih,
-                              iw,
-                              pooling_type,
-                              ceil_mode,
-                              false,
-                              true,
-                              ksize,
-                              stride,
-                              padding);
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-
-  for (auto pooling_type : {"avg"}) {
-    for (auto ceil_mode : {true, false}) {
-      for (auto exclusive : {true, false}) {
-        for (auto ksize : {2, 3}) {
-          for (auto stride : {1, 2}) {
-            for (auto padding : {0, 1}) {
-              for (auto bs : {1, 3}) {
-                for (auto ic : {2}) {
-                  for (auto ih : {3}) {
-                    for (auto iw : {4}) {
-                      test_pool(bs,
-                                ic,
-                                ih,
-                                iw,
-                                pooling_type,
-                                ceil_mode,
-                                false,
-                                exclusive,
-                                ksize,
-                                stride,
-                                padding);
-                    }
-                  }
-                }
-              }
-            }
-          }
-        }
-      }
-    }
-  }
-}
-
-}  // namespace bridges
-}  // namespace xpu
-}  // namespace kernels
-}  // namespace lite
-}  // namespace paddle
-
-USE_LITE_OP(pool2d);
-USE_XPU_BRIDGE(pool2d);
diff --git a/lite/tests/kernels/elementwise_compute_test.cc b/lite/tests/kernels/elementwise_compute_test.cc
index 66b6223dc4..505ab72dc1 100644
--- a/lite/tests/kernels/elementwise_compute_test.cc
+++ b/lite/tests/kernels/elementwise_compute_test.cc
@@ -234,7 +234,7 @@ TEST(Elementwise, precision) {
   return;
 #endif
 
-  // TestEltDims(place, abs_error);
+  TestEltDims(place, abs_error);
   TestEltTypes(place, abs_error);
   TestEltFuseAct(place, abs_error);
 }
diff --git a/lite/tests/kernels/pool_compute_test.cc b/lite/tests/kernels/pool_compute_test.cc
index d94c2e5154..988c99bf7c 100644
--- a/lite/tests/kernels/pool_compute_test.cc
+++ b/lite/tests/kernels/pool_compute_test.cc
@@ -352,6 +352,8 @@ TEST(Pool, precision) {
 #if defined(LITE_WITH_NPU)
   place = TARGET(kNPU);
   abs_error = 1e-2;  // Using fp16 in NPU
+#elif defined(LITE_WITH_XPU)
+  place = TARGET(kXPU);
 #else
   return;
 #endif
--
GitLab