From 388c69f27ddfba7bf25645445a2a1e53f9f8d69c Mon Sep 17 00:00:00 2001
From: Reventon_L
Date: Mon, 8 Mar 2021 14:28:13 +0800
Subject: [PATCH] [NPU] squeeze and unsqueeze op for ascend (#31452)

Co-authored-by: root
---
 paddle/fluid/operators/squeeze_op_npu.cc      | 45 +++++++++
 paddle/fluid/operators/squeeze_op_npu_test.cc | 92 +++++++++++++++++++
 paddle/fluid/operators/unsqueeze_op_npu.cc    | 44 +++++++++
 .../fluid/operators/unsqueeze_op_npu_test.cc  | 92 +++++++++++++++++++
 4 files changed, 273 insertions(+)
 create mode 100644 paddle/fluid/operators/squeeze_op_npu.cc
 create mode 100644 paddle/fluid/operators/squeeze_op_npu_test.cc
 create mode 100644 paddle/fluid/operators/unsqueeze_op_npu.cc
 create mode 100644 paddle/fluid/operators/unsqueeze_op_npu_test.cc

diff --git a/paddle/fluid/operators/squeeze_op_npu.cc b/paddle/fluid/operators/squeeze_op_npu.cc
new file mode 100644
index 0000000000..344aaaa4ba
--- /dev/null
+++ b/paddle/fluid/operators/squeeze_op_npu.cc
@@ -0,0 +1,45 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_ASCEND_CL
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/squeeze_op.h"
+#include "paddle/fluid/operators/npu_op_runner.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_NPU_KERNEL(
+    squeeze,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, double>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, plat::float16>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, bool>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, uint8_t>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int8_t>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int64_t>);
+REGISTER_OP_NPU_KERNEL(
+    squeeze2,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, double>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, plat::float16>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, bool>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, uint8_t>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int8_t>,
+    ops::SqueezeKernel<paddle::platform::NPUDeviceContext, int64_t>);
+#endif
diff --git a/paddle/fluid/operators/squeeze_op_npu_test.cc b/paddle/fluid/operators/squeeze_op_npu_test.cc
new file mode 100644
index 0000000000..9b0464fc22
--- /dev/null
+++ b/paddle/fluid/operators/squeeze_op_npu_test.cc
@@ -0,0 +1,92 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <string>
+#include <thread>  // NOLINT
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/operators/dropout_op.h"
+#include "paddle/fluid/operators/math/math_function.h"
+#include "paddle/fluid/string/printf.h"
+
+namespace f = paddle::framework;
+namespace p = paddle::platform;
+namespace m = paddle::operators::math;
+
+USE_OP(squeeze);
+USE_OP_DEVICE_KERNEL(squeeze, NPU);
+
+template <typename T>
+void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
+  // init
+  auto x = scope->Var("X");
+  auto tensor_x = x->GetMutable<f::LoDTensor>();
+
+  int dim0 = 1;
+  int dim1 = 10;
+  int dim2 = 1;
+
+  std::vector<T> init;
+  for (int64_t i = 0; i < dim0 * dim1 * dim2; ++i) {
+    init.push_back(static_cast<T>(0.1));
+  }
+
+  TensorFromVector(init, ctx, tensor_x);
+  tensor_x->Resize({dim0, dim1, dim2});
+
+  ctx.Wait();
+
+  // run
+  auto place = ctx.GetPlace();
+  auto out = scope->Var("Out");
+  auto tensor_out = out->GetMutable<f::LoDTensor>();
+
+  std::vector<int> axis;
+  axis.push_back(2);
+  f::AttributeMap attrs = {{"axes", axis}};
+
+  auto op =
+      f::OpRegistry::CreateOp("squeeze", {{"X", {"X"}}},
+                              {{"Out", {"Out"}}}, attrs);
+
+  op->Run(*scope, place);
+  ctx.Wait();
+
+  EXPECT_EQ((uint32_t)tensor_out->dims().size(), uint32_t(2));
+  EXPECT_EQ((uint32_t)tensor_out->dims()[0], uint32_t(dim0));
+  EXPECT_EQ((uint32_t)tensor_out->dims()[1], uint32_t(dim1));
+
+  std::vector<T> out_vec;
+  TensorToVector(*tensor_out, ctx, &out_vec);
+  for (uint32_t i = 0; i < out_vec.size(); i++) {
+    EXPECT_EQ(out_vec[i], static_cast<T>(0.1));
+  }
+
+  ctx.Wait();
+}
+
+TEST(squeeze, NPU_fp32) {
+  f::Scope scope;
+  p::NPUDeviceContext ctx(p::NPUPlace(0));
+  Compare<float>(&scope, ctx);
+}
+
diff --git a/paddle/fluid/operators/unsqueeze_op_npu.cc b/paddle/fluid/operators/unsqueeze_op_npu.cc
new file mode 100644
index 0000000000..c37fc44b2e
--- /dev/null
+++ b/paddle/fluid/operators/unsqueeze_op_npu.cc
@@ -0,0 +1,44 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_ASCEND_CL
+#include <memory>
+#include <string>
+
+#include "paddle/fluid/operators/unsqueeze_op.h"
+#include "paddle/fluid/operators/npu_op_runner.h"
+
+namespace ops = paddle::operators;
+namespace plat = paddle::platform;
+
+REGISTER_OP_NPU_KERNEL(
+    unsqueeze,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, double>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, plat::float16>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, bool>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, int>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, uint8_t>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, int64_t>);
+REGISTER_OP_NPU_KERNEL(
+    unsqueeze2,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, float>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, double>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, plat::float16>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, bool>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, int>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, uint8_t>,
+    ops::UnsqueezeKernel<paddle::platform::NPUDeviceContext, int64_t>);
+#endif
+
diff --git a/paddle/fluid/operators/unsqueeze_op_npu_test.cc b/paddle/fluid/operators/unsqueeze_op_npu_test.cc
new file mode 100644
index 0000000000..6b53cc328a
--- /dev/null
+++ b/paddle/fluid/operators/unsqueeze_op_npu_test.cc
@@ -0,0 +1,92 @@
+/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include <string>
+#include <thread>  // NOLINT
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/operators/dropout_op.h"
+#include "paddle/fluid/operators/math/math_function.h"
+#include "paddle/fluid/string/printf.h"
+
+namespace f = paddle::framework;
+namespace p = paddle::platform;
+namespace m = paddle::operators::math;
+
+USE_OP(unsqueeze);
+USE_OP_DEVICE_KERNEL(unsqueeze, NPU);
+
+template <typename T>
+void Compare(f::Scope* scope, const p::DeviceContext& ctx) {
+  // init
+  auto x = scope->Var("X");
+  auto tensor_x = x->GetMutable<f::LoDTensor>();
+
+  int dim0 = 5;
+  int dim1 = 10;
+
+  std::vector<T> init;
+  for (int64_t i = 0; i < dim0 * dim1; ++i) {
+    init.push_back(static_cast<T>(0.1));
+  }
+
+  TensorFromVector(init, ctx, tensor_x);
+  tensor_x->Resize({dim0, dim1});
+
+  ctx.Wait();
+
+  // run
+  auto place = ctx.GetPlace();
+  auto out = scope->Var("Out");
+  auto tensor_out = out->GetMutable<f::LoDTensor>();
+
+  std::vector<int> axis;
+  axis.push_back(1);
+  f::AttributeMap attrs = {{"axes", axis}};
+
+  auto op =
+      f::OpRegistry::CreateOp("unsqueeze", {{"X", {"X"}}},
+                              {{"Out", {"Out"}}}, attrs);
+
+  op->Run(*scope, place);
+  ctx.Wait();
+
+  EXPECT_EQ((uint32_t)tensor_out->dims().size(), uint32_t(3));
+  EXPECT_EQ((uint32_t)tensor_out->dims()[0], uint32_t(5));
+  EXPECT_EQ((uint32_t)tensor_out->dims()[1], uint32_t(1));
+  EXPECT_EQ((uint32_t)tensor_out->dims()[2], uint32_t(10));
+
+  std::vector<T> out_vec;
+  TensorToVector(*tensor_out, ctx, &out_vec);
+  for (uint32_t i = 0; i < out_vec.size(); i++) {
+    EXPECT_EQ(out_vec[i], static_cast<T>(0.1));
+  }
+
+  ctx.Wait();
+}
+
+TEST(unsqueeze, NPU_fp32) {
+  f::Scope scope;
+  p::NPUDeviceContext ctx(p::NPUPlace(0));
+  Compare<float>(&scope, ctx);
+}
+
--
GitLab
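
Note (not part of the patch): the registrations above reuse the generic ops::SqueezeKernel and
ops::UnsqueezeKernel templates declared in squeeze_op.h / unsqueeze_op.h; the patch adds no
NPU-specific compute code, since those kernels essentially recompute the output shape and copy the
tensor, which is why they can be registered directly for the NPU place. Purely as an illustration of
the shape rule the tests above exercise, here is a minimal standalone C++ sketch; the SqueezeDims
helper is hypothetical and is not code from Paddle or from this patch.

// Minimal sketch of the squeeze shape rule (hypothetical helper, not Paddle code).
#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Drop size-1 dimensions. With an empty `axes`, every size-1 dimension is
// dropped; otherwise only the listed axes are dropped, and only if they
// actually have size 1. Negative axes are not handled in this sketch.
std::vector<int64_t> SqueezeDims(const std::vector<int64_t>& dims,
                                 const std::vector<int>& axes) {
  std::set<int> listed(axes.begin(), axes.end());
  std::vector<int64_t> out;
  for (int i = 0; i < static_cast<int>(dims.size()); ++i) {
    bool drop = axes.empty() ? (dims[i] == 1)
                             : (listed.count(i) > 0 && dims[i] == 1);
    if (!drop) out.push_back(dims[i]);
  }
  return out;
}

int main() {
  // Mirrors squeeze_op_npu_test.cc: squeezing axis 2 of {1, 10, 1} gives {1, 10}.
  for (int64_t d : SqueezeDims({1, 10, 1}, {2})) std::cout << d << ' ';
  std::cout << '\n';
  return 0;
}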