From ab17f988d6cdfd7d17e4bae5b04731de37861e9b Mon Sep 17 00:00:00 2001
From: houj04 <35131887+houj04@users.noreply.github.com>
Date: Mon, 13 Mar 2023 10:15:24 +0800
Subject: [PATCH] [XPU] add increment op. (#51487)

* [XPU] add increment op.

* fix ci
---
 cmake/external/xpu.cmake                      |  4 +-
 paddle/phi/backends/xpu/xpu2_op_list.cc       |  6 +-
 paddle/phi/kernels/xpu/increment_kernel.cc    | 55 ++++++++++++
 .../unittests/xpu/test_increment_op_xpu.py    | 90 +++++++++++++++++++
 .../unittests/xpu/test_set_value_op_xpu.py    |  2 +
 .../tests/unittests/xpu/test_where_op_xpu.py  |  1 +
 6 files changed, 155 insertions(+), 3 deletions(-)
 create mode 100644 paddle/phi/kernels/xpu/increment_kernel.cc
 create mode 100644 python/paddle/fluid/tests/unittests/xpu/test_increment_op_xpu.py

diff --git a/cmake/external/xpu.cmake b/cmake/external/xpu.cmake
index 4a84e7a2e71..a64851c7abe 100644
--- a/cmake/external/xpu.cmake
+++ b/cmake/external/xpu.cmake
@@ -8,8 +8,8 @@ set(XPU_API_LIB_NAME "libxpuapi.so")
 set(XPU_RT_LIB_NAME "libxpurt.so")
 set(XPU_XFT_LIB_NAME "libxft.so")
 
-set(XPU_BASE_DATE "20230308")
-set(XPU_XCCL_BASE_VERSION "1.0.10")
+set(XPU_BASE_DATE "20230310")
+set(XPU_XCCL_BASE_VERSION "1.0.12")
 set(XPU_XFT_BASE_VERSION "latest")
 
 if(NOT DEFINED XPU_BASE_URL)
diff --git a/paddle/phi/backends/xpu/xpu2_op_list.cc b/paddle/phi/backends/xpu/xpu2_op_list.cc
index 87db2affdd7..fee4472d78c 100644
--- a/paddle/phi/backends/xpu/xpu2_op_list.cc
+++ b/paddle/phi/backends/xpu/xpu2_op_list.cc
@@ -385,7 +385,10 @@ XPUOpMap& get_kl2_ops() {
       {"huber_loss", XPUKernelSet({phi::DataType::FLOAT32})},
       {"kldiv_loss", XPUKernelSet({phi::DataType::FLOAT32})},
       {"kldiv_loss_grad", XPUKernelSet({phi::DataType::FLOAT32})},
-      {"iou_similarity", XPUKernelSet({phi::DataType::FLOAT32})},
+      {"increment",
+       XPUKernelSet({phi::DataType::FLOAT32,
+                     phi::DataType::INT32,
+                     phi::DataType::INT64})},
       {"index_sample",
        XPUKernelSet({phi::DataType::INT8,
                      phi::DataType::INT16,
@@ -400,6 +403,7 @@
                      phi::DataType::INT64})},
       {"instance_norm", XPUKernelSet({phi::DataType::FLOAT32})},
       {"instance_norm_grad", XPUKernelSet({phi::DataType::FLOAT32})},
+      {"iou_similarity", XPUKernelSet({phi::DataType::FLOAT32})},
       {"label_smooth", XPUKernelSet({phi::DataType::FLOAT32})},
       {"lamb", XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
       {"lars_momentum",
diff --git a/paddle/phi/kernels/xpu/increment_kernel.cc b/paddle/phi/kernels/xpu/increment_kernel.cc
new file mode 100644
index 00000000000..28295fb48bb
--- /dev/null
+++ b/paddle/phi/kernels/xpu/increment_kernel.cc
@@ -0,0 +1,55 @@
+// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/phi/kernels/increment_kernel.h"
+
+#include "paddle/phi/backends/xpu/enforce_xpu.h"
+#include "paddle/phi/common/memory_utils.h"
+#include "paddle/phi/core/kernel_registry.h"
+
+namespace phi {
+
+template <typename T, typename Context>
+void IncrementKernel(const Context& ctx,
+                     const DenseTensor& x,
+                     float value,
+                     DenseTensor* out) {
+  // check input
+  PADDLE_ENFORCE_EQ(x.numel(),
+                    1,
+                    phi::errors::InvalidArgument(
+                        "input tensor x's numel should be EXACTLY 1."));
+
+  const T* x_data = x.data<T>();
+  T* out_data = ctx.template Alloc<T>(out);
+
+  // allocation for "value" on xpu
+  T value_as_t = static_cast<T>(value);
+  xpu::ctx_guard RAII_GUARD(ctx.x_context());
+  T* value_xpu = RAII_GUARD.alloc_l3_or_gm<T>(1);
+  memory_utils::Copy(ctx.GetPlace(),
+                     value_xpu,
+                     phi::CPUPlace(),
+                     reinterpret_cast<void*>(&value_as_t),
+                     sizeof(T));
+
+  // int add(Context* ctx, const T* x, const T* y, T* z, int64_t len);
+  int ret = xpu::add(ctx.x_context(), x_data, value_xpu, out_data, 1);
+  PADDLE_ENFORCE_XDNN_SUCCESS(ret, "add");
+}
+
+}  // namespace phi
+
+PD_REGISTER_KERNEL(
+    increment, XPU, ALL_LAYOUT, phi::IncrementKernel, float, int, int64_t) {}
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_increment_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_increment_op_xpu.py
new file mode 100644
index 00000000000..8ebbeae9654
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/xpu/test_increment_op_xpu.py
@@ -0,0 +1,90 @@
+# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import unittest
+
+import numpy as np
+
+sys.path.append("..")
+
+from op_test_xpu import XPUOpTest
+from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
+    create_test_class,
+    get_xpu_op_support_types,
+)
+
+import paddle
+
+paddle.enable_static()
+
+
+class XPUTestIncrementOP(XPUOpTestWrapper):
+    def __init__(self):
+        self.op_name = 'increment'
+        self.use_dynamic_create_class = False
+
+    class TestXPUIncrementOp(XPUOpTest):
+        def setUp(self):
+            self.place = paddle.XPUPlace(0)
+            self.init_dtype()
+            self.op_type = 'increment'
+
+            self.initTestCase()
+
+            x = np.random.uniform(-100, 100, [1]).astype(self.dtype)
+            output = x + np.cast[self.dtype](self.step)
+            output = output.astype(self.dtype)
+
+            self.inputs = {'X': x}
+            self.attrs = {'step': self.step}
+            self.outputs = {'Out': output}
+
+        def initTestCase(self):
+            self.step = -1.5
+
+        def init_dtype(self):
+            self.dtype = self.in_type
+
+        def test_check_output(self):
+            self.check_output_with_place(self.place)
+
+    class TestIncrement1(TestXPUIncrementOp):
+        def initTestCase(self):
+            self.step = 6.0
+
+    class TestIncrement2(TestXPUIncrementOp):
+        def initTestCase(self):
+            self.step = 2.1
+
+    class TestIncrement3(TestXPUIncrementOp):
+        def initTestCase(self):
+            self.step = -1.5
+
+    class TestIncrement4(TestXPUIncrementOp):
+        def initTestCase(self):
+            self.step = 0.5
+
+    class TestIncrement5(TestXPUIncrementOp):
+        def initTestCase(self):
+            self.step = 3
+
+
+support_types = get_xpu_op_support_types('increment')
+for stype in support_types:
+    create_test_class(globals(), XPUTestIncrementOP, stype)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
index 72bb45da7ec..0a3eb065d02 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_set_value_op_xpu.py
@@ -943,6 +943,8 @@ class XPUTestSetValueOp(XPUOpTestWrapper):
             with paddle.static.program_guard(main_program, startup_program):
                 x = paddle.static.data(name="x", shape=[4, 4], dtype='float32')
                 y = paddle.static.data(name="y", shape=[4, 4], dtype='float32')
+                x.stop_gradient = False
+                y.stop_gradient = False
                 label = paddle.static.data(
                     name="label", shape=[4, 1], dtype='int64'
                 )
diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
index 45aa192d727..77b0e3d202e 100644
--- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
+++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py
@@ -116,6 +116,7 @@ class TestXPUWhereAPI(unittest.TestCase):
             y.stop_gradient = y_stop_gradient
 
             result = paddle.where(cond, x, y)
+            result.stop_gradient = False
            append_backward(paddle.mean(result))
 
             exe = fluid.Executor(self.place)
-- 
GitLab
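
For readers trying out the change, a minimal usage sketch follows. It is not part of the patch above; it assumes a Paddle build compiled with XPU support and at least one visible XPU device, and it exercises the new kernel through the public paddle.increment API in dynamic-graph mode.

    # Minimal sketch (assumption: Paddle built with XPU support, device visible).
    import paddle

    paddle.set_device("xpu")  # route ops to the XPU kernels registered above

    # increment only accepts tensors with exactly one element (numel == 1),
    # matching the PADDLE_ENFORCE_EQ check in the kernel.
    x = paddle.zeros(shape=[1], dtype="float32")
    out = paddle.increment(x, value=2.0)
    print(out.numpy())  # expected: [2.]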