未验证 提交 ab17f988 编写于 作者: H houj04 提交者: GitHub

[XPU] add increment op. (#51487)

* [XPU] add increment op.

* fix ci
上级 88d42398
......@@ -8,8 +8,8 @@ set(XPU_API_LIB_NAME "libxpuapi.so")
set(XPU_RT_LIB_NAME "libxpurt.so")
set(XPU_XFT_LIB_NAME "libxft.so")
set(XPU_BASE_DATE "20230308")
set(XPU_XCCL_BASE_VERSION "1.0.10")
set(XPU_BASE_DATE "20230310")
set(XPU_XCCL_BASE_VERSION "1.0.12")
set(XPU_XFT_BASE_VERSION "latest")
if(NOT DEFINED XPU_BASE_URL)
......
......@@ -385,7 +385,10 @@ XPUOpMap& get_kl2_ops() {
{"huber_loss", XPUKernelSet({phi::DataType::FLOAT32})},
{"kldiv_loss", XPUKernelSet({phi::DataType::FLOAT32})},
{"kldiv_loss_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"iou_similarity", XPUKernelSet({phi::DataType::FLOAT32})},
{"increment",
XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::INT32,
phi::DataType::INT64})},
{"index_sample",
XPUKernelSet({phi::DataType::INT8,
phi::DataType::INT16,
......@@ -400,6 +403,7 @@ XPUOpMap& get_kl2_ops() {
phi::DataType::INT64})},
{"instance_norm", XPUKernelSet({phi::DataType::FLOAT32})},
{"instance_norm_grad", XPUKernelSet({phi::DataType::FLOAT32})},
{"iou_similarity", XPUKernelSet({phi::DataType::FLOAT32})},
{"label_smooth", XPUKernelSet({phi::DataType::FLOAT32})},
{"lamb", XPUKernelSet({phi::DataType::FLOAT32, phi::DataType::FLOAT16})},
{"lars_momentum",
......
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/increment_kernel.h"
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {

// Computes out = x + value on XPU, where x must be a single-element
// (scalar) tensor.
//
// Template params:
//   T       - element type of x/out (float, int, int64_t).
//   Context - XPU device context type.
// Args:
//   ctx   - device context used for allocation and kernel launch.
//   x     - input tensor; enforced to hold exactly one element.
//   value - scalar increment; cast to T before the addition.
//   out   - output tensor receiving x[0] + value.
template <typename T, typename Context>
void IncrementKernel(const Context& ctx,
                     const DenseTensor& x,
                     float value,
                     DenseTensor* out) {
  // The increment op is only defined for scalar (1-element) tensors.
  PADDLE_ENFORCE_EQ(x.numel(),
                    1,
                    phi::errors::InvalidArgument(
                        "input tensor x's numel should be EXACTLY 1."));

  const T* x_data = x.data<T>();
  T* out_data = ctx.template Alloc<T>(out);

  // xpu::add reads both operands from device memory, so stage the scalar
  // "value" on the XPU; RAII_GUARD releases the scratch buffer on return.
  T value_as_t = static_cast<T>(value);
  xpu::ctx_guard RAII_GUARD(ctx.x_context());
  T* value_xpu = RAII_GUARD.alloc_l3_or_gm<T>(1);
  // Guard against a failed L3/GM allocation instead of copying to nullptr.
  PADDLE_ENFORCE_XDNN_NOT_NULL(value_xpu);
  memory_utils::Copy(ctx.GetPlace(),
                     value_xpu,
                     phi::CPUPlace(),
                     static_cast<void*>(&value_as_t),
                     sizeof(T));

  // int add(Context* ctx, const T* x, const T* y, T* z, int64_t len);
  int ret = xpu::add(ctx.x_context(), x_data, value_xpu, out_data, 1);
  PADDLE_ENFORCE_XDNN_SUCCESS(ret, "add");
}

}  // namespace phi

PD_REGISTER_KERNEL(
    increment, XPU, ALL_LAYOUT, phi::IncrementKernel, float, int, int64_t) {}
# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
import numpy as np
sys.path.append("..")
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
import paddle
paddle.enable_static()
class XPUTestIncrementOP(XPUOpTestWrapper):
    """Wrapper that generates dtype-parameterized tests for the XPU
    `increment` op (out = x + step for a single-element tensor)."""

    def __init__(self):
        self.op_name = 'increment'
        self.use_dynamic_create_class = False

    class TestXPUIncrementOp(XPUOpTest):
        def setUp(self):
            self.place = paddle.XPUPlace(0)
            self.init_dtype()
            self.op_type = 'increment'
            self.initTestCase()
            x = np.random.uniform(-100, 100, [1]).astype(self.dtype)
            # `np.cast[dtype](v)` was deprecated and removed in NumPy 2.0;
            # cast the scalar step with an explicit-dtype asarray instead.
            output = x + np.asarray(self.step, dtype=self.dtype)
            output = output.astype(self.dtype)
            self.inputs = {'X': x}
            self.attrs = {'step': self.step}
            self.outputs = {'Out': output}

        def initTestCase(self):
            # Default step value; subclasses override with other steps.
            self.step = -1.5

        def init_dtype(self):
            # `in_type` is injected per-dtype by create_test_class.
            self.dtype = self.in_type

        def test_check_output(self):
            self.check_output_with_place(self.place)

    class TestIncrement1(TestXPUIncrementOp):
        def initTestCase(self):
            self.step = 6.0

    class TestIncrement2(TestXPUIncrementOp):
        def initTestCase(self):
            self.step = 2.1

    class TestIncrement3(TestXPUIncrementOp):
        def initTestCase(self):
            self.step = -1.5

    class TestIncrement4(TestXPUIncrementOp):
        def initTestCase(self):
            self.step = 0.5

    class TestIncrement5(TestXPUIncrementOp):
        def initTestCase(self):
            # Integer step: exercises the int32/int64 kernel paths exactly.
            self.step = 3
# Instantiate one concrete test class per dtype the XPU `increment`
# kernel supports (e.g. float32 / int32 / int64).
support_types = get_xpu_op_support_types('increment')
for dtype_name in support_types:
    create_test_class(globals(), XPUTestIncrementOP, dtype_name)

if __name__ == '__main__':
    unittest.main()
......@@ -943,6 +943,8 @@ class XPUTestSetValueOp(XPUOpTestWrapper):
with paddle.static.program_guard(main_program, startup_program):
x = paddle.static.data(name="x", shape=[4, 4], dtype='float32')
y = paddle.static.data(name="y", shape=[4, 4], dtype='float32')
x.stop_gradient = False
y.stop_gradient = False
label = paddle.static.data(
name="label", shape=[4, 1], dtype='int64'
......
......@@ -116,6 +116,7 @@ class TestXPUWhereAPI(unittest.TestCase):
y.stop_gradient = y_stop_gradient
result = paddle.where(cond, x, y)
result.stop_gradient = False
append_backward(paddle.mean(result))
exe = fluid.Executor(self.place)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册