未验证 提交 613a3ffe 编写于 作者: H houj04 提交者: GitHub

[XPU] add fp16 support for assign. update xccl to 1.0.9. (#50702)

上级 d8845735
...@@ -8,7 +8,7 @@ set(XPU_API_LIB_NAME "libxpuapi.so") ...@@ -8,7 +8,7 @@ set(XPU_API_LIB_NAME "libxpuapi.so")
set(XPU_RT_LIB_NAME "libxpurt.so") set(XPU_RT_LIB_NAME "libxpurt.so")
set(XPU_BASE_DATE "20230220") set(XPU_BASE_DATE "20230220")
set(XPU_XCCL_BASE_VERSION "1.0.8") set(XPU_XCCL_BASE_VERSION "1.0.9")
if(NOT DEFINED XPU_BASE_URL) if(NOT DEFINED XPU_BASE_URL)
set(XPU_BASE_URL_WITHOUT_DATE set(XPU_BASE_URL_WITHOUT_DATE
......
...@@ -46,6 +46,7 @@ XPUOpMap& get_kl2_ops() { ...@@ -46,6 +46,7 @@ XPUOpMap& get_kl2_ops() {
XPUKernelSet({phi::DataType::FLOAT32, XPUKernelSet({phi::DataType::FLOAT32,
phi::DataType::FLOAT64, phi::DataType::FLOAT64,
phi::DataType::INT32, phi::DataType::INT32,
phi::DataType::FLOAT16,
phi::DataType::INT64, phi::DataType::INT64,
phi::DataType::BOOL})}, phi::DataType::BOOL})},
{"assign_value", XPUKernelSet({phi::DataType::FLOAT32})}, {"assign_value", XPUKernelSet({phi::DataType::FLOAT32})},
......
...@@ -179,5 +179,6 @@ PD_REGISTER_KERNEL(assign_value, ...@@ -179,5 +179,6 @@ PD_REGISTER_KERNEL(assign_value,
bool, bool,
int, int,
float, float,
int64_t) {} int64_t,
phi::dtype::float16) {}
#endif #endif
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. # Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved.
# #
# Licensed under the Apache License, Version 2.0 (the "License"); # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. # you may not use this file except in compliance with the License.
...@@ -13,72 +13,76 @@ ...@@ -13,72 +13,76 @@
# limitations under the License. # limitations under the License.
import sys import sys
import unittest
import numpy as np
sys.path.append("..") sys.path.append("..")
import unittest
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import (
XPUOpTestWrapper,
create_test_class,
get_xpu_op_support_types,
)
import paddle import paddle
# The OpTest-based kernel tests below run under the static graph mode.
paddle.enable_static()
class XPUTestAssignOP(XPUOpTestWrapper):
    """Test wrapper for the XPU `assign` kernel.

    One concrete test class per supported dtype is generated from the
    inner base class (see the create_test_class call at module level).
    """

    def __init__(self):
        self.op_name = 'assign'
        self.use_dynamic_create_class = False

    class TestAssignOPBase(XPUOpTest):
        def setUp(self):
            # Run every case on the first XPU device.
            self.place = paddle.XPUPlace(0)
            self.init_dtype()
            self.set_case()

        def set_case(self):
            self.op_type = 'assign'
            self.init_config()
            # `assign` copies input to output unchanged, so the expected
            # output is exactly the random input tensor.
            x = np.random.random(size=self.input_shape).astype(self.dtype)
            self.inputs = {'X': x}
            self.attrs = {}
            self.outputs = {'Out': x}

        def init_dtype(self):
            # NOTE(review): `in_type` is presumably injected per-dtype by
            # create_test_class — confirm against the test-cover-info helper.
            self.dtype = self.in_type

        def test_check_output(self):
            self.check_output_with_place(self.place)

        def test_check_grad(self):
            self.check_grad_with_place(self.place, ['X'], 'Out')

        def init_config(self):
            # Default input shape; subclasses override to vary shape/rank.
            self.input_shape = (2, 5)

    class XPUTestAssign1(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [2, 768]

    class XPUTestAssign2(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [3, 8, 4096]

    class XPUTestAssign3(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [1024]

    class XPUTestAssign4(TestAssignOPBase):
        def init_config(self):
            self.input_shape = [2, 2, 255]
# Materialize one unittest class per dtype that the XPU `assign`
# kernel reports as supported.
support_types = get_xpu_op_support_types('assign')
for stype in support_types:
    create_test_class(globals(), XPUTestAssignOP, stype)

if __name__ == "__main__":
    unittest.main()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册