From 6c650445483d81748a16ffda075afd0943f7e1c7 Mon Sep 17 00:00:00 2001
From: yangjianfengo1 <125249383+yangjianfengo1@users.noreply.github.com>
Date: Wed, 8 Mar 2023 14:48:52 +0800
Subject: [PATCH] AMP tile_op & Test (#51193)

* tile_op

* fix bfloat16 x
---
 .../fluid/tests/unittests/test_tile_op.py | 48 ++++++++++++++++++-
 python/paddle/tensor/manipulation.py      | 10 +++-
 2 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_tile_op.py b/python/paddle/fluid/tests/unittests/test_tile_op.py
index b95e8c4af7f..c954cd77963 100644
--- a/python/paddle/fluid/tests/unittests/test_tile_op.py
+++ b/python/paddle/fluid/tests/unittests/test_tile_op.py
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 
 import paddle
 import paddle.fluid as fluid
@@ -196,6 +196,52 @@ class TestTileOpInteger(OpTest):
         self.check_output()
 
 
+class TestTileOpFloat16(OpTest):
+    def setUp(self):
+        self.op_type = "tile"
+        self.dtype = np.float16
+        self.__class__.op_type = self.op_type
+        self.python_api = paddle.tile
+        self.inputs = {
+            'X': np.random.uniform(10, size=(100, 4, 5)).astype(self.dtype)
+        }
+        self.attrs = {'repeat_times': [2, 1, 4]}
+        output = np.tile(self.inputs['X'], (2, 1, 4))
+        self.outputs = {'Out': output}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out')
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestTileOpBFloat16(OpTest):
+    def setUp(self):
+        self.op_type = 'tile'
+        self.dtype = np.uint16
+        self.__class__.op_type = self.op_type
+        self.python_api = paddle.tile
+        x = np.random.uniform(10, size=(100, 4, 5)).astype(np.float32)
+        output = np.tile(x, (2, 1, 4))
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.attrs = {'repeat_times': [2, 1, 4]}
+        self.outputs = {'Out': convert_float_to_uint16(output)}
+
+    def test_check_output(self):
+        place = core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+    def test_check_grad(self):
+        place = core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out')
+
+
 # Situation 5: input x is Bool
 class TestTileOpBoolean(OpTest):
     def setUp(self):
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index bf0a52e5724..801fcf5b6ef 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3156,7 +3156,15 @@ def tile(x, repeat_times, name=None):
     check_variable_and_dtype(
         x,
         'x',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+        [
+            'bool',
+            'float16',
+            'bfloat16',
+            'float32',
+            'float64',
+            'int32',
+            'int64',
+        ],
         'tile',
     )
     if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
--
GitLab
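
Reviewer note (not part of the patch): a minimal sketch of what the widened dtype list enables from user code, assuming a CUDA build of Paddle with bfloat16 support; paddle.tile and paddle.cast are existing APIs, the variable names are illustrative. In the unit test above, bfloat16 tensors are represented as uint16 via convert_float_to_uint16, which is the OpTest convention rather than a user-facing API.

    import numpy as np
    import paddle

    # Minimal sketch (assumes a CUDA build with bfloat16 support): the
    # widened dtype check in paddle.tile now accepts bfloat16 inputs
    # alongside the previously supported dtypes.
    x = paddle.to_tensor(np.random.uniform(size=(100, 4, 5)).astype(np.float32))
    x_bf16 = paddle.cast(x, 'bfloat16')                # cast input to bfloat16
    out = paddle.tile(x_bf16, repeat_times=[2, 1, 4])  # repeat along each dim
    print(out.shape)  # [200, 4, 20]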