diff --git a/paddle/phi/kernels/gpu/pixel_unshuffle_grad_kernel.cu b/paddle/phi/kernels/gpu/pixel_unshuffle_grad_kernel.cu index d7d2cde4ebade0b0acc1215ba691755ea76a37b0..830d91452ffd4f13647df7e79d4a11291101d58b 100644 --- a/paddle/phi/kernels/gpu/pixel_unshuffle_grad_kernel.cu +++ b/paddle/phi/kernels/gpu/pixel_unshuffle_grad_kernel.cu @@ -23,4 +23,6 @@ PD_REGISTER_KERNEL(pixel_unshuffle_grad, ALL_LAYOUT, phi::PixelUnshuffleGradKernel, float, - double) {} + double, + phi::dtype::float16, + phi::dtype::bfloat16) {} diff --git a/paddle/phi/kernels/gpu/pixel_unshuffle_kernel.cu b/paddle/phi/kernels/gpu/pixel_unshuffle_kernel.cu index fcc53cbee1ecb378b0a822a3528c6a4a75bdc91b..cfe71b4f0f39bec35023874f976d17e1d588662e 100644 --- a/paddle/phi/kernels/gpu/pixel_unshuffle_kernel.cu +++ b/paddle/phi/kernels/gpu/pixel_unshuffle_kernel.cu @@ -23,4 +23,6 @@ PD_REGISTER_KERNEL(pixel_unshuffle, ALL_LAYOUT, phi::PixelUnshuffleKernel, float, - double) {} + double, + phi::dtype::float16, + phi::dtype::bfloat16) {} diff --git a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py index b12f9c19d501dfb6a4e12f7db5e9f1c5dd453c0e..aa2ba1895a6cab9870cb41c3e58c1f015fe210dc 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_shuffle_op.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from eager_op_test import OpTest +from eager_op_test import OpTest, convert_float_to_uint16 import paddle import paddle.nn.functional as F @@ -64,6 +64,7 @@ class TestPixelShuffleOp(OpTest): def setUp(self): self.op_type = "pixel_shuffle" self.python_api = paddle.nn.functional.pixel_shuffle + self.init_dtype() self.init_data_format() n, c, h, w = 2, 9, 4, 4 @@ -74,13 +75,16 @@ class TestPixelShuffleOp(OpTest): up_factor = 3 - x = np.random.random(shape).astype("float64") + x = np.random.random(shape).astype(self.dtype) npresult = pixel_shuffle_np(x, 
up_factor, self.format) self.inputs = {'X': x} self.outputs = {'Out': npresult} self.attrs = {'upscale_factor': up_factor, "data_format": self.format} + def init_dtype(self): + self.dtype = np.float64 + def init_data_format(self): self.format = "NCHW" @@ -99,6 +103,60 @@ class TestChannelLast(TestPixelShuffleOp): self.format = "NHWC" +class TestPixelShuffleFP16Op(TestPixelShuffleOp): + def init_dtype(self): + self.dtype = np.float16 + + +@unittest.skipIf( + not core.is_compiled_with_cuda() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support bfloat16", +) +class TestPixelShuffleBF16Op(OpTest): + def setUp(self): + self.op_type = "pixel_shuffle" + self.python_api = paddle.nn.functional.pixel_shuffle + self.init_dtype() + self.init_data_format() + n, c, h, w = 2, 9, 4, 4 + + if self.format == "NCHW": + shape = [n, c, h, w] + if self.format == "NHWC": + shape = [n, h, w, c] + + up_factor = 3 + + x = np.random.random(shape).astype(self.np_dtype) + npresult = pixel_shuffle_np(x, up_factor, self.format) + + self.inputs = {'X': x} + self.outputs = {'Out': npresult} + self.attrs = {'upscale_factor': up_factor, "data_format": self.format} + + self.place = core.CUDAPlace(0) + self.inputs['X'] = convert_float_to_uint16(self.inputs['X']) + self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out']) + + def init_dtype(self): + self.dtype = np.uint16 + self.np_dtype = np.float32 + + def init_data_format(self): + self.format = "NCHW" + + def test_check_output(self): + self.check_output_with_place(self.place) + + def test_check_grad(self): + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + ) + + class TestPixelShuffleAPI(unittest.TestCase): def setUp(self): self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float64") diff --git a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py index 
5d1f9907ecb9e5d2dc8732ec22192751a2b369cc..b2cfd457603c429f85faca1b8bb6e4a2004d50d0 100644 --- a/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py +++ b/python/paddle/fluid/tests/unittests/test_pixel_unshuffle.py @@ -15,7 +15,7 @@ import unittest import numpy as np -from eager_op_test import OpTest +from eager_op_test import OpTest, convert_float_to_uint16 import paddle import paddle.nn.functional as F @@ -82,6 +82,7 @@ class TestPixelUnshuffleOp(OpTest): self.op_type = "pixel_unshuffle" self.python_api = pixel_unshuffle_wrapper + self.init_dtype() self.init_data_format() n, c, h, w = 2, 1, 12, 12 @@ -92,7 +93,7 @@ down_factor = 3 - x = np.random.random(shape).astype("float64") + x = np.random.random(shape).astype(self.dtype) npresult = pixel_unshuffle_np(x, down_factor, self.format) self.inputs = {"X": x} @@ -102,6 +103,9 @@ "data_format": self.format, } + def init_dtype(self): + self.dtype = np.float64 + def init_data_format(self): '''init_data_format''' @@ -127,6 +131,65 @@ class TestChannelLast(TestPixelUnshuffleOp): self.format = "NHWC" +class TestPixelUnshuffleFP16Op(TestPixelUnshuffleOp): + def init_dtype(self): + self.dtype = np.float16 + + +@unittest.skipIf( + not core.is_compiled_with_cuda() + or not core.is_bfloat16_supported(core.CUDAPlace(0)), + "core is not compiled with CUDA or not support bfloat16", +) +class TestPixelUnshuffleBF16Op(OpTest): + '''TestPixelUnshuffleBF16Op''' + + def setUp(self): + self.op_type = "pixel_unshuffle" + self.python_api = pixel_unshuffle_wrapper + self.init_dtype() + self.init_data_format() + n, c, h, w = 2, 1, 12, 12 + + if self.format == "NCHW": + shape = [n, c, h, w] + if self.format == "NHWC": + shape = [n, h, w, c] + + down_factor = 3 + + x = np.random.random(shape).astype(self.np_dtype) + npresult = pixel_unshuffle_np(x, down_factor, self.format) + + self.inputs = {"X": x} + self.outputs = {"Out": npresult} + self.attrs = { + 
"downscale_factor": down_factor, + "data_format": self.format, + } + + self.place = core.CUDAPlace(0) + self.inputs['X'] = convert_float_to_uint16(self.inputs['X']) + self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out']) + + def init_dtype(self): + self.dtype = np.uint16 + self.np_dtype = np.float32 + + def init_data_format(self): + self.format = "NCHW" + + def test_check_output(self): + self.check_output_with_place(self.place) + + def test_check_grad(self): + self.check_grad_with_place( + self.place, + ['X'], + 'Out', + ) + + class TestPixelUnshuffleAPI(unittest.TestCase): '''TestPixelUnshuffleAPI''' diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index 80e1a176b7662d61e23843e4ff94eb173453542c..03d94b91abb7ff5bbed9f934a7ce8cbade5af38c 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -443,7 +443,9 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): ) helper = LayerHelper("pixel_unshuffle", **locals()) - check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_unshuffle') + check_variable_and_dtype( + x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'pixel_unshuffle' + ) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( type="pixel_unshuffle",