Unverified commit 2aaed989, authored by chenxujun, committed by GitHub

Add pixel_shuffle pixel_unshuffle fp16/bf16 (#52582)

Parent e64ce0bb
@@ -23,4 +23,6 @@ PD_REGISTER_KERNEL(pixel_unshuffle_grad,
                    ALL_LAYOUT,
                    phi::PixelUnshuffleGradKernel,
                    float,
-                   double) {}
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}

@@ -23,4 +23,6 @@ PD_REGISTER_KERNEL(pixel_unshuffle,
                    ALL_LAYOUT,
                    phi::PixelUnshuffleKernel,
                    float,
-                   double) {}
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
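
With phi::dtype::float16 and phi::dtype::bfloat16 added to both registrations, the ops accept reduced-precision tensors. A minimal smoke-test sketch, assuming a CUDA build of Paddle (shapes chosen to mirror the tests below):

```python
import paddle

paddle.set_device('gpu')  # the fp16/bf16 kernels are exercised on GPU in the tests

# pixel_shuffle rearranges (N, C*r^2, H, W) -> (N, C, H*r, W*r); here r = 3.
x = paddle.randn([2, 9, 4, 4]).astype('float16')
y = paddle.nn.functional.pixel_shuffle(x, upscale_factor=3)
print(y.shape, y.dtype)  # [2, 1, 12, 12] paddle.float16
```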
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 import paddle.nn.functional as F
@@ -64,6 +64,7 @@ class TestPixelShuffleOp(OpTest):
     def setUp(self):
         self.op_type = "pixel_shuffle"
         self.python_api = paddle.nn.functional.pixel_shuffle
+        self.init_dtype()
         self.init_data_format()

         n, c, h, w = 2, 9, 4, 4
@@ -74,13 +75,16 @@ class TestPixelShuffleOp(OpTest):
         up_factor = 3

-        x = np.random.random(shape).astype("float64")
+        x = np.random.random(shape).astype(self.dtype)
         npresult = pixel_shuffle_np(x, up_factor, self.format)

         self.inputs = {'X': x}
         self.outputs = {'Out': npresult}
         self.attrs = {'upscale_factor': up_factor, "data_format": self.format}

+    def init_dtype(self):
+        self.dtype = np.float64
+
     def init_data_format(self):
         self.format = "NCHW"
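
The tests compare against a pixel_shuffle_np reference helper defined earlier in the file and elided from this diff. A sketch of what such a NumPy reference looks like, reconstructed for illustration rather than copied from the commit:

```python
import numpy as np

def pixel_shuffle_np(x, up_factor, data_format="NCHW"):
    # Reference pixel shuffle: split each channel group of r*r sub-pixels
    # and interleave them spatially.
    r = up_factor
    if data_format == "NCHW":
        n, c, h, w = x.shape
        new_c = c // (r * r)
        tmp = x.reshape(n, new_c, r, r, h, w)
        tmp = tmp.transpose(0, 1, 4, 2, 5, 3)  # N, C', H, r, W, r
        return tmp.reshape(n, new_c, h * r, w * r)
    else:  # NHWC
        n, h, w, c = x.shape
        new_c = c // (r * r)
        tmp = x.reshape(n, h, w, new_c, r, r)
        tmp = tmp.transpose(0, 1, 4, 2, 5, 3)  # N, H, r, W, r, C'
        return tmp.reshape(n, h * r, w * r, new_c)
```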
@@ -99,6 +103,60 @@ class TestChannelLast(TestPixelShuffleOp):
         self.format = "NHWC"


+class TestPixelShuffleFP16Op(TestPixelShuffleOp):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support bfloat16",
+)
+class TestPixelShuffleBF16Op(OpTest):
+    def setUp(self):
+        self.op_type = "pixel_shuffle"
+        self.python_api = paddle.nn.functional.pixel_shuffle
+        self.init_dtype()
+        self.init_data_format()
+
+        n, c, h, w = 2, 9, 4, 4
+        if self.format == "NCHW":
+            shape = [n, c, h, w]
+        if self.format == "NHWC":
+            shape = [n, h, w, c]
+
+        up_factor = 3
+
+        x = np.random.random(shape).astype(self.np_dtype)
+        npresult = pixel_shuffle_np(x, up_factor, self.format)
+
+        self.inputs = {'X': x}
+        self.outputs = {'Out': npresult}
+        self.attrs = {'upscale_factor': up_factor, "data_format": self.format}
+
+        self.place = core.CUDAPlace(0)
+        self.inputs['X'] = convert_float_to_uint16(self.inputs['X'])
+        self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
+
+    def init_dtype(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+
+    def init_data_format(self):
+        self.format = "NCHW"
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad_with_place(
+            self.place,
+            ['X'],
+            'Out',
+        )
+
+
 class TestPixelShuffleAPI(unittest.TestCase):
     def setUp(self):
         self.x_1_np = np.random.random([2, 9, 4, 4]).astype("float64")
......
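
convert_float_to_uint16 comes from eager_op_test: OpTest stores bfloat16 data as uint16 bit patterns, so the fp32 inputs and expected outputs are packed before comparison. A rough sketch of the idea, using simple truncation (the actual helper may round to nearest; the function name here is hypothetical):

```python
import numpy as np

def bf16_bits_sketch(x):
    # bfloat16 is the top 16 bits of an IEEE-754 float32, so dropping the
    # low 16 bits of the fp32 bit pattern yields the uint16 representation
    # that the bf16 kernels and OpTest comparisons operate on.
    bits = np.ascontiguousarray(x, dtype=np.float32).view(np.uint32)
    return (bits >> 16).astype(np.uint16)
```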
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 import paddle.nn.functional as F
@@ -82,6 +82,7 @@ class TestPixelUnshuffleOp(OpTest):
         self.op_type = "pixel_unshuffle"
         self.python_api = pixel_unshuffle_wrapper
+        self.init_dtype()
         self.init_data_format()

         n, c, h, w = 2, 1, 12, 12
@@ -92,7 +93,7 @@ class TestPixelUnshuffleOp(OpTest):
         down_factor = 3

-        x = np.random.random(shape).astype("float64")
+        x = np.random.random(shape).astype(self.dtype)
         npresult = pixel_unshuffle_np(x, down_factor, self.format)

         self.inputs = {"X": x}
@@ -102,6 +103,9 @@ class TestPixelUnshuffleOp(OpTest):
             "data_format": self.format,
         }

+    def init_dtype(self):
+        self.dtype = np.float64
+
     def init_data_format(self):
         '''init_data_format'''
@@ -127,6 +131,65 @@ class TestChannelLast(TestPixelUnshuffleOp):
         self.format = "NHWC"


+class TestPixelUnshuffleFP16Op(TestPixelUnshuffleOp):
+    def init_dtype(self):
+        self.dtype = np.float16
+
+
+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support bfloat16",
+)
+class TestPixelUnshuffleBF16Op(OpTest):
+    '''TestPixelUnshuffleBF16Op'''
+
+    def setUp(self):
+        self.op_type = "pixel_unshuffle"
+        self.python_api = pixel_unshuffle_wrapper
+        self.init_dtype()
+        self.init_data_format()
+
+        n, c, h, w = 2, 1, 12, 12
+        if self.format == "NCHW":
+            shape = [n, c, h, w]
+        if self.format == "NHWC":
+            shape = [n, h, w, c]
+
+        down_factor = 3
+
+        x = np.random.random(shape).astype(self.np_dtype)
+        npresult = pixel_unshuffle_np(x, down_factor, self.format)
+
+        self.inputs = {"X": x}
+        self.outputs = {"Out": npresult}
+        self.attrs = {
+            "downscale_factor": down_factor,
+            "data_format": self.format,
+        }
+
+        self.place = core.CUDAPlace(0)
+        self.inputs['X'] = convert_float_to_uint16(self.inputs['X'])
+        self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
+
+    def init_dtype(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+
+    def init_data_format(self):
+        self.format = "NCHW"
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad_with_place(
+            self.place,
+            ['X'],
+            'Out',
+        )
+
+
 class TestPixelUnshuffleAPI(unittest.TestCase):
     '''TestPixelUnshuffleAPI'''
......
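
As with the shuffle tests, pixel_unshuffle_np is a NumPy reference helper elided from this diff. A sketch of the inverse rearrangement under the same assumptions as the pixel_shuffle_np sketch above:

```python
import numpy as np

def pixel_unshuffle_np(x, down_factor, data_format="NCHW"):
    # Inverse of pixel_shuffle: (N, C, H*r, W*r) -> (N, C*r^2, H, W).
    r = down_factor
    if data_format == "NCHW":
        n, c, h, w = x.shape
        nh, nw = h // r, w // r
        tmp = x.reshape(n, c, nh, r, nw, r)
        tmp = tmp.transpose(0, 1, 3, 5, 2, 4)  # N, C, r, r, H', W'
        return tmp.reshape(n, c * r * r, nh, nw)
    else:  # NHWC
        n, h, w, c = x.shape
        nh, nw = h // r, w // r
        tmp = x.reshape(n, nh, r, nw, r, c)
        tmp = tmp.transpose(0, 1, 3, 5, 2, 4)  # N, H', W', C, r, r
        return tmp.reshape(n, nh, nw, c * r * r)
```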
@@ -443,7 +443,9 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
         )

     helper = LayerHelper("pixel_unshuffle", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_unshuffle')
+    check_variable_and_dtype(
+        x, 'x', ['float16', 'float32', 'float64', 'uint16'], 'pixel_unshuffle'
+    )
     out = helper.create_variable_for_type_inference(dtype=x.dtype)
     helper.append_op(
         type="pixel_unshuffle",
......
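
The 'uint16' entry is how Paddle's static-graph dtype checks spell bfloat16, since bf16 variables share the uint16 VarType; 'float16' covers the new fp16 kernels. An illustrative dynamic-graph check, again assuming a CUDA build:

```python
import paddle

paddle.set_device('gpu')  # bf16 support assumed available on the device
x = paddle.randn([2, 1, 12, 12]).astype('bfloat16')
y = paddle.nn.functional.pixel_unshuffle(x, downscale_factor=3)
print(y.shape, y.dtype)  # [2, 9, 4, 4] paddle.bfloat16
```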