From 3678cae21f6e1fbb9dcc079f9d552cd2550d4024 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BC=A0=E6=98=A5=E4=B9=94?= <83450930+Liyulingyue@users.noreply.github.com>
Date: Mon, 27 Feb 2023 15:50:32 +0800
Subject: [PATCH] support fp16 on AlphaDropout (#50917)

---
 .../fluid/tests/unittests/test_dropout_op.py | 24 +++++++++++++++++++
 python/paddle/nn/functional/common.py        |  4 ++--
 2 files changed, 26 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/tests/unittests/test_dropout_op.py b/python/paddle/fluid/tests/unittests/test_dropout_op.py
index 9a48b877f5..142d18ec99 100644
--- a/python/paddle/fluid/tests/unittests/test_dropout_op.py
+++ b/python/paddle/fluid/tests/unittests/test_dropout_op.py
@@ -980,6 +980,30 @@ class TestAlphaDropoutCAPI(unittest.TestCase):
                     result.numpy(), result_np, rtol=1e-05
                 )
 
+    def test_static_fp16_gpu(self):
+        if paddle.fluid.core.is_compiled_with_cuda():
+            place = paddle.CUDAPlace(0)
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                input = np.random.random([2, 3]).astype("float16")
+
+                x = paddle.static.data(name="x", shape=[2, 3], dtype="float16")
+
+                m = paddle.nn.AlphaDropout(p=0.0)
+                y = m(x)
+
+                exe = paddle.static.Executor(place)
+                res = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={
+                        "x": input,
+                    },
+                    fetch_list=[y],
+                )
+
+                np.testing.assert_allclose(res[0], input, rtol=1e-05)
+
 
 class TestDropoutWithDeterminateSeedGenerator(unittest.TestCase):
     def setUp(self):
diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py
index 74d9806723..16d53b1b12 100644
--- a/python/paddle/nn/functional/common.py
+++ b/python/paddle/nn/functional/common.py
@@ -1384,7 +1384,7 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
     Alpha Dropout fits well to SELU activate function by randomly setting activations to the negative saturation value.
 
     Args:
-        x (Tensor): The input tensor. The data type is float32 or float64.
+        x (Tensor): The input tensor. The data type is float16, float32 or float64.
         p (float | int): Probability of setting units to zero. Default 0.5.
         training (bool): A flag indicating whether it is in train phrase or not. Default True.
         name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -1416,7 +1416,7 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
 
     if not in_dynamic_mode():
         check_variable_and_dtype(
-            x, 'x', ['float32', 'float64'], 'alpha_dropout'
+            x, 'x', ['float16', 'float32', 'float64'], 'alpha_dropout'
         )
 
     if training:
-- 
GitLab
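
Note (not part of the patch): below is a minimal dygraph sketch of the fp16 path this change enables. It mirrors the static-graph test added above and assumes a CUDA-enabled Paddle build, since float16 kernels generally require a GPU place; p=0.0 is used only so the output can be compared against the input.

    # Hedged sketch, not from the patch: exercises AlphaDropout with a
    # float16 input in dynamic-graph mode on a CUDA device.
    import numpy as np
    import paddle

    if paddle.is_compiled_with_cuda():
        paddle.set_device("gpu")
        x = paddle.to_tensor(np.random.random([2, 3]).astype("float16"))
        m = paddle.nn.AlphaDropout(p=0.0)  # p=0.0 leaves activations unchanged
        y = m(x)
        np.testing.assert_allclose(y.numpy(), x.numpy(), rtol=1e-05)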