From 52d0967a9872177902c85aaa4f2dfe8ba96a799d Mon Sep 17 00:00:00 2001
From: wawltor
Date: Wed, 15 Apr 2020 10:09:45 +0800
Subject: [PATCH] Fix fp16 support bug in scale op, cherry-pick from #23793

Fix float16 support in the scale op: move the dtype check after the
dygraph fast path, add float16 and int8 to the accepted dtypes (also
fixing the 'in64' typo and duplicate 'uint8' in the old list), and
delete the test case that expected float16 input to raise a TypeError.
---
 python/paddle/fluid/layers/nn.py                     | 8 ++++----
 python/paddle/fluid/tests/unittests/test_scale_op.py | 6 ------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index d19b5c48fcf..4a973cc266f 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -10713,10 +10713,6 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
 
 
     """
-    check_variable_and_dtype(
-        x, "x",
-        ['float32', 'float64', 'uint8', 'int16', 'int32', 'in64', 'uint8'],
-        "scale")
     if in_dygraph_mode():
         _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
         out = core.ops.scale(x, 'scale',
@@ -10724,6 +10720,10 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
                              float(bias), 'bias_after_scale', bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out)
 
+    check_variable_and_dtype(x, "x", [
+        'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64',
+        'uint8'
+    ], "scale")
     inputs = {'X': [x]}
     attrs = {
         'bias': float(bias),
diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py
index e2d239d2bbc..052704659b6 100644
--- a/python/paddle/fluid/tests/unittests/test_scale_op.py
+++ b/python/paddle/fluid/tests/unittests/test_scale_op.py
@@ -131,12 +131,6 @@ class TestScaleRaiseError(unittest.TestCase):
 
         self.assertRaises(TypeError, test_type)
 
-        def test_dtype():
-            data = fluid.data(shape=[10], dtype="float16", name="input")
-            fluid.layers.scale(data)
-
-        self.assertRaises(TypeError, test_dtype)
-
 
 # Add FP16 test
 @unittest.skipIf(not core.is_compiled_with_cuda(),
-- 
GitLab
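
For reference, a minimal sketch of what this change enables: with float16
added to the dtype whitelist, a float16 tensor now passes the
check_variable_and_dtype call in fluid.layers.scale instead of raising a
TypeError. The program setup below is illustrative only (the variable name,
shape, and scale/bias values are arbitrary), and it assumes a CUDA build of
Paddle, matching the skipIf guard on the new FP16 test.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core

if core.is_compiled_with_cuda():
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # Before this patch, a float16 input here raised TypeError
        # from check_variable_and_dtype.
        data = fluid.data(name="input", shape=[10], dtype="float16")
        out = fluid.layers.scale(data, scale=2.0, bias=1.0)

    exe = fluid.Executor(fluid.CUDAPlace(0))
    exe.run(startup_prog)
    x = np.random.random(size=(10, )).astype("float16")
    result, = exe.run(main_prog, feed={"input": x}, fetch_list=[out])
    print(result)  # x * 2.0 + 1.0, computed in float16

With bias_after_scale left at its default of True, the op computes
out = x * scale + bias, which is why the expected result above is
x * 2.0 + 1.0.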