From 17ec3ab23e45fbba2220684731c8dbfb0896bb5e Mon Sep 17 00:00:00 2001
From: wawltor
Date: Tue, 14 Apr 2020 12:11:04 +0800
Subject: [PATCH] Fix the fp16 dtype support for the scale op, and delete the
 fp16 TypeError test case for scale

Fix the fp16 dtype support for the scale op, and delete the unit test case
that expected a TypeError for a float16 input to scale.
---
 python/paddle/fluid/layers/nn.py                      | 8 ++++----
 python/paddle/fluid/tests/unittests/test_scale_op.py  | 6 ------
 2 files changed, 4 insertions(+), 10 deletions(-)

diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 9f05d3d128d..e4094bb5172 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -10713,10 +10713,6 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
 
     """
 
-    check_variable_and_dtype(
-        x, "x",
-        ['float32', 'float64', 'uint8', 'int16', 'int32', 'in64', 'uint8'],
-        "scale")
     if in_dygraph_mode():
         _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
         out = core.ops.scale(x, 'scale',
@@ -10724,6 +10720,10 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
                              float(bias), 'bias_after_scale', bias_after_scale)
         return dygraph_utils._append_activation_in_dygraph(out)
 
+    check_variable_and_dtype(x, "x", [
+        'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64',
+        'uint8'
+    ], "scale")
     inputs = {'X': [x]}
     attrs = {
         'bias': float(bias),
diff --git a/python/paddle/fluid/tests/unittests/test_scale_op.py b/python/paddle/fluid/tests/unittests/test_scale_op.py
index e2d239d2bbc..052704659b6 100644
--- a/python/paddle/fluid/tests/unittests/test_scale_op.py
+++ b/python/paddle/fluid/tests/unittests/test_scale_op.py
@@ -131,12 +131,6 @@ class TestScaleRaiseError(unittest.TestCase):
 
         self.assertRaises(TypeError, test_type)
 
-        def test_dtype():
-            data = fluid.data(shape=[10], dtype="float16", name="input")
-            fluid.layers.scale(data)
-
-        self.assertRaises(TypeError, test_dtype)
-
 
 # Add FP16 test
 @unittest.skipIf(not core.is_compiled_with_cuda(),
--
GitLab
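
For context, a minimal usage sketch (not part of the patch itself): after this change the dtype check runs only on the static-graph path and accepts 'float16', so building a program that scales a float16 tensor should no longer raise a TypeError. This assumes the 1.x fluid API already used in the diff above; actually executing the fp16 kernel still requires a CUDA build, as the skipIf guard on the new FP16 test indicates.

    import paddle.fluid as fluid

    # Build a small static-graph program with a float16 input.
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data = fluid.data(name="input", shape=[10], dtype="float16")
        # Before this patch, check_variable_and_dtype rejected float16 and
        # this call raised a TypeError; with the updated list it should pass.
        out = fluid.layers.scale(data, scale=2.0, bias=1.0)
        print(out.dtype)  # expected: a float16 Variable (VarType.FP16)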