From 3792a49d81e95a71a140516463cf3c3c62d87b11 Mon Sep 17 00:00:00 2001
From: LielinJiang <50691816+LielinJiang@users.noreply.github.com>
Date: Wed, 3 Jun 2020 20:43:09 +0800
Subject: [PATCH] Fix bilinear_initializer bug when type of input data is
 float64 (#24771)

* fix bilinear initializer, test=develop
---
 python/paddle/fluid/initializer.py                      | 7 ++++---
 python/paddle/fluid/tests/unittests/test_initializer.py | 8 +++++++-
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index c838f09ced2..6c394f4c695 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -783,7 +783,7 @@ class BilinearInitializer(Initializer):
         weight = np.reshape(weight, shape)
 
         # to be compatible of fp16 initalizers
-        if var.dtype == VarDesc.VarType.FP16:
+        if var.dtype == VarDesc.VarType.FP16 or var.dtype == VarDesc.VarType.FP64:
             out_dtype = VarDesc.VarType.FP32
             out_var = block.create_var(
                 name=unique_name.generate(".".join(
@@ -800,7 +800,8 @@ class BilinearInitializer(Initializer):
             value_name = "fp32_values"
             values = [float(v) for v in weight.flat]
         else:
-            raise ValueError("Unsupported dtype %s", input.dtype)
+            raise TypeError("Unsupported dtype %s", var.dtype)
+
         if np.prod(shape) > 1024 * 1024:
             raise ValueError("The size of input is too big. ")
         op = block.append_op(
@@ -812,7 +813,7 @@ class BilinearInitializer(Initializer):
                 value_name: values
             })
 
-        if var.dtype == VarDesc.VarType.FP16:
+        if var.dtype == VarDesc.VarType.FP16 or var.dtype == VarDesc.VarType.FP64:
             block.append_op(
                 type="cast",
                 inputs={"X": out_var},
diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py
index c6bed4db72e..1b880569dfe 100644
--- a/python/paddle/fluid/tests/unittests/test_initializer.py
+++ b/python/paddle/fluid/tests/unittests/test_initializer.py
@@ -474,18 +474,24 @@ class TestBilinearInitializer(unittest.TestCase):
             lod_level=0,
             name="param",
             initializer=initializer.BilinearInitializer())
-        num_ops = 2 if dtype == "float16" else 1
+        num_ops = 2 if dtype == "float16" or dtype == "float64" else 1
         self.assertEqual(len(block.ops), num_ops)
         init_op = block.ops[0]
         self.assertEqual(init_op.type, 'assign_value')
         return block
 
+    def test_bilinear_initializer_fp64(self):
+        self.test_bilinear_initializer(dtype='float64')
+
     def test_bilinear_initializer_fp16(self):
         """Test the bilinear initializer with supplied arguments
         """
         block = self.test_bilinear_initializer("float16")
         self.assertTrue(check_cast_op(block.ops[1]))
 
+    def test_type_error(self):
+        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')
+
 
 class TestNumpyArrayInitializer(unittest.TestCase):
     def test_numpy_array_initializer(self, dtype="float32"):
--
GitLab
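
The following is a minimal sketch of exercising the fixed code path, assuming
the Paddle Fluid 1.x static-graph API that this patch targets (the parameter
shape and names here are illustrative, mirroring the shape used in the unit
test above). With the fix applied, a float64 parameter takes the same route
as float16: the bilinear weights are assigned through a temporary float32
variable via assign_value and then cast back to the parameter's dtype, so the
block should contain two ops instead of one.

import paddle.fluid as fluid
from paddle.fluid import initializer

program = fluid.Program()
block = program.global_block()
param = block.create_parameter(
    dtype="float64",  # before this patch, fp64 hit the "Unsupported dtype" branch
    shape=[8, 1, 3, 3],
    lod_level=0,
    name="bilinear_param",  # hypothetical name, for illustration only
    initializer=initializer.BilinearInitializer())

# assign_value writes fp32 values into a temp var; cast converts to fp64.
print([op.type for op in block.ops])  # expected: ['assign_value', 'cast']

Note that the patch also changes the failure mode for genuinely unsupported
dtypes (e.g. int32) from ValueError to TypeError, and reports the variable's
dtype (var.dtype) rather than the previously undefined input.dtype; the added
test_type_error case asserts exactly that.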