diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index c838f09ced26e1f21f7a7c9cfed5095323b1cead..6c394f4c6955fdccae79fa7bca791f00bba41d38 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -783,7 +783,7 @@ class BilinearInitializer(Initializer):
             weight = np.reshape(weight, shape)

         # to be compatible of fp16 initalizers
-        if var.dtype == VarDesc.VarType.FP16:
+        if var.dtype == VarDesc.VarType.FP16 or var.dtype == VarDesc.VarType.FP64:
             out_dtype = VarDesc.VarType.FP32
             out_var = block.create_var(
                 name=unique_name.generate(".".join(
@@ -800,7 +800,8 @@ class BilinearInitializer(Initializer):
             value_name = "fp32_values"
             values = [float(v) for v in weight.flat]
         else:
-            raise ValueError("Unsupported dtype %s", input.dtype)
+            raise TypeError("Unsupported dtype %s", var.dtype)
+
         if np.prod(shape) > 1024 * 1024:
             raise ValueError("The size of input is too big. ")
         op = block.append_op(
@@ -812,7 +813,7 @@ class BilinearInitializer(Initializer):
                 value_name: values
             })

-        if var.dtype == VarDesc.VarType.FP16:
+        if var.dtype == VarDesc.VarType.FP16 or var.dtype == VarDesc.VarType.FP64:
             block.append_op(
                 type="cast",
                 inputs={"X": out_var},
diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py
index c6bed4db72e50135fba7b22f805efb281c178e2d..1b880569dfee0b7a20e783fae419e32db712121e 100644
--- a/python/paddle/fluid/tests/unittests/test_initializer.py
+++ b/python/paddle/fluid/tests/unittests/test_initializer.py
@@ -474,18 +474,24 @@ class TestBilinearInitializer(unittest.TestCase):
             lod_level=0,
             name="param",
             initializer=initializer.BilinearInitializer())
-        num_ops = 2 if dtype == "float16" else 1
+        num_ops = 2 if dtype == "float16" or dtype == "float64" else 1
         self.assertEqual(len(block.ops), num_ops)
         init_op = block.ops[0]
         self.assertEqual(init_op.type, 'assign_value')
         return block

+    def test_bilinear_initializer_fp64(self):
+        self.test_bilinear_initializer(dtype='float64')
+
     def test_bilinear_initializer_fp16(self):
         """Test the bilinear initializer with supplied arguments
         """
         block = self.test_bilinear_initializer("float16")
         self.assertTrue(check_cast_op(block.ops[1]))

+    def test_type_error(self):
+        self.assertRaises(TypeError, self.test_bilinear_initializer, 'int32')
+
 
 class TestNumpyArrayInitializer(unittest.TestCase):
     def test_numpy_array_initializer(self, dtype="float32"):
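
A minimal usage sketch of the FP64 path this patch enables, mirroring the call pattern from TestBilinearInitializer; the Program setup and the parameter name are illustrative assumptions, not part of the patch. Because assign_value only carries fp32/int32 value buffers, a float64 parameter is now initialized through an FP32 intermediate and cast back, so the block ends up with two ops, same as the existing FP16 path.

# A minimal sketch, assuming the setup used in TestBilinearInitializer;
# the parameter name "bilinear_param" is illustrative, not from the patch.
import paddle.fluid as fluid
from paddle.fluid import initializer

program = fluid.Program()
block = program.global_block()

# With dtype="float64", BilinearInitializer now builds the weights in FP32
# via an assign_value op and appends a cast op back to FP64.
param = block.create_parameter(
    dtype="float64",
    shape=[8, 1, 3, 3],
    lod_level=0,
    name="bilinear_param",
    initializer=initializer.BilinearInitializer())

# The updated test asserts exactly this op sequence for float16/float64.
assert [op.type for op in block.ops] == ['assign_value', 'cast']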