diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index bd03f27065b952538d8d107c367a87dc68230759..736c5fe3005f28002960245c550bc8ce93cf1ab8 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -939,6 +939,10 @@ class Linear(layers.Layer):
 
             return dygraph_utils._append_activation_in_dygraph(pre_act,
                                                                self._act)
+
+        check_variable_and_dtype(input, 'input',
+                                 ['float16', 'float32', 'float64'], "Linear")
+
         attrs = {
             "x_num_col_dims": len(input.shape) - 1,
             "y_num_col_dims": 1,
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 543264be31ec457e343f7ddf86beb876de5746f9..f919ddd72efd0e50563db455ac73d8930dc2d167 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -155,6 +155,31 @@ class TestLayer(LayerTest):
         self.assertTrue(np.array_equal(static_ret, dy_ret_value))
 
+        with self.static_graph():
+
+            # the input of Linear must be Variable.
+            def test_Variable():
+                inp = np.ones([3, 32, 32], dtype='float32')
+                linear = nn.Linear(
+                    32,
+                    4,
+                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
+                linear_ret1 = linear(inp)
+
+            self.assertRaises(TypeError, test_Variable)
+
+            # the input dtype of Linear must be float16 or float32 or float64
+            # float16 only can be set on GPU place
+            def test_type():
+                inp = np.ones([3, 32, 32], dtype='int32')
+                linear = nn.Linear(
+                    32,
+                    4,
+                    bias_attr=fluid.initializer.ConstantInitializer(value=1))
+                linear_ret2 = linear(inp)
+
+            self.assertRaises(TypeError, test_type)
+
     def test_layer_norm(self):
         inp = np.ones([3, 32, 32], dtype='float32')
         with self.static_graph():