diff --git a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py
index 897c5b54b169b7e6054cf6692b951f4d8d234ecc..18daf2059f2410c029aa423dad08051dc72f5100 100644
--- a/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py
+++ b/python/paddle/fluid/tests/unittests/test_broadcast_to_op.py
@@ -70,6 +70,41 @@ class TestBroadcastToAPI(unittest.TestCase):
         assert np.array_equal(res_2, np.tile(input, (1, 1)))
         assert np.array_equal(res_3, np.tile(input, (1, 1)))
 
+    def test_api_fp16_gpu(self):
+        if paddle.fluid.core.is_compiled_with_cuda():
+            place = paddle.CUDAPlace(0)
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                input = np.random.random([12, 14]).astype("float16")
+                x = paddle.static.data(
+                    name="x", shape=[12, 14], dtype="float16"
+                )
+
+                positive_2 = paddle.fluid.layers.fill_constant([1], "int32", 12)
+                expand_shape = paddle.static.data(
+                    name="expand_shape",
+                    shape=[2],
+                    dtype="int32",
+                )
+
+                out_1 = paddle.broadcast_to(x, shape=[12, 14])
+                out_2 = paddle.broadcast_to(x, shape=[positive_2, 14])
+                out_3 = paddle.broadcast_to(x, shape=expand_shape)
+
+                exe = paddle.static.Executor(place)
+                res_1, res_2, res_3 = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={
+                        "x": input,
+                        "expand_shape": np.array([12, 14]).astype("int32"),
+                    },
+                    fetch_list=[out_1, out_2, out_3],
+                )
+                assert np.array_equal(res_1, np.tile(input, (1, 1)))
+                assert np.array_equal(res_2, np.tile(input, (1, 1)))
+                assert np.array_equal(res_3, np.tile(input, (1, 1)))
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index e2325fda753a1aae749988429b7a10acf1520ff2..8f257725e89bed55b991dd0674b520326a6a732e 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3256,7 +3256,7 @@ def broadcast_to(x, shape, name=None):
 
     Args:
-        x (Tensor): The input tensor, its data type is bool, float32, float64, int32 or int64.
+        x (Tensor): The input tensor, its data type is bool, float16, float32, float64, int32 or int64.
         shape (list|tuple|Tensor): The result shape after broadcasting. The data type is int32. If shape is a list or tuple, all its elements should be integers or 0-D or 1-D Tensors with the data type int32. If shape is a Tensor, it should be an 1-D Tensor with the data type int32. The value -1 in shape means keeping the corresponding dimension unchanged.
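
For reference, a minimal usage sketch (not part of the diff above) of the float16 path that the updated docstring documents, run in dynamic graph mode; it assumes a CUDA-enabled build of Paddle, and the shapes and values are illustrative only.

import numpy as np
import paddle

# Same guard as the new unit test: the float16 kernel needs a CUDA build.
if paddle.is_compiled_with_cuda():
    paddle.set_device("gpu")
    data = np.random.random([12, 14]).astype("float16")
    x = paddle.to_tensor(data)

    # Broadcasting to the tensor's own shape leaves the values unchanged,
    # mirroring the np.tile(input, (1, 1)) checks in the test above.
    out = paddle.broadcast_to(x, shape=[12, 14])
    assert out.dtype == paddle.float16
    assert np.array_equal(out.numpy(), np.tile(data, (1, 1)))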