diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 890f9beaee69915ed61a2526c8156f8f5af3b32b..060f5412f28e3704e64d33d9a3081a2ca934e918 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -207,6 +207,7 @@ REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad, REGISTER_OP_CPU_KERNEL( concat, ops::ConcatKernel, ops::ConcatKernel, + ops::ConcatKernel, ops::ConcatKernel, ops::ConcatKernel, @@ -215,6 +216,7 @@ REGISTER_OP_CPU_KERNEL( concat_grad, ops::ConcatGradKernel, ops::ConcatGradKernel, + ops::ConcatGradKernel, ops::ConcatGradKernel, ops::ConcatGradKernel, diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc index 334126c4e0b782c98db2fd3c8278b1daf87da6b6..8c30703f2576b35deb419238de08c5f2fa7b42d2 100644 --- a/paddle/fluid/operators/concat_op.cu.cc +++ b/paddle/fluid/operators/concat_op.cu.cc @@ -20,6 +20,7 @@ namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( concat, ops::ConcatKernel, ops::ConcatKernel, + ops::ConcatKernel, ops::ConcatKernel, ops::ConcatKernel, ops::ConcatKernel); @@ -27,6 +28,7 @@ REGISTER_OP_CUDA_KERNEL( concat_grad, ops::ConcatGradKernel, ops::ConcatGradKernel, + ops::ConcatGradKernel, ops::ConcatGradKernel, ops::ConcatGradKernel, ops::ConcatGradKernel); diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 9ba1b33c739b6910a8e67133e0d38e39448aaecd..a7faf4041cfe496142427c6c6f110d849a54cca4 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -1958,7 +1958,7 @@ class Operator(object): in_proto.name) if found: in_args = inputs[in_proto.name] - if not isinstance(in_args, list): + if not isinstance(in_args, (list, tuple)): in_args = [in_args] if not in_proto.duplicable and len(in_args) > 1: raise ValueError( diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py index 
bba0baac016e86aa25260a7687a235c6cf5baaf4..e33b34cc9254b18a18c293fb3670203fecdeb38f 100644 --- a/python/paddle/fluid/layers/tensor.py +++ b/python/paddle/fluid/layers/tensor.py @@ -266,8 +266,8 @@ def concat(input, axis=0, name=None): This OP concatenates the input along the axis. Args: - input(list): List of input Tensors with data type float16, float32, float64, int32, - int64. All the Tensors in ``input`` must have the same data type. + input(list|tuple|Tensor): ``input`` can be Tensor, Tensor list or Tensor tuple with data type + bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type. axis(int|Tensor, optional): Specify the axis to operate on the input Tensors. It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way @@ -276,7 +276,8 @@ def concat(input, axis=0, name=None): need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Raises: - TypeError: The dtype of ``input`` must be one of float16, float32, float64, int32 and int64. + TypeError: ``input`` must be one of list, tuple or Tensor. + TypeError: The data type of ``input`` must be one of bool, float16, float32, float64, int32 and int64. TypeError: The ``axis`` must be int or Tensor. The dtype of ``axis`` must be int32 or int64 when it's a Tensor. TypeError: All the Tensors in ``input`` must have the same data type. 
@@ -289,20 +290,20 @@ def concat(input, axis=0, name=None): import paddle.fluid as fluid import numpy as np - in1 = np.array([[1,2,3], - [4,5,6]]) - in2 = np.array([[11,12,13], - [14,15,16]]) - in3 = np.array([[21,22], - [23,24]]) + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) with fluid.dygraph.guard(): x1 = fluid.dygraph.to_variable(in1) x2 = fluid.dygraph.to_variable(in2) x3 = fluid.dygraph.to_variable(in3) # When the axis is negative, the real axis is (axis + Rank(x)). # As follows, axis is -1, Rank(x) is 2, the real axis is 1 - out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1) - out2 = fluid.layers.concat(input=[x1,x2], axis=0) + out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1) + out2 = fluid.layers.concat(input=[x1, x2], axis=0) print(out1.numpy()) # [[ 1 2 3 11 12 13 21 22] # [ 4 5 6 14 15 16 23 24]] @@ -319,18 +320,18 @@ def concat(input, axis=0, name=None): axis = axis[0] return core.ops.concat(input, 'axis', axis) - if not isinstance(input, list): - warnings.warn( - "The type of input in concat should be list, but received %s." 
% - (type(input))) + check_type(input, 'input', (list, tuple, Variable), 'concat') + if not isinstance(input, Variable): + for id, x in enumerate(input): + check_variable_and_dtype( + x, 'input[' + str(id) + ']', + ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], + 'concat') + if x.dtype != input[0].dtype: + raise TypeError( + "All the Tensors in the input must have the same data type.") + else: input = [input] - for id, x in enumerate(input): - check_variable_and_dtype( - x, 'input[' + str(id) + ']', - ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat') - if x.dtype != input[0].dtype: - raise TypeError( - "All the Tensors in the input must have the same data type.") check_type(axis, 'axis', (int, Variable), 'concat') if isinstance(axis, Variable): @@ -343,7 +344,7 @@ def concat(input, axis=0, name=None): if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \ - "number of the elements must be 1, but received %s." % len(x) + "number of the elements must be 1, but received %s." % len(input) out_index = helper.create_variable_for_type_inference(dtype="int32") helper.append_op( type='tensor_array_to_tensor', @@ -1045,8 +1046,7 @@ def ones(shape, dtype, force_cpu=False): Returns: Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1. Raises: - TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None - and the data type of out Tensor must be the same as the dtype. + TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64. TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be int32 or int64 when it's a Tensor. @@ -1082,8 +1082,7 @@ def zeros(shape, dtype, force_cpu=False, name=None): Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0. 
Raises: - TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None - and the data type of out Tensor must be the same as the dtype. + TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64. TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be int32 or int64 when it's a Tensor. Examples: diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index 88d71ae186e802065d5ee9940e6de7a68afde821..10f93f90fbb875f3fd546fb8b561ec0d1933294c 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -136,8 +136,7 @@ def ones(shape, dtype=None, name=None): Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1. Raises: - TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None - and the data type of out Tensor must be the same as the dtype. + TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None. TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be int32 or int64 when it's a Tensor. @@ -229,8 +228,7 @@ def zeros(shape, dtype=None, name=None): Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0. Raises: - TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None - and the data type of out Tensor must be the same as the dtype. + TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None. TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be int32 or int64 when it's a Tensor. 
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index f844847a8d94f408f42a74892f1f8e2260dba553..c2f67b4e13855b1a3e29e2bdd675dbf418b0a9a1 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -59,8 +59,8 @@ def concat(x, axis=0, name=None): This OP concatenates the input along the axis. Args: - x(list): List of input Tensors with data type float16, float32, float64, int32, int64. - All the Tensors in ``x`` must have same data type. + x(list|tuple): ``x`` is a Tensor list or Tensor tuple with data type bool, float16, + float32, float64, int32, int64. All the Tensors in ``x`` must have the same data type. axis(int|Tensor, optional): Specify the axis to operate on the input Tensors. It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, @@ -69,7 +69,8 @@ def concat(x, axis=0, name=None): need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Raises: - TypeError: The dtype of ``x`` must be one of float16, float32, float64, int32 and int64. + TypeError: ``x`` must be list or tuple. + TypeError: The data type of ``x`` must be one of bool, float16, float32, float64, int32 and int64. TypeError: The ``axis`` must be int or Tensor. The dtype of ``axis`` must be int32 or int64 when it's a Tensor. TypeError: All the Tensors in ``x`` must have the same data type. 
@@ -83,21 +84,21 @@ def concat(x, axis=0, name=None): import numpy as np paddle.enable_imperative() # Now we are in imperative mode - in1 = np.array([[1,2,3], - [4,5,6]]) - in2 = np.array([[11,12,13], - [14,15,16]]) - in3 = np.array([[21,22], - [23,24]]) + in1 = np.array([[1, 2, 3], + [4, 5, 6]]) + in2 = np.array([[11, 12, 13], + [14, 15, 16]]) + in3 = np.array([[21, 22], + [23, 24]]) x1 = paddle.imperative.to_variable(in1) x2 = paddle.imperative.to_variable(in2) x3 = paddle.imperative.to_variable(in3) zero = paddle.full(shape=[1], dtype='int32', fill_value=0) # When the axis is negative, the real axis is (axis + Rank(x)) # As follow, axis is -1, Rank(x) is 2, the real axis is 1 - out1 = paddle.concat(x=[x1,x2,x3], axis=-1) - out2 = paddle.concat(x=[x1,x2], axis=0) - out3 = paddle.concat(x=[x1,x2], axis=zero) + out1 = paddle.concat(x=[x1, x2, x3], axis=-1) + out2 = paddle.concat(x=[x1, x2], axis=0) + out3 = paddle.concat(x=[x1, x2], axis=zero) # out1 # [[ 1 2 3 11 12 13 21 22] # [ 4 5 6 14 15 16 23 24]] @@ -107,6 +108,7 @@ def concat(x, axis=0, name=None): # [11 12 13] # [14 15 16]] """ + check_type(x, 'x', (list, tuple), 'concat') return paddle.fluid.layers.concat(input=x, axis=axis, name=name)