From ff717d515810167bd8114806124d319e62377bb7 Mon Sep 17 00:00:00 2001
From: wangchaochaohu
Date: Tue, 4 Aug 2020 11:09:09 +0800
Subject: [PATCH] Add support for tuple of concat Op test=develop (#25800)

---
 paddle/fluid/operators/concat_op.cc    |  2 +
 paddle/fluid/operators/concat_op.cu.cc |  2 +
 python/paddle/fluid/framework.py       |  2 +-
 python/paddle/fluid/layers/tensor.py   | 53 +++++++++++++-------------
 python/paddle/tensor/creation.py       |  6 +--
 python/paddle/tensor/manipulation.py   | 26 +++++++------
 6 files changed, 47 insertions(+), 44 deletions(-)

diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc
index 890f9beaee6..060f5412f28 100644
--- a/paddle/fluid/operators/concat_op.cc
+++ b/paddle/fluid/operators/concat_op.cc
@@ -207,6 +207,7 @@ REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad,
 REGISTER_OP_CPU_KERNEL(
     concat, ops::ConcatKernel,
     ops::ConcatKernel,
+    ops::ConcatKernel,
     ops::ConcatKernel,
     ops::ConcatKernel,
@@ -215,6 +216,7 @@ REGISTER_OP_CPU_KERNEL(
     concat_grad, ops::ConcatGradKernel,
     ops::ConcatGradKernel,
+    ops::ConcatGradKernel,
     ops::ConcatGradKernel,
     ops::ConcatGradKernel,
diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc
index 334126c4e0b..8c30703f257 100644
--- a/paddle/fluid/operators/concat_op.cu.cc
+++ b/paddle/fluid/operators/concat_op.cu.cc
@@ -20,6 +20,7 @@ namespace plat = paddle::platform;
 REGISTER_OP_CUDA_KERNEL(
     concat, ops::ConcatKernel,
     ops::ConcatKernel,
+    ops::ConcatKernel,
     ops::ConcatKernel,
     ops::ConcatKernel,
     ops::ConcatKernel);
@@ -27,6 +28,7 @@ REGISTER_OP_CUDA_KERNEL(
     concat_grad, ops::ConcatGradKernel,
     ops::ConcatGradKernel,
+    ops::ConcatGradKernel,
     ops::ConcatGradKernel,
     ops::ConcatGradKernel,
     ops::ConcatGradKernel);
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 9ba1b33c739..a7faf4041cf 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -1958,7 +1958,7 @@ class Operator(object):
                         in_proto.name)
                 if found:
                     in_args = inputs[in_proto.name]
-                    if not isinstance(in_args, list):
+                    if not isinstance(in_args, (list, tuple)):
                         in_args = [in_args]
                     if not in_proto.duplicable and len(in_args) > 1:
                         raise ValueError(
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index bba0baac016..e33b34cc925 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -266,8 +266,8 @@ def concat(input, axis=0, name=None):
     This OP concatenates the input along the axis.
 
     Args:
-        input(list): List of input Tensors with data type float16, float32, float64, int32,
-            int64. All the Tensors in ``input`` must have the same data type.
+        input(list|tuple|Tensor): ``input`` can be a Tensor, or a list or tuple of Tensors, with data type
+            bool, float16, float32, float64, int32, int64. All the Tensors in ``input`` must have the same data type.
         axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
             It's a scalar with data type int or a Tensor with shape [1] and data type int32 or int64.
             The effective range is [-R, R), where R is Rank(x). When ``axis < 0``, it works the same way
@@ -276,7 +276,8 @@ def concat(input, axis=0, name=None):
             need for user to set this property. For more information, please
             refer to :ref:`api_guide_Name`.
 
     Raises:
-        TypeError: The dtype of ``input`` must be one of float16, float32, float64, int32 and int64.
+        TypeError: ``input`` must be a list, tuple or Tensor.
+        TypeError: The data type of ``input`` must be one of bool, float16, float32, float64, int32 and int64.
         TypeError: The ``axis`` must be int or Tensor. The dtype of ``axis``
             must be int32 or int64 when it's a Tensor.
         TypeError: All the Tensors in ``input`` must have the same data type.
@@ -289,20 +290,20 @@ def concat(input, axis=0, name=None):
             import paddle.fluid as fluid
             import numpy as np
 
-            in1 = np.array([[1,2,3],
-                            [4,5,6]])
-            in2 = np.array([[11,12,13],
-                            [14,15,16]])
-            in3 = np.array([[21,22],
-                            [23,24]])
+            in1 = np.array([[1, 2, 3],
+                            [4, 5, 6]])
+            in2 = np.array([[11, 12, 13],
+                            [14, 15, 16]])
+            in3 = np.array([[21, 22],
+                            [23, 24]])
             with fluid.dygraph.guard():
                 x1 = fluid.dygraph.to_variable(in1)
                 x2 = fluid.dygraph.to_variable(in2)
                 x3 = fluid.dygraph.to_variable(in3)
                 # When the axis is negative, the real axis is (axis + Rank(x)).
                 # As follows, axis is -1, Rank(x) is 2, the real axis is 1
-                out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
-                out2 = fluid.layers.concat(input=[x1,x2], axis=0)
+                out1 = fluid.layers.concat(input=[x1, x2, x3], axis=-1)
+                out2 = fluid.layers.concat(input=[x1, x2], axis=0)
                 print(out1.numpy())
                 # [[ 1  2  3 11 12 13 21 22]
                 #  [ 4  5  6 14 15 16 23 24]]
@@ -319,18 +320,18 @@ def concat(input, axis=0, name=None):
             axis = axis[0]
         return core.ops.concat(input, 'axis', axis)
 
-    if not isinstance(input, list):
-        warnings.warn(
-            "The type of input in concat should be list, but received %s." %
-            (type(input)))
+    check_type(input, 'input', (list, tuple, Variable), 'concat')
+    if not isinstance(input, Variable):
+        for id, x in enumerate(input):
+            check_variable_and_dtype(
+                x, 'input[' + str(id) + ']',
+                ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+                'concat')
+            if x.dtype != input[0].dtype:
+                raise TypeError(
+                    "All the Tensors in the input must have the same data type.")
+    else:
         input = [input]
-    for id, x in enumerate(input):
-        check_variable_and_dtype(
-            x, 'input[' + str(id) + ']',
-            ['float16', 'float32', 'float64', 'int32', 'int64'], 'concat')
-        if x.dtype != input[0].dtype:
-            raise TypeError(
-                "All the Tensors in the input must have the same data type.")
     check_type(axis, 'axis', (int, Variable), 'concat')
     if isinstance(axis, Variable):
@@ -343,7 +344,7 @@ def concat(input, axis=0, name=None):
     if input[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
         assert len(input) == 1, "If the elements of 'input' in concat are Variable(LoDTensorArray), " \
-            "number of the elements must be 1, but received %s." % len(x)
+            "number of the elements must be 1, but received %s." % len(input)
         out_index = helper.create_variable_for_type_inference(dtype="int32")
         helper.append_op(
             type='tensor_array_to_tensor',
@@ -1045,8 +1046,7 @@ def ones(shape, dtype, force_cpu=False):
     Returns:
         Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
     Raises:
-        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None
-            and the data type of out Tensor must be the same as the dtype.
+        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64.
         TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be
             int32 or int64 when it's a Tensor.
 
@@ -1082,8 +1082,7 @@ def zeros(shape, dtype, force_cpu=False, name=None):
         Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
     Raises:
-        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None
-            and the data type of out Tensor must be the same as the dtype.
+        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64.
         TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be
             int32 or int64 when it's a Tensor.
     Examples:
diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py
index 88d71ae186e..10f93f90fbb 100644
--- a/python/paddle/tensor/creation.py
+++ b/python/paddle/tensor/creation.py
@@ -136,8 +136,7 @@ def ones(shape, dtype=None, name=None):
         Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 1.
 
     Raises:
-        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None
-            and the data type of out Tensor must be the same as the dtype.
+        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None.
         TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be
             int32 or int64 when it's a Tensor.
 
@@ -229,8 +228,7 @@ def zeros(shape, dtype=None, name=None):
         Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.
 
     Raises:
-        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None
-            and the data type of out Tensor must be the same as the dtype.
+        TypeError: The ``dtype`` must be one of bool, float16, float32, float64, int32, int64 and None.
         TypeError: The ``shape`` must be one of list, tuple and Tensor. The data type of ``shape`` must be
             int32 or int64 when it's a Tensor.
 
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index f844847a8d9..c2f67b4e138 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -59,8 +59,8 @@ def concat(x, axis=0, name=None):
     This OP concatenates the input along the axis.
 
     Args:
-        x(list): List of input Tensors with data type float16, float32, float64, int32, int64.
-            All the Tensors in ``x`` must have same data type.
+        x(list|tuple): ``x`` is a list or tuple of Tensors with data type bool, float16,
+            float32, float64, int32, int64. All the Tensors in ``x`` must have the same data type.
         axis(int|Tensor, optional): Specify the axis to operate on the input Tensors.
             It's a scalar with data type int or a Tensor with shape [1] and data type int32
             or int64. The effective range is [-R, R), where R is Rank(x). When ``axis < 0``,
@@ -69,7 +69,8 @@ def concat(x, axis=0, name=None):
             need for user to set this property. For more information, please
             refer to :ref:`api_guide_Name`.
 
     Raises:
-        TypeError: The dtype of ``x`` must be one of float16, float32, float64, int32 and int64.
+        TypeError: ``x`` must be a list or tuple.
+        TypeError: The data type of ``x`` must be one of bool, float16, float32, float64, int32 and int64.
         TypeError: The ``axis`` must be int or Tensor. The dtype of ``axis``
             must be int32 or int64 when it's a Tensor.
         TypeError: All the Tensors in ``x`` must have the same data type.
@@ -83,21 +84,21 @@ def concat(x, axis=0, name=None):
             import paddle
             import numpy as np
 
             paddle.enable_imperative()  # Now we are in imperative mode
-            in1 = np.array([[1,2,3],
-                            [4,5,6]])
-            in2 = np.array([[11,12,13],
-                            [14,15,16]])
-            in3 = np.array([[21,22],
-                            [23,24]])
+            in1 = np.array([[1, 2, 3],
+                            [4, 5, 6]])
+            in2 = np.array([[11, 12, 13],
+                            [14, 15, 16]])
+            in3 = np.array([[21, 22],
+                            [23, 24]])
             x1 = paddle.imperative.to_variable(in1)
             x2 = paddle.imperative.to_variable(in2)
             x3 = paddle.imperative.to_variable(in3)
             zero = paddle.full(shape=[1], dtype='int32', fill_value=0)
             # When the axis is negative, the real axis is (axis + Rank(x))
             # As follow, axis is -1, Rank(x) is 2, the real axis is 1
-            out1 = paddle.concat(x=[x1,x2,x3], axis=-1)
-            out2 = paddle.concat(x=[x1,x2], axis=0)
-            out3 = paddle.concat(x=[x1,x2], axis=zero)
+            out1 = paddle.concat(x=[x1, x2, x3], axis=-1)
+            out2 = paddle.concat(x=[x1, x2], axis=0)
+            out3 = paddle.concat(x=[x1, x2], axis=zero)
             # out1
             # [[ 1  2  3 11 12 13 21 22]
             #  [ 4  5  6 14 15 16 23 24]]
@@ -107,6 +108,7 @@ def concat(x, axis=0, name=None):
             #  [11 12 13]
             #  [14 15 16]]
     """
+    check_type(x, 'x', (list, tuple), 'concat')
     return paddle.fluid.layers.concat(input=x, axis=axis, name=name)
-- 
GitLab
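
Below is a minimal usage sketch of what this change enables: passing a tuple of Tensors (including bool Tensors, for which kernels are registered above) straight to concat. It is not part of the patch itself; it follows the imperative-mode example from the manipulation.py docstring and assumes a Paddle build that already contains this commit.

    import paddle
    import numpy as np

    paddle.enable_imperative()  # imperative (dygraph) mode, as in the docstring example

    x1 = paddle.imperative.to_variable(np.array([[1, 2, 3], [4, 5, 6]]))
    x2 = paddle.imperative.to_variable(np.array([[11, 12, 13], [14, 15, 16]]))

    # A tuple of Tensors is now accepted wherever a list was accepted before.
    out = paddle.concat(x=(x1, x2), axis=0)
    # out:
    # [[ 1  2  3]
    #  [ 4  5  6]
    #  [11 12 13]
    #  [14 15 16]]

    # bool inputs also work, thanks to the newly registered bool kernels.
    b = paddle.imperative.to_variable(np.array([[True, False]]))
    out_bool = paddle.concat(x=(b, b), axis=0)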