Commit 200cc5e2 authored by zhupengyang, committed by Kaipeng Deng

fix APIs: cast, concat, tensor_array_to_tensor, argmin, argmax, argsort (#20363)

* fix APIs: cast, concat, tensor_array_to_tensor, argmin, argmax, argsort

test=develop
test=document_fix
Parent 5ea2cc67
@@ -317,16 +317,16 @@ paddle.fluid.layers.load (ArgSpec(args=['out', 'file_path', 'load_as_fp16'], var
 paddle.fluid.layers.create_tensor (ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)), ('document', 'fdc2d964488e99fb0743887454c34e36'))
 paddle.fluid.layers.create_parameter (ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)), ('document', '727aa63c061919bee38547fb126d9428'))
 paddle.fluid.layers.create_global_var (ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)), ('document', 'fa7f74cfb940521cc9fdffabc83debbf'))
-paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '1e44a534cf7d26ab230aa9f5e4e0525a'))
+paddle.fluid.layers.cast (ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None), ('document', '45df178cbd8c302f92c30ebdaaa6fa8a'))
-paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', '764c095ba4562ae740f979e970152d6e'))
+paddle.fluid.layers.tensor_array_to_tensor (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(1, None)), ('document', 'dd7d2f1e12a8a4225d017209866e5621'))
-paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'b3f30feb5dec8f110d7393ffeb30dbd9'))
+paddle.fluid.layers.concat (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)), ('document', 'ec7d6e716fb29ef1e73e1e3efa5ca46b'))
 paddle.fluid.layers.sums (ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)), ('document', '5df743d578638cd2bbb9369499b44af4'))
 paddle.fluid.layers.assign (ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)), ('document', '8bd94aef4e123986d9a8c29f67b5532b'))
 paddle.fluid.layers.fill_constant_batch_size_like (ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)), ('document', '37a288e4400f6d5510e982827461c11b'))
 paddle.fluid.layers.fill_constant (ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)), ('document', '66e1e468666dd47e5b2715226cebeac0'))
-paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '3dd54487232d05df4d70fba94b7d0b79'))
+paddle.fluid.layers.argmin (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '53629e27597e5dfb7020aac5bc639ebb'))
-paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', '7f47cc9aa7531b6bd37c5c96bc7f0469'))
+paddle.fluid.layers.argmax (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)), ('document', 'd9a89fbedbaebd5f65897ac75ee636f3'))
-paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '9792371e3b66258531225a5551de8961'))
+paddle.fluid.layers.argsort (ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)), ('document', '8c7966eb4b37b2272a16717cac3a876c'))
 paddle.fluid.layers.ones (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', '812c623ed52610b9773f9fc05413bc34'))
 paddle.fluid.layers.zeros (ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)), ('document', '95379f9288c2d05356ec0e2375c6bc57'))
 paddle.fluid.layers.reverse (ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None), ('document', '628135603692137d52bcf5a8d8d6816d'))
...
@@ -149,23 +149,45 @@ def create_global_var(shape,
def cast(x, dtype):
    """
-    This layer takes in the Variable :attr:`x` with :attr:`x.dtype` and casts
-    it to the output with :attr:`dtype`. It's meaningless if the output
-    dtype equals the input dtype, but it's fine if you do so.
+    This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
+    to the output with :attr:`dtype`. It's meaningless if the output dtype
+    equals the input dtype, but it's fine if you do so.

    Args:
-        x (Variable): The input Variable for casting.
-        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable.
+        x(Variable): An input N-D Tensor with data type bool, float16,
+            float32, float64, int32, int64, uint8.
+        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
+            bool, float16, float32, float64, int8, int32, int64, uint8.

    Returns:
-        Variable: The output Variable after casting.
+        Variable: A Tensor with the same shape as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            data = fluid.layers.data(name='x', shape=[13], dtype='float32')
-            result = fluid.layers.cast(x=data, dtype='float64')
+            import numpy as np
+
+            place = fluid.core.CPUPlace()
+            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
+            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
+            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)
+
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+
+            x_i_lod = fluid.core.LoDTensor()
+            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
+            x_i_lod.set_recursive_sequence_lengths([[0,2]])
+            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
+            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
+            print(np.array(res1[0]), np.array(res1[0]).dtype)
+            # [[ 1 254]
+            #  [ 0 4]] uint8
+            print(np.array(res2[0]), np.array(res2[0]).dtype)
+            # [[ 1 -2]
+            #  [ 0 4]] int32
    """
    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
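For a quick check of the casting behavior documented above without building a LoDTensor feed, a minimal dygraph sketch can be used (illustrative only, not part of the patch; it assumes dygraph mode supports `fluid.layers.cast`):

    import numpy as np
    import paddle.fluid as fluid

    # Sketch: casting float32 -> int32 truncates the fractional part,
    # matching numpy's astype for these values.
    with fluid.dygraph.guard():
        data = fluid.dygraph.to_variable(np.array([[1.3, -2.4], [0.0, 4.0]]).astype("float32"))
        casted = fluid.layers.cast(x=data, dtype="int32")
        print(casted.numpy())  # expected: [[ 1 -2] [ 0  4]]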
@@ -182,27 +204,47 @@ def concat(input, axis=0, name=None):
    """
    **Concat**

-    This function concatenates the input along the axis mentioned
-    and returns that as the output.
+    This OP concatenates the input along the axis.

    Args:
-        input(list): List of tensors to be concatenated
-        axis(int): Integer axis along which the tensors will be concatenated
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        input(list): List of input Tensors with data type float32, float64, int32,
+            int64.
+        axis(int, optional): Axis to concatenate the input Tensors along. The
+            effective range is [-R, R), where R is Rank(x). When axis<0, it works
+            the same way as axis+R. Default is 0.
+        name (str, optional): The default value is None. Normally there is no
+            need for user to set this property. For more information, please
+            refer to :ref:`api_guide_Name`.

    Returns:
-        Variable: Output variable of the concatenation
+        Variable: A Tensor with the same data type as input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            a = fluid.layers.data(name='a', shape=[2, 13], dtype='float32')
-            b = fluid.layers.data(name='b', shape=[2, 3], dtype='float32')
-            c = fluid.layers.data(name='c', shape=[2, 2], dtype='float32')
-            d = fluid.layers.data(name='d', shape=[2, 5], dtype='float32')
-            out = fluid.layers.concat(input=[a, b, c, d], axis=2)
+            import numpy as np
+
+            in1 = np.array([[1,2,3],
+                            [4,5,6]])
+            in2 = np.array([[11,12,13],
+                            [14,15,16]])
+            in3 = np.array([[21,22],
+                            [23,24]])
+            with fluid.dygraph.guard():
+                x1 = fluid.dygraph.to_variable(in1)
+                x2 = fluid.dygraph.to_variable(in2)
+                x3 = fluid.dygraph.to_variable(in3)
+                out1 = fluid.layers.concat(input=[x1,x2,x3], axis=-1)
+                out2 = fluid.layers.concat(input=[x1,x2], axis=0)
+                print(out1.numpy())
+                # [[ 1  2  3 11 12 13 21 22]
+                #  [ 4  5  6 14 15 16 23 24]]
+                print(out2.numpy())
+                # [[ 1  2  3]
+                #  [ 4  5  6]
+                #  [11 12 13]
+                #  [14 15 16]]
    """
    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
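The new docstring shows dygraph usage; the same op works in a static-graph program as well. A minimal sketch under the same fluid API (variable names here are illustrative):

    import numpy as np
    import paddle.fluid as fluid

    # Sketch: concatenate a [2, 3] and a [2, 2] float32 input along axis=1,
    # which should give a [2, 5] result.
    a = fluid.data(name="a", shape=[2, 3], dtype="float32")
    b = fluid.data(name="b", shape=[2, 2], dtype="float32")
    out = fluid.layers.concat(input=[a, b], axis=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(),
                  feed={"a": np.ones((2, 3), dtype="float32"),
                        "b": np.zeros((2, 2), dtype="float32")},
                  fetch_list=[out])
    print(res[0].shape)  # expected: (2, 5)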
@@ -216,47 +258,48 @@ def concat(input, axis=0, name=None):
def tensor_array_to_tensor(input, axis=1, name=None):
    """
-    This function concatenates the input LodTensorArray along the axis mentioned
-    and returns that as the output.
-
-    A simple example as below:
-
-    .. code-block:: text
-
-        Given:
-
-        input.data = {[[0.6, 0.1, 0.3],
-                       [0.5, 0.3, 0.2]],
-                      [[1.3],
-                       [1.8]],
-                      [[2.3, 2.1],
-                       [2.5, 2.4]]}
-
-        axis = 1
-
-        Then:
-
-        output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
-                       [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]
-
-        output_index.data = [3, 1, 2]
+    This OP concatenates the input LodTensorArray along the axis.

    Args:
-        input(list): Input LodTensorArray
-        axis(int): Integer axis along which the tensors will be concatenated
-        name(str|None): A name for this layer(optional). If set None, the layer
-            will be named automatically.
+        input(Variable): A LodTensorArray with data type float32, float64, int32,
+            int64.
+        axis(int, optional): Axis to concatenate the input tensors along. The
+            effective range is [-R, R), where R is Rank(x). When axis<0, it works
+            the same way as axis+R. Default is 1.
+        name (str, optional): The default value is None. Normally there is no
+            need for user to set this property. For more information, please
+            refer to :ref:`api_guide_Name`.

    Returns:
-        Variable: Output variable of the concatenation
-        Variable: The input LodTensorArray items' dims along the axis
+        Variable: A LoDTensor with the same data type as input's.
+        Variable: The input LodTensorArray items' dims along the axis.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            tensor_array = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
-            output, output_index = fluid.layers.tensor_array_to_tensor(input=tensor_array)
+            import numpy as np
+
+            place = fluid.CPUPlace()
+
+            x1 = fluid.data(name="x", shape=[2,2], lod_level=0)
+            tmp = fluid.layers.fill_constant(shape=[2,3], dtype="float32", value=1)
+            x_arr = fluid.layers.create_array(dtype="float32")
+            c0 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
+            fluid.layers.array_write(x=tmp, i=c0, array=x_arr)
+            c1 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
+            fluid.layers.array_write(x=x1, i=c1, array=x_arr)
+            output, output_index = fluid.layers.tensor_array_to_tensor(input=x_arr, axis=1)
+
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+
+            feedx = fluid.LoDTensor()
+            feedx.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
+            res = exe.run(fluid.default_main_program(), feed={'x':feedx}, fetch_list=[output], return_numpy=False)
+            print(np.array(res[0]))
+            # [[ 1.  1.  1.  1.3 -2.4]
+            #  [ 1.  1.  1.  0.   4. ]]
    """
    helper = LayerHelper('tensor_array_to_tensor', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
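The example above fetches only the concatenated tensor; the second return value, `output_index`, can be fetched the same way. A sketch with constant inputs (illustrative; the expected `output_index` values follow the old docstring's description of per-item sizes along the axis and are not verified output):

    import paddle.fluid as fluid

    # Sketch: two array items with widths 3 and 2 along axis=1; output_index
    # should record those per-item sizes, i.e. [3, 2].
    t0 = fluid.layers.fill_constant(shape=[2, 3], dtype="float32", value=1)
    t1 = fluid.layers.fill_constant(shape=[2, 2], dtype="float32", value=2)
    arr = fluid.layers.create_array(dtype="float32")
    i0 = fluid.layers.fill_constant(shape=[1], dtype="int64", value=0)
    i1 = fluid.layers.fill_constant(shape=[1], dtype="int64", value=1)
    fluid.layers.array_write(x=t0, i=i0, array=arr)
    fluid.layers.array_write(x=t1, i=i1, array=arr)
    out, out_index = fluid.layers.tensor_array_to_tensor(input=arr, axis=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    res = exe.run(fluid.default_main_program(), fetch_list=[out, out_index])
    print(res[0].shape)  # expected: (2, 5)
    print(res[1])        # expected: [3 2]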
@@ -498,24 +541,50 @@ def argmin(x, axis=0):
    """
    **argmin**

-    This function computes the indices of the min elements
-    of the input tensor's element along the provided axis.
+    This OP computes the indices of the min elements of the input tensor's
+    element along the provided axis.

    Args:
-        x(Variable): The input to compute the indices of
-                     the min elements.
-        axis(int): Axis to compute indices along.
+        x(Variable): An input N-D Tensor with type float32, float64, int16,
+            int32, int64, uint8.
+        axis(int, optional): Axis to compute indices along. The effective range
+            is [-R, R), where R is Rank(x). When axis<0, it works the same way
+            as axis+R. Default is 0.

    Returns:
-        Variable: The tensor variable storing the output
+        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
-            out = fluid.layers.argmin(x, axis=0)
-            out = fluid.layers.argmin(x, axis=-1)
+            import numpy as np
+
+            in1 = np.array([[[5,8,9,5],
+                             [0,0,1,7],
+                             [6,9,2,4]],
+                            [[5,2,4,2],
+                             [4,7,7,9],
+                             [1,7,0,6]]])
+            with fluid.dygraph.guard():
+                x = fluid.dygraph.to_variable(in1)
+                out1 = fluid.layers.argmin(x=x, axis=-1)
+                out2 = fluid.layers.argmin(x=x, axis=0)
+                out3 = fluid.layers.argmin(x=x, axis=1)
+                out4 = fluid.layers.argmin(x=x, axis=2)
+                print(out1.numpy())
+                # [[0 0 2]
+                #  [1 0 2]]
+                print(out2.numpy())
+                # [[0 1 1 1]
+                #  [0 0 0 0]
+                #  [1 1 1 0]]
+                print(out3.numpy())
+                # [[1 1 1 2]
+                #  [2 0 2 0]]
+                print(out4.numpy())
+                # [[0 0 2]
+                #  [1 0 2]]
    """
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
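As a sanity check on the behavior described in the new argmin docstring, the result should agree with numpy.argmin along the same axis. A minimal dygraph sketch (illustrative names, random data so ties are not a concern):

    import numpy as np
    import paddle.fluid as fluid

    # Sketch: compare fluid.layers.argmin against numpy.argmin on the same data.
    data = np.random.random((2, 3, 4)).astype("float32")
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(data)
        out = fluid.layers.argmin(x=x, axis=1)
        print((out.numpy() == np.argmin(data, axis=1)).all())  # expected: True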
@@ -531,24 +600,50 @@ def argmax(x, axis=0):
    """
    **argmax**

-    This function computes the indices of the max elements
-    of the input tensor's element along the provided axis.
+    This OP computes the indices of the max elements of the input tensor's
+    element along the provided axis.

    Args:
-        x(Variable): The input to compute the indices of
-                     the max elements.
-        axis(int): Axis to compute indices along.
+        x(Variable): An input N-D Tensor with type float32, float64, int16,
+            int32, int64, uint8.
+        axis(int, optional): Axis to compute indices along. The effective range
+            is [-R, R), where R is Rank(x). When axis<0, it works the same way
+            as axis+R. Default is 0.

    Returns:
-        Variable: The tensor variable storing the output
+        Variable: A Tensor with data type int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
-            out = fluid.layers.argmax(x, axis=0)
-            out = fluid.layers.argmax(x, axis=-1)
+            import numpy as np
+
+            in1 = np.array([[[5,8,9,5],
+                             [0,0,1,7],
+                             [6,9,2,4]],
+                            [[5,2,4,2],
+                             [4,7,7,9],
+                             [1,7,0,6]]])
+            with fluid.dygraph.guard():
+                x = fluid.dygraph.to_variable(in1)
+                out1 = fluid.layers.argmax(x=x, axis=-1)
+                out2 = fluid.layers.argmax(x=x, axis=0)
+                out3 = fluid.layers.argmax(x=x, axis=1)
+                out4 = fluid.layers.argmax(x=x, axis=2)
+                print(out1.numpy())
+                # [[2 3 1]
+                #  [0 3 1]]
+                print(out2.numpy())
+                # [[0 0 0 0]
+                #  [1 1 1 1]
+                #  [0 0 0 1]]
+                print(out3.numpy())
+                # [[2 2 0 1]
+                #  [0 1 1 1]]
+                print(out4.numpy())
+                # [[2 3 1]
+                #  [0 3 1]]
    """
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
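The negative-axis rule stated in the new argmax docstring (axis<0 works the same way as axis+R) can be checked directly; a minimal dygraph sketch with illustrative names:

    import numpy as np
    import paddle.fluid as fluid

    # Sketch: for a rank-3 input, axis=-1 should behave exactly like axis=2.
    data = np.random.random((2, 3, 4)).astype("float32")
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(data)
        out_neg = fluid.layers.argmax(x=x, axis=-1)
        out_pos = fluid.layers.argmax(x=x, axis=2)
        print((out_neg.numpy() == out_pos.numpy()).all())  # expected: True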
@@ -562,44 +657,70 @@ def argmax(x, axis=0):
def argsort(input, axis=-1, name=None):
    """
-    Performs sorting on the input Variable along the given axis, and outputs
-    sorted data Varibale and its corresponding index Variable with the same
-    shape as :attr:`input`.
-
-    .. code-block:: text
-
-        For example, the given axis is -1 and the input Variable
-
-            input = [[0.15849551, 0.45865775, 0.8563702 ],
-                     [0.12070083, 0.28766365, 0.18776911]],
-
-        after argsort, the sorted Vairable becomes
-
-            out = [[0.15849551, 0.45865775, 0.8563702 ],
-                   [0.12070083, 0.18776911, 0.28766365]],
-
-        and the sorted indices along the given axis turn outs to be
-
-            indices = [[0, 1, 2],
-                       [0, 2, 1]]
+    This OP sorts the input along the given axis, and returns sorted output
+    data Variable and its corresponding index Variable with the same shape as
+    :attr:`input`.

    Args:
-        input(Variable): The input Variable for sorting.
-        axis(int): The axis along which to sort the input Variable. When
-            :attr:`axis` < 0, the actual axis will be :attr:`axis` +
-            rank(:attr:`input`). Default -1, the last dimension.
-        name(str|None): (optional) A name for this layer. If set None, the
-            layer will be named automatically.
+        input(Variable): An input N-D Tensor with type float32, float64, int16,
+            int32, int64, uint8.
+        axis(int, optional): Axis along which to sort the input. The effective
+            range is [-R, R), where R is Rank(x). When axis<0, it works the same
+            way as axis+R. Default is -1.
+        name(str, optional): The default value is None. Normally there is no
+            need for user to set this property. For more information, please
+            refer to :ref:`api_guide_Name`.

    Returns:
-        tuple: A tuple of sorted data Variable and the sorted indices.
+        tuple: A tuple of sorted data Variable(with the same shape and data
+            type as input) and the sorted indices(with the same shape as input's
+            and with data type int64).

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
-            x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
-            out, indices = fluid.layers.argsort(input=x, axis=0)
+            import numpy as np
+
+            in1 = np.array([[[5,8,9,5],
+                             [0,0,1,7],
+                             [6,9,2,4]],
+                            [[5,2,4,2],
+                             [4,7,7,9],
+                             [1,7,0,6]]]).astype(np.float32)
+            with fluid.dygraph.guard():
+                x = fluid.dygraph.to_variable(in1)
+                out1 = fluid.layers.argsort(input=x, axis=-1)
+                out2 = fluid.layers.argsort(input=x, axis=0)
+                out3 = fluid.layers.argsort(input=x, axis=1)
+                print(out1[0].numpy())
+                # [[[5. 5. 8. 9.]
+                #   [0. 0. 1. 7.]
+                #   [2. 4. 6. 9.]]
+                #  [[2. 2. 4. 5.]
+                #   [4. 7. 7. 9.]
+                #   [0. 1. 6. 7.]]]
+                print(out1[1].numpy())
+                # [[[0 3 1 2]
+                #   [0 1 2 3]
+                #   [2 3 0 1]]
+                #  [[1 3 2 0]
+                #   [0 1 2 3]
+                #   [2 0 3 1]]]
+                print(out2[0].numpy())
+                # [[[5. 2. 4. 2.]
+                #   [0. 0. 1. 7.]
+                #   [1. 7. 0. 4.]]
+                #  [[5. 8. 9. 5.]
+                #   [4. 7. 7. 9.]
+                #   [6. 9. 2. 6.]]]
+                print(out3[0].numpy())
+                # [[[0. 0. 1. 4.]
+                #   [5. 8. 2. 5.]
+                #   [6. 9. 9. 7.]]
+                #  [[1. 2. 0. 2.]
+                #   [4. 7. 4. 6.]
+                #   [5. 7. 7. 9.]]]
    """
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
...
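For argsort, the two returned Variables are linked: the indices select the sorted values out of the input. A minimal dygraph sketch that cross-checks both outputs against numpy (illustrative names; random data, so ties do not affect the index comparison):

    import numpy as np
    import paddle.fluid as fluid

    # Sketch: sorted values and indices from fluid.layers.argsort should match
    # numpy's sort/argsort along the same axis.
    data = np.random.random((3, 4)).astype("float32")
    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(data)
        sorted_out, indices = fluid.layers.argsort(input=x, axis=1)
        print(np.allclose(sorted_out.numpy(), np.sort(data, axis=1)))  # expected: True
        print((indices.numpy() == np.argsort(data, axis=1)).all())     # expected: True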