search.py 40.7 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
C
Chengmo 已提交
14
from __future__ import print_function
15
import numpy as np
Z
zhiboniu 已提交
16
import paddle
17
from ..framework import LayerHelper
C
Chengmo 已提交
18
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
Z
zhiboniu 已提交
19
from ..fluid import layers
20 21
from ..framework import core, in_dygraph_mode, _non_static_mode
from ..fluid.framework import _in_legacy_dygraph
22 23 24
from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
W
wanghuancoder 已提交
25
from paddle import _C_ops
Z
zhiboniu 已提交
26
from .logic import logical_not
27

28
# TODO: define searching & indexing functions of a tensor
29 30
# from ..fluid.layers import has_inf  #DEFINE_ALIAS
# from ..fluid.layers import has_nan  #DEFINE_ALIAS
31

32 33
__all__ = []

34

35 36
def argsort(x, axis=-1, descending=False, name=None):
    """
37
    Sorts the input along the given axis, and returns the corresponding index tensor for the sorted output values. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
38 39 40 41 42 43

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
C
Chen Long 已提交
44
            as axis+R. Default is -1.
45 46 47
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
48
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
49 50 51 52 53 54

    Returns:
        Tensor: sorted indices(with the same shape as ``x``
        and with data type int64).

    Examples:
李灿 已提交
55

56
        .. code-block:: python
李灿 已提交
57

58 59
            import paddle
            
60 61 62 63 64 65 66
            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]], 
                                dtype='float32')
C
Chen Long 已提交
67 68 69 70
            out1 = paddle.argsort(x, axis=-1)
            out2 = paddle.argsort(x, axis=0)
            out3 = paddle.argsort(x, axis=1)
            
N
Noel 已提交
71
            print(out1)
W
wawltor 已提交
72 73 74
            #[[[0 3 1 2]
            #  [0 1 2 3]
            #  [2 3 0 1]]
75
            # [[1 3 2 0]
W
wawltor 已提交
76 77
            #  [0 1 2 3]
            #  [2 0 3 1]]]
C
Chen Long 已提交
78
            
N
Noel 已提交
79
            print(out2)
W
wawltor 已提交
80 81 82 83 84 85
            #[[[0 1 1 1]
            #  [0 0 0 0]
            #  [1 1 1 0]]
            # [[1 0 0 0]
            #  [1 1 1 1]
            #  [0 0 0 1]]]
C
Chen Long 已提交
86
            
N
Noel 已提交
87
            print(out3)
W
wawltor 已提交
88 89 90 91 92 93
            #[[[1 1 1 2]
            #  [0 0 2 0]
            #  [2 2 0 1]]
            # [[2 0 2 0]
            #  [1 1 0 2]
            #  [0 2 1 1]]]
94
    """
H
hong 已提交
95
    if in_dygraph_mode():
96
        _, ids = _C_ops.final_state_argsort(x, axis, descending)
H
hong 已提交
97 98 99
        return ids

    if _in_legacy_dygraph():
W
wanghuancoder 已提交
100
        _, ids = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
101 102 103 104 105 106
        return ids
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort')

    helper = LayerHelper("argsort", **locals())
107 108 109 110 111 112 113 114 115 116 117 118 119 120
    out = helper.create_variable_for_type_inference(dtype=x.dtype,
                                                    stop_gradient=True)
    ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64,
                                                    stop_gradient=True)
    helper.append_op(type='argsort',
                     inputs={'X': x},
                     outputs={
                         'Out': out,
                         'Indices': ids
                     },
                     attrs={
                         'axis': axis,
                         'descending': descending
                     })
121 122 123
    return ids


124
def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
125
    """
126
    Computes the indices of the max elements of the input tensor's
127 128 129
    element along the provided axis.

    Args:
W
wawltor 已提交
130
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
131 132
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
W
wawltor 已提交
133 134
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
135
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
136
        dtype(str|np.dtype, optional): Data type of the output tensor which can
137
                    be int32, int64. The default value is ``int64`` , and it will
138
                    return the int64 indices.
139
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
140 141

    Returns:
142
        Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64.
143 144 145 146

    Examples:
        .. code-block:: python

W
wawltor 已提交
147
            import paddle
148

149 150 151
            x = paddle.to_tensor([[5,8,9,5],
                                 [0,0,1,7],
                                 [6,9,2,4]])
W
wawltor 已提交
152
            out1 = paddle.argmax(x)
N
Noel 已提交
153
            print(out1) # 2
154
            out2 = paddle.argmax(x, axis=0)
N
Noel 已提交
155
            print(out2) 
156
            # [2, 2, 0, 1]
W
wawltor 已提交
157
            out3 = paddle.argmax(x, axis=-1)
N
Noel 已提交
158
            print(out3) 
159 160 161 162
            # [2, 3, 1]
            out4 = paddle.argmax(x, axis=0, keepdim=True)
            print(out4)
            # [[2, 2, 0, 1]]
163
    """
164 165 166 167
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis'  must be int or None in argmax, but received %s."
            % (type(axis)))
168

169 170 171 172
    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmax could not be None, but received None"
        )
173

174
    var_dtype = convert_np_dtype_to_dtype_(dtype)
W
wawltor 已提交
175 176 177 178 179
    flatten = False
    if axis is None:
        flatten = True
        axis = 0

H
hong 已提交
180 181 182
    if in_dygraph_mode():
        return _C_ops.final_state_argmax(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
W
wanghuancoder 已提交
183 184
        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
W
wawltor 已提交
185 186 187 188 189 190
        return out

    helper = LayerHelper("argmax", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmax')
191
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
192
    attrs = {}
W
wawltor 已提交
193 194 195 196
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
197
    attrs['dtype'] = var_dtype
198 199 200 201
    helper.append_op(type='arg_max',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs=attrs)
W
wawltor 已提交
202 203 204 205
    out.stop_gradient = True
    return out


206
def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
W
wawltor 已提交
207
    """
208
    Computes the indices of the min elements of the input tensor's
W
wawltor 已提交
209 210 211 212 213 214 215 216
    element along the provided axis.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
217
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
218
        dtype(str, optional): Data type of the output tensor which can
219
                    be int32, int64. The default value is 'int64', and it will
W
wawltor 已提交
220
                    return the int64 indices.
221 222
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        
W
wawltor 已提交
223
    Returns:
224
        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.
W
wawltor 已提交
225 226 227

    Examples:
        .. code-block:: python
228
          :name: code-example1
W
wawltor 已提交
229 230
            import paddle

231 232 233
            x =  paddle.to_tensor([[5,8,9,5],
                                     [0,0,1,7],
                                     [6,9,2,4]])
W
wawltor 已提交
234
            out1 = paddle.argmin(x)
N
Noel 已提交
235
            print(out1) # 4
236
            out2 = paddle.argmin(x, axis=0)
N
Noel 已提交
237
            print(out2) 
238
            # [1, 1, 1, 2]
W
wawltor 已提交
239
            out3 = paddle.argmin(x, axis=-1)
N
Noel 已提交
240
            print(out3) 
241 242 243 244
            # [0, 0, 2]
            out4 = paddle.argmin(x, axis=0, keepdim=True)
            print(out4)
            # [[1, 1, 1, 2]]
W
wawltor 已提交
245
    """
246 247 248 249
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis'  must be int or None in argmin, but received %s."
            % (type(axis)))
250

251 252 253 254
    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmin could not be None, but received None"
        )
255

256
    var_dtype = convert_np_dtype_to_dtype_(dtype)
W
wawltor 已提交
257
    flatten = False
258
    if axis is None:
W
wawltor 已提交
259 260 261
        flatten = True
        axis = 0

H
hong 已提交
262 263 264
    if in_dygraph_mode():
        return _C_ops.final_state_argmin(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
W
wanghuancoder 已提交
265 266
        out = _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
W
wawltor 已提交
267 268 269 270 271 272
        return out

    helper = LayerHelper("argmin", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmin')
273
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
W
wawltor 已提交
274
    out = helper.create_variable_for_type_inference(var_dtype)
275
    attrs = {}
W
wawltor 已提交
276
    attrs['keepdims'] = keepdim
277
    attrs['axis'] = axis
W
wawltor 已提交
278
    attrs['flatten'] = flatten
279
    attrs['dtype'] = var_dtype
280 281 282 283
    helper.append_op(type='arg_min',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs=attrs)
284 285
    out.stop_gradient = True
    return out
286 287


288
def index_select(x, index, axis=0, name=None):
289
    """
S
swtkiwi 已提交
290

291 292 293 294
    Returns a new tensor which indexes the ``input`` tensor along dimension ``axis`` using 
    the entries in ``index`` which is a Tensor. The returned tensor has the same number 
    of dimensions as the original ``x`` tensor. The dim-th dimension has the same 
    size as the length of ``index``; other dimensions have the same size as in the ``x`` tensor. 
C
Chengmo 已提交
295

296
    Args:
297 298 299
        x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.
        index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.
        axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.
300
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
301 302

    Returns:
303
        Tensor: A Tensor with same data type as ``x``.
304
    
305 306
    Examples:
        .. code-block:: python
307
            
308 309
            import paddle

310 311 312 313
            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            index = paddle.to_tensor([0, 1, 1], dtype='int32')
314 315 316 317 318 319 320 321
            out_z1 = paddle.index_select(x=x, index=index)
            #[[1. 2. 3. 4.]
            # [5. 6. 7. 8.]
            # [5. 6. 7. 8.]]
            out_z2 = paddle.index_select(x=x, index=index, axis=1)
            #[[ 1.  2.  2.]
            # [ 5.  6.  6.]
            # [ 9. 10. 10.]]
322
    """
323

F
From00 已提交
324 325 326 327
    if in_dygraph_mode():
        return _C_ops.final_state_index_select(x, index, axis)

    if _in_legacy_dygraph():
W
wanghuancoder 已提交
328
        return _C_ops.index_select(x, index, 'dim', axis)
329

330 331 332
    helper = LayerHelper("index_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_select')
333
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
334
                             'paddle.tensor.search.index_select')
335

336
    out = helper.create_variable_for_type_inference(x.dtype)
337

338 339 340 341 342 343 344
    helper.append_op(type='index_select',
                     inputs={
                         'X': x,
                         'Index': index
                     },
                     outputs={'Out': out},
                     attrs={'dim': axis})
345 346 347
    return out


348
def nonzero(x, as_tuple=False):
349 350 351 352 353 354 355 356
    """
    Return a tensor containing the indices of all non-zero elements of the `input` 
    tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension 
    in `input`, each containing the indices (in that dimension) of all non-zero elements 
    of `input`. Given a n-Dimensional `input` tensor with shape [x_1, x_2, ..., x_n], If 
    as_tuple is False, we can get a output tensor with shape [z, n], where `z` is the 
    number of all non-zero elements in the `input` tensor. If as_tuple is True, we can get 
    a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].
C
Chengmo 已提交
357

358
    Args:
359
        x (Tensor): The input tensor variable.
360 361 362
        as_tuple (bool): Return type, Tensor or tuple of Tensor.

    Returns:
363
        Tensor. The data type is int64.
364 365

    Examples:
366

N
Noel 已提交
367
        .. code-block:: python
李灿 已提交
368

369
            import paddle
370 371

            x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
N
Noel 已提交
372 373
                                   [0.0, 2.0, 0.0],
                                   [0.0, 0.0, 3.0]])
374 375
            x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
            out_z1 = paddle.nonzero(x1)
N
Noel 已提交
376
            print(out_z1)
377 378 379 380 381
            #[[0 0]
            # [1 1]
            # [2 2]]
            out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
            for out in out_z1_tuple:
N
Noel 已提交
382
                print(out)
383 384 385 386 387 388 389
            #[[0]
            # [1]
            # [2]]
            #[[0]
            # [1]
            # [2]]
            out_z2 = paddle.nonzero(x2)
N
Noel 已提交
390
            print(out_z2)
391 392 393 394
            #[[1]
            # [3]]
            out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
            for out in out_z2_tuple:
N
Noel 已提交
395
                print(out)
396 397
            #[[1]
            # [3]]
N
Noel 已提交
398

399 400
    """
    list_out = []
401
    shape = x.shape
402 403
    rank = len(shape)

404 405 406
    if in_dygraph_mode():
        outs = _C_ops.final_state_where_index(x)
    elif paddle.in_dynamic_mode():
W
wanghuancoder 已提交
407
        outs = _C_ops.where_index(x)
408
    else:
409 410 411 412 413
        helper = LayerHelper("where_index", **locals())

        outs = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.INT64)

414 415 416
        helper.append_op(type='where_index',
                         inputs={'Condition': x},
                         outputs={'Out': [outs]})
417 418 419 420 421 422 423 424

    if not as_tuple:
        return outs
    elif rank == 1:
        return tuple([outs])
    else:
        for i in range(rank):
            list_out.append(
425
                paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1]))
426 427 428
        return tuple(list_out)


429
def sort(x, axis=-1, descending=False, name=None):
430
    """
S
swtkiwi 已提交
431

432
    Sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.
C
Chengmo 已提交
433

434
    Args:
435
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
436 437 438
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
439
            as axis+R. Default is -1.
440 441 442
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
443 444
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        
445
    Returns:
W
wawltor 已提交
446
        Tensor: sorted tensor(with the same shape and data type as ``x``).
447
    Examples:
N
Noel 已提交
448

449
        .. code-block:: python
450
           :name: code-example1
N
Noel 已提交
451

452
            import paddle
N
Noel 已提交
453

454 455 456 457 458 459 460
            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]], 
                                 dtype='float32')
461 462 463
            out1 = paddle.sort(x=x, axis=-1)
            out2 = paddle.sort(x=x, axis=0)
            out3 = paddle.sort(x=x, axis=1)
N
Noel 已提交
464
            print(out1)
W
wawltor 已提交
465 466 467 468 469 470
            #[[[5. 5. 8. 9.]
            #  [0. 0. 1. 7.]
            #  [2. 4. 6. 9.]]
            # [[2. 2. 4. 5.]
            #  [4. 7. 7. 9.]
            #  [0. 1. 6. 7.]]]
N
Noel 已提交
471
            print(out2)
472
            #[[[5. 2. 4. 2.]
W
wawltor 已提交
473 474 475 476 477
            #  [0. 0. 1. 7.]
            #  [1. 7. 0. 4.]]
            # [[5. 8. 9. 5.]
            #  [4. 7. 7. 9.]
            #  [6. 9. 2. 6.]]]
N
Noel 已提交
478
            print(out3)
479
            #[[[0. 0. 1. 4.]
W
wawltor 已提交
480 481 482 483 484
            #  [5. 8. 2. 5.]
            #  [6. 9. 9. 7.]]
            # [[1. 2. 0. 2.]
            #  [4. 7. 4. 6.]
            #  [5. 7. 7. 9.]]]
485
    """
486 487 488 489 490 491 492
    if in_dygraph_mode():
        outs, _ = _C_ops.final_state_argsort(x, axis, descending)
        return outs

    if _in_legacy_dygraph():
        outs, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return outs
493
    helper = LayerHelper("sort", **locals())
494 495 496 497 498 499 500 501 502 503 504 505 506 507
    out = helper.create_variable_for_type_inference(dtype=x.dtype,
                                                    stop_gradient=False)
    ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64,
                                                    stop_gradient=True)
    helper.append_op(type='argsort',
                     inputs={'X': x},
                     outputs={
                         'Out': out,
                         'Indices': ids
                     },
                     attrs={
                         'axis': axis,
                         'descending': descending
                     })
W
wawltor 已提交
508
    return out
C
Chengmo 已提交
509 510


511 512
def mode(x, axis=-1, keepdim=False, name=None):
    """
513
    Used to find values and indices of the modes at the optional axis.
514 515 516 517 518 519 520

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
521
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

           import paddle
           
           tensor = paddle.to_tensor([[[1,2,2],[2,3,3]],[[0,5,5],[9,9,0]]], dtype=paddle.float32)
           res = paddle.mode(tensor, 2)
           print(res)
           # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
           #   [[2., 3.],
           #    [5., 9.]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
           #   [[1, 1],
           #    [1, 0]]))
           
    """
542 543 544
    if in_dygraph_mode():
        return _C_ops.final_state_mode(x, axis, keepdim)
    if _in_legacy_dygraph():
545 546 547 548 549 550 551 552 553 554 555
        return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)

    helper = LayerHelper("mode", **locals())
    inputs = {"X": [x]}
    attrs = {}
    attrs['axis'] = axis
    attrs['keepdim'] = keepdim

    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

556 557 558 559 560 561 562
    helper.append_op(type="mode",
                     inputs=inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=attrs)
563 564 565 566
    indices.stop_gradient = True
    return values, indices


R
ronnywang 已提交
567
def where(condition, x=None, y=None, name=None):
568
    r"""
569
    Return a Tensor of elements selected from either :attr:`x` or :attr:`y` according to corresponding elements of :attr:`condition`. Concretely,
R
ronnywang 已提交
570

571
    .. math::
C
Chengmo 已提交
572

573 574 575 576 577
        out_i =
        \begin{cases}
        x_i, & \text{if}  \ condition_i \  \text{is} \ True \\
        y_i, & \text{if}  \ condition_i \  \text{is} \ False \\
        \end{cases}.
C
Chengmo 已提交
578

579 580
    Notes:
        ``numpy.where(condition)`` is identical to ``paddle.nonzero(condition, as_tuple=True)``, please refer to :ref:`api_tensor_search_nonzero`.
581

582
    Args:
583 584 585 586
        condition (Tensor): The condition to choose x or y. When True (nonzero), yield x, otherwise yield y.
        x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
587

588
    Returns:
589
        Tensor: A Tensor with the same shape as :attr:`condition` and same data type as :attr:`x` and :attr:`y`.
590

591 592
    Examples:
        .. code-block:: python
593
            :name:where-example
594

595
            import paddle
596

597 598 599
            x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
            y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])
            out = paddle.where(x>1, x, y)
600

601 602
            print(out)
            #out: [1.0, 1.0, 3.2, 1.2]
R
ronnywang 已提交
603

604 605 606 607 608
            out = paddle.where(x>1)
            print(out)
            #out: (Tensor(shape=[2, 1], dtype=int64, place=CPUPlace, stop_gradient=True,
            #            [[2],
            #             [3]]),)
609
    """
R
ronnywang 已提交
610
    if np.isscalar(x):
611
        x = paddle.full([1], x, np.array([x]).dtype.name)
R
ronnywang 已提交
612 613

    if np.isscalar(y):
614
        y = paddle.full([1], y, np.array([y]).dtype.name)
R
ronnywang 已提交
615

R
ronnywang 已提交
616 617 618 619 620 621
    if x is None and y is None:
        return nonzero(condition, as_tuple=True)

    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")

Z
zhiboniu 已提交
622
    if not paddle.in_dynamic_mode():
623
        check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
624 625 626 627 628 629
        check_variable_and_dtype(x, 'x',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')
        check_variable_and_dtype(y, 'y',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')
630

631
    condition_shape = list(condition.shape)
632 633
    x_shape = list(x.shape)
    y_shape = list(y.shape)
634

635
    if x_shape == y_shape and condition_shape == x_shape:
636 637 638 639 640
        broadcast_condition = condition
        broadcast_x = x
        broadcast_y = y
    else:
        if core.is_compiled_with_xpu():
Z
zhiboniu 已提交
641 642 643 644 645
            cond_int = paddle.cast(condition, x.dtype)
            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
            out1 = paddle.multiply(x, cond_int)
            out2 = paddle.multiply(y, cond_not_int)
            out = paddle.add(out1, out2)
646
            return out
647

Z
zhiboniu 已提交
648 649 650 651 652 653 654 655 656 657 658 659 660
        zeros_like_x = paddle.zeros_like(x)
        zeros_like_y = paddle.zeros_like(y)
        zeros_like_condition = paddle.zeros_like(condition)
        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
        cast_cond = paddle.cast(condition, x.dtype)

        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
        broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)
        broadcast_x = paddle.add(x, broadcast_zeros)
        broadcast_y = paddle.add(y, broadcast_zeros)
        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
        broadcast_condition = paddle.cast(broadcast_condition, 'bool')

J
Jiabin Yang 已提交
661 662 663
    if in_dygraph_mode():
        return _C_ops.final_state_where(broadcast_condition, broadcast_x,
                                        broadcast_y)
664
    else:
J
Jiabin Yang 已提交
665 666 667 668 669 670
        if _in_legacy_dygraph():
            return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
        else:
            helper = LayerHelper("where", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

671 672 673 674 675 676 677
            helper.append_op(type='where',
                             inputs={
                                 'Condition': broadcast_condition,
                                 'X': broadcast_x,
                                 'Y': broadcast_y
                             },
                             outputs={'Out': [out]})
678

J
Jiabin Yang 已提交
679
            return out
680 681


C
Chengmo 已提交
682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705
def index_sample(x, index):
    """
    **IndexSample Layer**

    IndexSample OP returns the element of the specified location of X, 
    and the location is specified by Index. 

    .. code-block:: text


                Given:

                X = [[1, 2, 3, 4, 5],
                     [6, 7, 8, 9, 10]]

                Index = [[0, 1, 3],
                         [0, 2, 4]]

                Then:

                Out = [[1, 2, 4],
                       [6, 8, 10]]

    Args:
C
Chengmo 已提交
706
        x (Tensor): The source input tensor with 2-D shape. Supported data type is 
C
Chengmo 已提交
707
            int32, int64, float32, float64.
C
Chengmo 已提交
708
        index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X. 
C
Chengmo 已提交
709 710 711
            Data type is int32 or int64.

    Returns:
C
Chengmo 已提交
712
        output (Tensor): The output is a tensor with the same shape as index.
C
Chengmo 已提交
713 714 715 716 717 718

    Examples:

        .. code-block:: python

            import paddle
719 720 721 722 723 724 725 726 727 728 729

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]], dtype='float32')
            index = paddle.to_tensor([[0, 1, 2],
                                      [1, 2, 3],
                                      [0, 0, 0]], dtype='int32')
            target = paddle.to_tensor([[100, 200, 300, 400],
                                       [500, 600, 700, 800],
                                       [900, 1000, 1100, 1200]], dtype='int32')
            out_z1 = paddle.index_sample(x, index)
N
Noel 已提交
730
            print(out_z1)
731 732 733 734 735 736 737 738
            #[[1. 2. 3.]
            # [6. 7. 8.]
            # [9. 9. 9.]]

            # Use the index of the maximum value by topk op
            # get the value of the element of the corresponding index in other tensors
            top_value, top_index = paddle.topk(x, k=2)
            out_z2 = paddle.index_sample(target, top_index)
N
Noel 已提交
739
            print(top_value)
740 741 742 743
            #[[ 4.  3.]
            # [ 8.  7.]
            # [12. 11.]]

N
Noel 已提交
744
            print(top_index)
745 746 747 748
            #[[3 2]
            # [3 2]
            # [3 2]]

N
Noel 已提交
749
            print(out_z2)
750 751 752
            #[[ 400  300]
            # [ 800  700]
            # [1200 1100]]
C
Chengmo 已提交
753

C
Chengmo 已提交
754
    """
J
Jiabin Yang 已提交
755 756 757 758 759 760 761 762 763 764 765 766 767 768
    if in_dygraph_mode():
        return _C_ops.final_state_index_sample(x, index)
    else:
        if _in_legacy_dygraph():
            return _C_ops.index_sample(x, index)
        else:
            helper = LayerHelper("index_sample", **locals())
            check_variable_and_dtype(x, 'x',
                                     ['float32', 'float64', 'int32', 'int64'],
                                     'paddle.tensor.search.index_sample')
            check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                                     'paddle.tensor.search.index_sample')
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

769 770 771 772 773 774
            helper.append_op(type='index_sample',
                             inputs={
                                 'X': x,
                                 'Index': index
                             },
                             outputs={'Out': out})
J
Jiabin Yang 已提交
775
            return out
776 777 778 779


def masked_select(x, mask, name=None):
    """
C
Chen Long 已提交
780
    Returns a new 1-D tensor which indexes the input tensor according to the ``mask``
781 782 783 784 785
    which is a tensor with data type of bool.

    Args:
        x (Tensor): The input Tensor, the data type can be int32, int64, float32, float64. 
        mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.
786
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
787

788 789
    Returns: 
        A 1-D Tensor which is the same data type  as ``x``.
790 791 792 793 794 795
    
    Examples:

        .. code-block:: python

            import paddle
796 797 798 799 800 801 802

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            mask = paddle.to_tensor([[True, False, False, False],
                                     [True, True, False, False],
                                     [True, False, False, False]])
803 804 805 806
            out = paddle.masked_select(x, mask)
            #[1.0 5.0 6.0 9.0]
    """

H
hong 已提交
807 808 809 810
    if in_dygraph_mode():
        return _C_ops.final_state_masked_select(x, mask)

    if _in_legacy_dygraph():
W
wanghuancoder 已提交
811
        return _C_ops.masked_select(x, mask)
812 813 814 815 816 817 818

    helper = LayerHelper("masked_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.mask_select')
    check_variable_and_dtype(mask, 'mask', ['bool'],
                             'paddle.tensor.search.masked_select')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
819 820 821 822 823 824
    helper.append_op(type='masked_select',
                     inputs={
                         'X': x,
                         'Mask': mask
                     },
                     outputs={'Y': out})
825
    return out
W
wawltor 已提交
826 827 828 829


def topk(x, k, axis=None, largest=True, sorted=True, name=None):
    """
830
    Return values and indices of the k largest or smallest at the optional axis.
W
wawltor 已提交
831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
    If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
    If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        k(int, Tensor): The number of top elements to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        largest(bool, optional) : largest is a flag, if set to true,
            algorithm will sort by descending order, otherwise sort by
            ascending order. Default is True.
        sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value. 
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python
852 853
          :name: code-example1
            import paddle
W
wawltor 已提交
854

855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871
            data_1 = paddle.to_tensor([1, 4, 5, 7])
            value_1, indices_1 = paddle.topk(data_1, k=1)
            print(value_1) # [7]
            print(indices_1) # [3]

            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
            value_2, indices_2 = paddle.topk(data_2, k=1)
            print(value_2) # [[7], [6]]
            print(indices_2) # [[3], [1]]

            value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)
            print(value_3) # [[7], [6]]
            print(indices_3) # [[3], [1]]

            value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)
            print(value_4) # [[2, 6, 5, 7]]
            print(indices_4) # [[1, 1, 0, 0]]
W
wawltor 已提交
872 873 874


    """
H
hong 已提交
875

H
hong 已提交
876 877 878 879 880 881
    if in_dygraph_mode():
        if axis == None:
            axis = -1
        out, indices = _C_ops.final_state_top_k(x, k, axis, largest, sorted)
        return out, indices

H
hong 已提交
882
    if _non_static_mode():
W
wawltor 已提交
883
        if axis is None:
884 885
            out, indices = _C_ops.top_k_v2(x, 'k', int(k), 'largest', largest,
                                           'sorted', sorted)
W
wawltor 已提交
886
        else:
887 888
            out, indices = _C_ops.top_k_v2(x, 'k', int(k), 'axis', axis,
                                           'largest', largest, 'sorted', sorted)
W
wawltor 已提交
889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905
        return out, indices

    helper = LayerHelper("top_k_v2", **locals())
    inputs = {"X": [x]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}
    attrs['largest'] = largest
    attrs['sorted'] = sorted
    if axis is not None:
        attrs['axis'] = axis

    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

906 907 908 909 910 911 912
    helper.append_op(type="top_k_v2",
                     inputs=inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=attrs)
W
wawltor 已提交
913 914
    indices.stop_gradient = True
    return values, indices
Y
Yanxing Shi 已提交
915 916 917 918 919 920 921 922


def searchsorted(sorted_sequence,
                 values,
                 out_int32=False,
                 right=False,
                 name=None):
    """
923
    Find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`.
Y
Yanxing Shi 已提交
924 925 926 927 928 929 930

    Args:
        sorted_sequence(Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension. 
        values(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
        out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
        right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
                               The default value is False and it shows the lower bounds.  
931
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
Y
Yanxing Shi 已提交
932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962
        
    Returns:
        Tensor(the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.  
    
    Examples:

        .. code-block:: python
    
            import paddle

            sorted_sequence = paddle.to_tensor([[1, 3, 5, 7, 9, 11],
                                                [2, 4, 6, 8, 10, 12]], dtype='int32')
            values = paddle.to_tensor([[3, 6, 9, 10], [3, 6, 9, 10]], dtype='int32')
            out1 = paddle.searchsorted(sorted_sequence, values)
            print(out1)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 2, 4, 4]])
            out2 = paddle.searchsorted(sorted_sequence, values, right=True)
            print(out2)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[2, 3, 5, 5],
            #         [1, 3, 4, 5]])
            sorted_sequence_1d = paddle.to_tensor([1, 3, 5, 7, 9, 11, 13])
            out3 = paddle.searchsorted(sorted_sequence_1d, values)     
            print(out3)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 3, 4, 5]])
            
    """
F
From00 已提交
963 964 965
    if in_dygraph_mode():
        return _C_ops.final_state_searchsorted(sorted_sequence, values,
                                               out_int32, right)
Y
Yanxing Shi 已提交
966

F
From00 已提交
967
    if _in_legacy_dygraph():
Y
Yanxing Shi 已提交
968 969 970 971 972 973 974 975 976 977 978 979 980
        return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                   out_int32, "right", right)

    check_variable_and_dtype(sorted_sequence, 'SortedSequence',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    check_variable_and_dtype(values, 'Values',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')

    helper = LayerHelper('searchsorted', **locals())
    out_type = 'int32' if out_int32 else 'int64'
    out = helper.create_variable_for_type_inference(dtype=out_type)
981 982 983 984 985 986 987 988 989 990
    helper.append_op(type='searchsorted',
                     inputs={
                         'SortedSequence': sorted_sequence,
                         "Values": values
                     },
                     outputs={'Out': out},
                     attrs={
                         "out_int32": out_int32,
                         "right": right
                     })
Y
Yanxing Shi 已提交
991 992

    return out
993 994 995 996


def kthvalue(x, k, axis=None, keepdim=False, name=None):
    """
997
    Find values and indices of the k-th smallest at the axis.
998 999 1000 1001 1002 1003 1004 1005

    Args:
        x(Tensor): A N-D Tensor with type float32, float64, int32, int64.
        k(int): The k for the k-th smallest number to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. The default is None. And if the axis is None, it will computed as -1 by default.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
1006
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.
   
    Examples:

        .. code-block:: python
    
            import paddle
            
            x = paddle.randn((2,3,2))
            # Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[[ 0.22954939, -0.01296274],
            #         [ 1.17135799, -0.34493217],
            #         [-0.19550551, -0.17573971]],
            #
            #        [[ 0.15104349, -0.93965352],
            #         [ 0.14745511,  0.98209465],
            #         [ 0.10732264, -0.55859774]]])           
            y = paddle.kthvalue(x, 2, 1)    
            # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            # [[ 0.22954939, -0.17573971],
            #  [ 0.14745511, -0.55859774]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #  [[0, 2],
            #  [1, 2]]))
    """
1033
    if _non_static_mode():
1034
        if axis is not None:
1035 1036 1037 1038
            if _in_legacy_dygraph():
                return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
                                       keepdim)
            return _C_ops.final_state_kthvalue(x, k, axis, keepdim)
1039
        else:
1040 1041 1042
            if _in_legacy_dygraph():
                return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
            return _C_ops.final_state_kthvalue(x, k, -1, keepdim)
1043 1044 1045 1046 1047 1048 1049 1050 1051

    helper = LayerHelper("kthvalue", **locals())
    inputs = {"X": [x]}
    attrs = {'k': k}
    if axis is not None:
        attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

1052 1053 1054 1055 1056 1057 1058
    helper.append_op(type="kthvalue",
                     inputs=inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=attrs)
1059 1060
    indices.stop_gradient = True
    return values, indices