search.py 41.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
#   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
C
Chengmo 已提交
14
from __future__ import print_function
15
import numpy as np
Z
zhiboniu 已提交
16
import paddle
17
from ..framework import LayerHelper
C
Chengmo 已提交
18
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
Z
zhiboniu 已提交
19
from ..fluid import layers
20 21
from ..framework import core, in_dygraph_mode, _non_static_mode
from ..fluid.framework import _in_legacy_dygraph
22 23 24
from paddle.common_ops_import import convert_np_dtype_to_dtype_
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
W
wanghuancoder 已提交
25
from paddle import _C_ops
Z
zhiboniu 已提交
26
from .logic import logical_not
27

28
# TODO: define searching & indexing functions of a tensor
29 30
# from ..fluid.layers import has_inf  #DEFINE_ALIAS
# from ..fluid.layers import has_nan  #DEFINE_ALIAS
31

32 33
__all__ = []

34

35 36
def argsort(x, axis=-1, descending=False, name=None):
    """
    Sort ``x`` along ``axis`` and return the index tensor that would sort it.

    The sort is ascending by default; set :attr:`descending` to True for a
    descending order.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: sorted indices(with the same shape as ``x``
        and with data type int64).

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]],
                                dtype='float32')
            out1 = paddle.argsort(x, axis=-1)
            out2 = paddle.argsort(x, axis=0)
            out3 = paddle.argsort(x, axis=1)

            print(out1)
            #[[[0 3 1 2]
            #  [0 1 2 3]
            #  [2 3 0 1]]
            # [[1 3 2 0]
            #  [0 1 2 3]
            #  [2 0 3 1]]]

            print(out2)
            #[[[0 1 1 1]
            #  [0 0 0 0]
            #  [1 1 1 0]]
            # [[1 0 0 0]
            #  [1 1 1 1]
            #  [0 0 0 1]]]

            print(out3)
            #[[[1 1 1 2]
            #  [0 0 2 0]
            #  [2 2 0 1]]
            # [[2 0 2 0]
            #  [1 1 0 2]
            #  [0 2 1 1]]]
    """
    # Dynamic-graph fast paths: the kernel returns (sorted values, indices);
    # only the indices are exposed by this API.
    if in_dygraph_mode():
        _, indices = _C_ops.final_state_argsort(x, axis, descending)
        return indices

    if _in_legacy_dygraph():
        _, indices = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return indices

    # Static graph: validate the input dtype, then append an `argsort` op.
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort')

    helper = LayerHelper("argsort", **locals())
    sorted_values = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    indices = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': sorted_values, 'Indices': indices},
        attrs={'axis': axis, 'descending': descending})
    return indices


126
def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computes the indices of the max elements of the input tensor's
    element along the provided axis.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the max value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str|np.dtype, optional): Data type of the output tensor which can
                    be int32, int64. The default value is ``int64`` , and it will
                    return the int64 indices.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[5,8,9,5],
                                 [0,0,1,7],
                                 [6,9,2,4]])
            out1 = paddle.argmax(x)
            print(out1) # 2
            out2 = paddle.argmax(x, axis=0)
            print(out2)
            # [2, 2, 0, 1]
            out3 = paddle.argmax(x, axis=-1)
            print(out3)
            # [2, 3, 1]
            out4 = paddle.argmax(x, axis=0, keepdim=True)
            print(out4)
            # [[2, 2, 0, 1]]
    """
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis'  must be int or None in argmax, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmax could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # axis=None means a global argmax: the kernel expects the input to be
    # flattened (flatten=True) and the reduction applied on axis 0.
    flatten = False
    if axis is None:
        flatten = True
        axis = 0

    if in_dygraph_mode():
        return _C_ops.final_state_argmax(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                             keepdim, 'flatten', flatten)
        return out

    # Static graph: validate dtypes, then append an `arg_max` op.
    helper = LayerHelper("argmax", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmax')
    # Fixed: the op name reported on dtype errors previously said 'argmin'.
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmax')
    attrs = {}
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(type='arg_max',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs=attrs)
    # Indices are integral; no gradient flows through them.
    out.stop_gradient = True
    return out


208
def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computing the indices of the min elements of the input tensor's
    element along the provided axis.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str, optional): Data type of the output tensor which can
                    be int32, int64. The default value is 'int64', and it will
                    return the int64 indices.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.

    Examples:
        .. code-block:: python

            import paddle

            x =  paddle.to_tensor([[5,8,9,5],
                                     [0,0,1,7],
                                     [6,9,2,4]])
            out1 = paddle.argmin(x)
            print(out1) # 4
            out2 = paddle.argmin(x, axis=0)
            print(out2)
            # [1, 1, 1, 2]
            out3 = paddle.argmin(x, axis=-1)
            print(out3)
            # [0, 0, 2]
            out4 = paddle.argmin(x, axis=0, keepdim=True)
            print(out4)
            # [[1, 1, 1, 2]]
    """
    # Validate the user-facing arguments before any dispatch.
    if axis is not None and not isinstance(axis, int):
        raise TypeError(
            "The type of 'axis'  must be int or None in argmin, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmin could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # A global argmin (axis=None) is expressed to the kernel as
    # flatten=True with the reduction on axis 0.
    flatten = axis is None
    if flatten:
        axis = 0

    if in_dygraph_mode():
        return _C_ops.final_state_argmin(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        return _C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype, 'keepdims',
                              keepdim, 'flatten', flatten)

    # Static graph: validate dtypes, then append an `arg_min` op.
    helper = LayerHelper("argmin", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmin')
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
    result = helper.create_variable_for_type_inference(var_dtype)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [result]},
        attrs={
            'keepdims': keepdim,
            'axis': axis,
            'flatten': flatten,
            'dtype': var_dtype,
        })
    # Indices are integral; no gradient flows through them.
    result.stop_gradient = True
    return result
290 291


292
def index_select(x, index, axis=0, name=None):
    """

    Returns a new tensor which indexes the ``input`` tensor along dimension ``axis`` using
    the entries in ``index`` which is a Tensor. The returned tensor has the same number
    of dimensions as the original ``x`` tensor. The dim-th dimension has the same
    size as the length of ``index``; other dimensions have the same size as in the ``x`` tensor.

    Args:
        x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.
        index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.
        axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor with same data type as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            index = paddle.to_tensor([0, 1, 1], dtype='int32')
            out_z1 = paddle.index_select(x=x, index=index)
            #[[1. 2. 3. 4.]
            # [5. 6. 7. 8.]
            # [5. 6. 7. 8.]]
            out_z2 = paddle.index_select(x=x, index=index, axis=1)
            #[[ 1.  2.  2.]
            # [ 5.  6.  6.]
            # [ 9. 10. 10.]]
    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        return _C_ops.final_state_index_select(x, index, axis)
    if _in_legacy_dygraph():
        return _C_ops.index_select(x, index, 'dim', axis)

    # Static graph: validate both tensors, then append an `index_select` op.
    helper = LayerHelper("index_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_select')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_select')

    result = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='index_select',
        inputs={'X': x, 'Index': index},
        outputs={'Out': result},
        attrs={'dim': axis})
    return result


354
def nonzero(x, as_tuple=False):
    """
    Return a tensor containing the indices of all non-zero elements of the `input`
    tensor. If as_tuple is True, return a tuple of 1-D tensors, one for each dimension
    in `input`, each containing the indices (in that dimension) of all non-zero elements
    of `input`. Given a n-Dimensional `input` tensor with shape [x_1, x_2, ..., x_n], If
    as_tuple is False, we can get a output tensor with shape [z, n], where `z` is the
    number of all non-zero elements in the `input` tensor. If as_tuple is True, we can get
    a 1-D tensor tuple of length `n`, and the shape of each 1-D tensor is [z, 1].

    Args:
        x (Tensor): The input tensor variable.
        as_tuple (bool): Return type, Tensor or tuple of Tensor.

    Returns:
        Tensor. The data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                                   [0.0, 2.0, 0.0],
                                   [0.0, 0.0, 3.0]])
            x2 = paddle.to_tensor([0.0, 1.0, 0.0, 3.0])
            out_z1 = paddle.nonzero(x1)
            print(out_z1)
            #[[0 0]
            # [1 1]
            # [2 2]]
            out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
            for out in out_z1_tuple:
                print(out)
            #[[0]
            # [1]
            # [2]]
            #[[0]
            # [1]
            # [2]]
            out_z2 = paddle.nonzero(x2)
            print(out_z2)
            #[[1]
            # [3]]
            out_z2_tuple = paddle.nonzero(x2, as_tuple=True)
            for out in out_z2_tuple:
                print(out)
            #[[1]
            # [3]]

    """
    rank = len(x.shape)

    # Compute the [z, rank] index tensor via the `where_index` kernel.
    if in_dygraph_mode():
        outs = _C_ops.final_state_where_index(x)
    elif paddle.in_dynamic_mode():
        outs = _C_ops.where_index(x)
    else:
        helper = LayerHelper("where_index", **locals())
        outs = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.INT64)
        helper.append_op(type='where_index',
                         inputs={'Condition': x},
                         outputs={'Out': [outs]})

    if not as_tuple:
        return outs
    if rank == 1:
        # 1-D input: the whole index tensor is the single tuple element.
        return (outs,)
    # N-D input: split the index tensor column-wise, one tensor per dim.
    return tuple(
        paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1])
        for i in range(rank))


435
def sort(x, axis=-1, descending=False, name=None):
    """

    This OP sorts the input along the given axis, and returns the sorted output tensor. The default sort algorithm is ascending, if you want the sort algorithm to be descending, you must set the :attr:`descending` as True.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: sorted tensor(with the same shape and data type as ``x``).
    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[[5,8,9,5],
                                   [0,0,1,7],
                                   [6,9,2,4]],
                                  [[5,2,4,2],
                                   [4,7,7,9],
                                   [1,7,0,6]]], 
                                 dtype='float32')
            out1 = paddle.sort(x=x, axis=-1)
            out2 = paddle.sort(x=x, axis=0)
            out3 = paddle.sort(x=x, axis=1)
            print(out1)
            #[[[5. 5. 8. 9.]
            #  [0. 0. 1. 7.]
            #  [2. 4. 6. 9.]]
            # [[2. 2. 4. 5.]
            #  [4. 7. 7. 9.]
            #  [0. 1. 6. 7.]]]
            print(out2)
            #[[[5. 2. 4. 2.]
            #  [0. 0. 1. 7.]
            #  [1. 7. 0. 4.]]
            # [[5. 8. 9. 5.]
            #  [4. 7. 7. 9.]
            #  [6. 9. 2. 6.]]]
            print(out3)
            #[[[0. 0. 1. 4.]
            #  [5. 8. 2. 5.]
            #  [6. 9. 9. 7.]]
            # [[1. 2. 0. 2.]
            #  [4. 7. 4. 6.]
            #  [5. 7. 7. 9.]]]
    """
    # Dynamic-graph fast paths: the argsort kernel returns
    # (sorted values, indices); only the values are exposed here.
    if in_dygraph_mode():
        outs, _ = _C_ops.final_state_argsort(x, axis, descending)
        return outs

    if _in_legacy_dygraph():
        outs, _ = _C_ops.argsort(x, 'axis', axis, 'descending', descending)
        return outs
    # Static graph: `sort` is implemented by appending the shared
    # `argsort` op and returning its values output.
    helper = LayerHelper("sort", **locals())
    # stop_gradient=False: unlike argsort, gradients flow through the
    # sorted values.
    out = helper.create_variable_for_type_inference(dtype=x.dtype,
                                                    stop_gradient=False)
    ids = helper.create_variable_for_type_inference(VarDesc.VarType.INT64,
                                                    stop_gradient=True)
    helper.append_op(type='argsort',
                     inputs={'X': x},
                     outputs={
                         'Out': out,
                         'Indices': ids
                     },
                     attrs={
                         'axis': axis,
                         'descending': descending
                     })
    return out
C
Chengmo 已提交
515 516


517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
def mode(x, axis=-1, keepdim=False, name=None):
    """
    This OP is used to find values and indices of the modes at the optional axis.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

           import paddle

           tensor = paddle.to_tensor([[[1,2,2],[2,3,3]],[[0,5,5],[9,9,0]]], dtype=paddle.float32)
           res = paddle.mode(tensor, 2)
           print(res)
           # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
           #   [[2., 3.],
           #    [5., 9.]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
           #   [[1, 1],
           #    [1, 0]]))

    """
    # Dynamic-graph fast paths.
    if in_dygraph_mode():
        return _C_ops.final_state_mode(x, axis, keepdim)
    if _in_legacy_dygraph():
        return _C_ops.mode(x, "axis", axis, "keepdim", keepdim)

    # Static graph: append a `mode` op with two outputs.
    helper = LayerHelper("mode", **locals())
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(
        type="mode",
        inputs={"X": [x]},
        outputs={"Out": [values], "Indices": [indices]},
        attrs={"axis": axis, "keepdim": keepdim})
    # Indices are integral; no gradient flows through them.
    indices.stop_gradient = True
    return values, indices


R
ronnywang 已提交
573
def where(condition, x=None, y=None, name=None):
    r"""
    Return a Tensor of elements selected from either :attr:`x` or :attr:`y` according to corresponding elements of :attr:`condition`. Concretely,

    .. math::

        out_i =
        \begin{cases}
        x_i, & \text{if}  \ condition_i \  \text{is} \ True \\
        y_i, & \text{if}  \ condition_i \  \text{is} \ False \\
        \end{cases}.

    Notes:
        ``numpy.where(condition)`` is identical to ``paddle.nonzero(condition, as_tuple=True)``, please refer to :ref:`api_tensor_search_nonzero`.

    Args:
        condition (Tensor): The condition to choose x or y. When True (nonzero), yield x, otherwise yield y.
        x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A Tensor with the same shape as :attr:`condition` and same data type as :attr:`x` and :attr:`y`.

    Examples:
        .. code-block:: python
            :name:where-example

            import paddle

            x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
            y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])
            out = paddle.where(x>1, x, y)

            print(out)
            #out: [1.0, 1.0, 3.2, 1.2]

            out = paddle.where(x>1)
            print(out)
            #out: (Tensor(shape=[2, 1], dtype=int64, place=CPUPlace, stop_gradient=True,
            #            [[2],
            #             [3]]),)
    """
    # Promote Python scalars to 1-element tensors; the numpy round-trip
    # infers the matching dtype name for the scalar value.
    if np.isscalar(x):
        x = paddle.full([1], x, np.array([x]).dtype.name)

    if np.isscalar(y):
        y = paddle.full([1], y, np.array([y]).dtype.name)

    # numpy-compatible single-argument form: where(cond) == nonzero(cond).
    if x is None and y is None:
        return nonzero(condition, as_tuple=True)

    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")

    if not paddle.in_dynamic_mode():
        check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
        check_variable_and_dtype(x, 'x',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')
        check_variable_and_dtype(y, 'y',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')

    condition_shape = list(condition.shape)
    x_shape = list(x.shape)
    y_shape = list(y.shape)

    if x_shape == y_shape and condition_shape == x_shape:
        # All shapes already agree; no broadcasting needed.
        broadcast_condition = condition
        broadcast_x = x
        broadcast_y = y
    else:
        if core.is_compiled_with_xpu():
            # XPU path: express the select arithmetically as
            # x*cond + y*(1-cond), relying on elementwise broadcasting.
            cond_int = paddle.cast(condition, x.dtype)
            cond_not_int = paddle.cast(logical_not(condition), x.dtype)
            out1 = paddle.multiply(x, cond_int)
            out2 = paddle.multiply(y, cond_not_int)
            out = paddle.add(out1, out2)
            return out

        # Broadcast x, y and condition to a common shape by adding
        # zeros_like tensors of each operand (the `where` op itself does
        # not broadcast).
        zeros_like_x = paddle.zeros_like(x)
        zeros_like_y = paddle.zeros_like(y)
        zeros_like_condition = paddle.zeros_like(condition)
        zeros_like_condition = paddle.cast(zeros_like_condition, x.dtype)
        cast_cond = paddle.cast(condition, x.dtype)

        broadcast_zeros = paddle.add(zeros_like_x, zeros_like_y)
        broadcast_zeros = paddle.add(broadcast_zeros, zeros_like_condition)
        broadcast_x = paddle.add(x, broadcast_zeros)
        broadcast_y = paddle.add(y, broadcast_zeros)
        broadcast_condition = paddle.add(cast_cond, broadcast_zeros)
        broadcast_condition = paddle.cast(broadcast_condition, 'bool')

    # Dispatch on execution mode: new dygraph, legacy dygraph, or static
    # graph (append a `where` op).
    if in_dygraph_mode():
        return _C_ops.final_state_where(broadcast_condition, broadcast_x,
                                        broadcast_y)
    else:
        if _in_legacy_dygraph():
            return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y)
        else:
            helper = LayerHelper("where", **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)

            helper.append_op(type='where',
                             inputs={
                                 'Condition': broadcast_condition,
                                 'X': broadcast_x,
                                 'Y': broadcast_y
                             },
                             outputs={'Out': [out]})

            return out
686 687


C
Chengmo 已提交
688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711
def index_sample(x, index):
    """
    **IndexSample Layer**

    IndexSample OP returns the element of the specified location of X,
    and the location is specified by Index.

    .. code-block:: text


                Given:

                X = [[1, 2, 3, 4, 5],
                     [6, 7, 8, 9, 10]]

                Index = [[0, 1, 3],
                         [0, 2, 4]]

                Then:

                Out = [[1, 2, 4],
                       [6, 8, 10]]

    Args:
        x (Tensor): The source input tensor with 2-D shape. Supported data type is
            int32, int64, float32, float64.
        index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X.
            Data type is int32 or int64.

    Returns:
        output (Tensor): The output is a tensor with the same shape as index.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]], dtype='float32')
            index = paddle.to_tensor([[0, 1, 2],
                                      [1, 2, 3],
                                      [0, 0, 0]], dtype='int32')
            target = paddle.to_tensor([[100, 200, 300, 400],
                                       [500, 600, 700, 800],
                                       [900, 1000, 1100, 1200]], dtype='int32')
            out_z1 = paddle.index_sample(x, index)
            print(out_z1)
            #[[1. 2. 3.]
            # [6. 7. 8.]
            # [9. 9. 9.]]

            # Use the index of the maximum value by topk op
            # get the value of the element of the corresponding index in other tensors
            top_value, top_index = paddle.topk(x, k=2)
            out_z2 = paddle.index_sample(target, top_index)
            print(top_value)
            #[[ 4.  3.]
            # [ 8.  7.]
            # [12. 11.]]

            print(top_index)
            #[[3 2]
            # [3 2]
            # [3 2]]

            print(out_z2)
            #[[ 400  300]
            # [ 800  700]
            # [1200 1100]]

    """
    # New dynamic graph: dispatch straight to the final-state kernel.
    if in_dygraph_mode():
        return _C_ops.final_state_index_sample(x, index)
    # Legacy dynamic graph: use the old C++ op entry point.
    if _in_legacy_dygraph():
        return _C_ops.index_sample(x, index)

    # Static graph: validate dtypes, then append the op to the program.
    helper = LayerHelper("index_sample", **locals())
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='index_sample',
                     inputs={'X': x, 'Index': index},
                     outputs={'Out': out})
    return out
782 783 784 785


def masked_select(x, mask, name=None):
    """
    Returns a new 1-D tensor which indexes the input tensor according to the ``mask``
    which is a tensor with data type of bool.

    Args:
        x (Tensor): The input Tensor, the data type can be int32, int64, float32, float64.
        mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns: A 1-D Tensor which is the same data type  as ``x``.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            mask = paddle.to_tensor([[True, False, False, False],
                                     [True, True, False, False],
                                     [True, False, False, False]])
            out = paddle.masked_select(x, mask)
            #[1.0 5.0 6.0 9.0]
    """

    # New dynamic graph: final-state kernel.
    if in_dygraph_mode():
        return _C_ops.final_state_masked_select(x, mask)

    # Legacy dynamic graph op entry point.
    if _in_legacy_dygraph():
        return _C_ops.masked_select(x, mask)

    # Static graph path.
    helper = LayerHelper("masked_select", **locals())
    # Fix: the API tag in the dtype error message was misspelled
    # 'mask_select'; use the real API name so errors point users at
    # the right function (consistent with the mask check below).
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.masked_select')
    check_variable_and_dtype(mask, 'mask', ['bool'],
                             'paddle.tensor.search.masked_select')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='masked_select',
                     inputs={
                         'X': x,
                         'Mask': mask
                     },
                     outputs={'Y': out})
    return out
W
wawltor 已提交
833 834 835 836


def topk(x, k, axis=None, largest=True, sorted=True, name=None):
    """
    Return values and indices of the k largest or smallest at the optional axis.
    If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
    If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        k(int, Tensor): The number of top elements to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        largest(bool, optional) : largest is a flag, if set to true,
            algorithm will sort by descending order, otherwise sort by
            ascending order. Default is True.
        sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value. 
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python
          :name: code-example1
            import paddle

            data_1 = paddle.to_tensor([1, 4, 5, 7])
            value_1, indices_1 = paddle.topk(data_1, k=1)
            print(value_1) # [7]
            print(indices_1) # [3]

            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
            value_2, indices_2 = paddle.topk(data_2, k=1)
            print(value_2) # [[7], [6]]
            print(indices_2) # [[3], [1]]

            value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)
            print(value_3) # [[7], [6]]
            print(indices_3) # [[3], [1]]

            value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)
            print(value_4) # [[2, 6, 5, 7]]
            print(indices_4) # [[1, 1, 0, 0]]


    """

    # New dynamic graph: the final-state kernel needs a concrete axis,
    # so map the None default to -1 (last dimension).
    if in_dygraph_mode():
        # Fix: compare to the None singleton with `is`, not `==`
        # (PEP 8; consistent with the `axis is None` check below).
        if axis is None:
            axis = -1
        out, indices = _C_ops.final_state_top_k(x, k, axis, largest, sorted)
        return out, indices

    # Legacy dynamic graph: attribute-style call; omit `axis` entirely
    # when the caller did not supply one.
    if _non_static_mode():
        if axis is None:
            out, indices = _C_ops.top_k_v2(x, 'k', int(k), 'largest', largest,
                                           'sorted', sorted)
        else:
            out, indices = _C_ops.top_k_v2(x, 'k', int(k), 'axis', axis,
                                           'largest', largest, 'sorted', sorted)
        return out, indices

    # Static graph: a Tensor-valued k becomes an op input, a Python int
    # becomes an op attribute.
    helper = LayerHelper("top_k_v2", **locals())
    inputs = {"X": [x]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}
    attrs['largest'] = largest
    attrs['sorted'] = sorted
    if axis is not None:
        attrs['axis'] = axis

    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(type="top_k_v2",
                     inputs=inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=attrs)
    # Indices are positions, not differentiable values.
    indices.stop_gradient = True
    return values, indices
Y
Yanxing Shi 已提交
922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969


def searchsorted(sorted_sequence,
                 values,
                 out_int32=False,
                 right=False,
                 name=None):
    """
    This OP is used to find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`.

    Args:
        sorted_sequence(Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
        values(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
        out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
        right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
                               The default value is False and it shows the lower bounds.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor(the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.

    Examples:

        .. code-block:: python

            import paddle

            sorted_sequence = paddle.to_tensor([[1, 3, 5, 7, 9, 11],
                                                [2, 4, 6, 8, 10, 12]], dtype='int32')
            values = paddle.to_tensor([[3, 6, 9, 10], [3, 6, 9, 10]], dtype='int32')
            out1 = paddle.searchsorted(sorted_sequence, values)
            print(out1)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 2, 4, 4]])
            out2 = paddle.searchsorted(sorted_sequence, values, right=True)
            print(out2)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[2, 3, 5, 5],
            #         [1, 3, 4, 5]])
            sorted_sequence_1d = paddle.to_tensor([1, 3, 5, 7, 9, 11, 13])
            out3 = paddle.searchsorted(sorted_sequence_1d, values)
            print(out3)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 3, 4, 5]])

    """
    # New dynamic graph: final-state kernel.
    if in_dygraph_mode():
        return _C_ops.final_state_searchsorted(sorted_sequence, values,
                                               out_int32, right)

    # Legacy dynamic graph: attribute-style call.
    if _in_legacy_dygraph():
        return _C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                   out_int32, "right", right)

    # Static graph: validate inputs, then append the op.
    check_variable_and_dtype(sorted_sequence, 'SortedSequence',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    check_variable_and_dtype(values, 'Values',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')

    helper = LayerHelper('searchsorted', **locals())
    if out_int32:
        result_dtype = 'int32'
    else:
        result_dtype = 'int64'
    result = helper.create_variable_for_type_inference(dtype=result_dtype)
    helper.append_op(type='searchsorted',
                     inputs={'SortedSequence': sorted_sequence,
                             "Values": values},
                     outputs={'Out': result},
                     attrs={"out_int32": out_int32,
                            "right": right})
    return result
1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039


def kthvalue(x, k, axis=None, keepdim=False, name=None):
    """
    This OP is used to find values and indices of the k-th smallest at the axis.

    Args:
        x(Tensor): A N-D Tensor with type float32, float64, int32, int64.
        k(int): The k for the k-th smallest number to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. The default is None. And if the axis is None, it will computed as -1 by default.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((2,3,2))
            # Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[[ 0.22954939, -0.01296274],
            #         [ 1.17135799, -0.34493217],
            #         [-0.19550551, -0.17573971]],
            #
            #        [[ 0.15104349, -0.93965352],
            #         [ 0.14745511,  0.98209465],
            #         [ 0.10732264, -0.55859774]]])
            y = paddle.kthvalue(x, 2, 1)
            # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            # [[ 0.22954939, -0.17573971],
            #  [ 0.14745511, -0.55859774]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #  [[0, 2],
            #  [1, 2]]))
    """
    # Dynamic graph: the legacy op omits the axis attribute when the
    # caller did not supply one; the final-state kernel takes -1 instead.
    if _non_static_mode():
        if _in_legacy_dygraph():
            if axis is None:
                return _C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
            return _C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
                                   keepdim)
        return _C_ops.final_state_kthvalue(x, k,
                                           -1 if axis is None else axis,
                                           keepdim)

    # Static graph: append the op; axis stays absent from attrs when None.
    helper = LayerHelper("kthvalue", **locals())
    attrs = {'k': k}
    if axis is not None:
        attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(type="kthvalue",
                     inputs={"X": [x]},
                     outputs={"Out": [values],
                              "Indices": [indices]},
                     attrs=attrs)
    # Indices are positions, not differentiable values.
    indices.stop_gradient = True
    return values, indices