search.py 43.0 KB
Newer Older
1
#   Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14
import numpy as np
Z
zhiboniu 已提交
15
import paddle
16
from ..framework import LayerHelper, convert_np_dtype_to_dtype_
17
from ..fluid.data_feeder import check_dtype, check_variable_and_dtype
18 19
from ..framework import core, in_dygraph_mode, _non_static_mode
from ..fluid.framework import _in_legacy_dygraph
20 21
from paddle.common_ops_import import Variable
from paddle.common_ops_import import VarDesc
22
from paddle import _C_ops, _legacy_C_ops
23

24
# TODO: define searching & indexing functions of a tensor
25 26
# from ..fluid.layers import has_inf  #DEFINE_ALIAS
# from ..fluid.layers import has_nan  #DEFINE_ALIAS
27

28 29
__all__ = []

30

31 32
def argsort(x, axis=-1, descending=False, name=None):
    """
    Sorts ``x`` along ``axis`` and returns the index tensor that would
    produce the sorted values. Ascending by default; set ``descending``
    to True for descending order.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: sorted indices (same shape as ``x``, data type int64).

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[5, 8, 9, 5],
                                  [0, 0, 1, 7],
                                  [6, 9, 2, 4]], dtype='float32')
            out = paddle.argsort(x, axis=-1)
            print(out)
            # [[0 3 1 2]
            #  [0 1 2 3]
            #  [2 3 0 1]]
    """
    # New dygraph: the C++ argsort op returns (sorted_values, indices);
    # only the indices are wanted here.
    if in_dygraph_mode():
        return _C_ops.argsort(x, axis, descending)[1]

    # Legacy dygraph path.
    if _in_legacy_dygraph():
        return _legacy_C_ops.argsort(x, 'axis', axis, 'descending',
                                     descending)[1]

    # Static-graph path: validate the input dtype, then append an
    # `argsort` op whose Indices output is returned.
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'argsort')

    helper = LayerHelper("argsort", **locals())
    sorted_values = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=True)
    indices = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': sorted_values, 'Indices': indices},
        attrs={'axis': axis, 'descending': descending})
    return indices


121
def argmax(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computes the indices of the max elements of the input tensor's
    element along the provided axis.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the max value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str|np.dtype, optional): Data type of the output tensor which can
                    be int32, int64. The default value is ``int64`` , and it will
                    return the int64 indices.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor, return the tensor of int32 if set :attr:`dtype` is int32, otherwise return the tensor of int64.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[5,8,9,5],
                                 [0,0,1,7],
                                 [6,9,2,4]])
            out1 = paddle.argmax(x)
            print(out1) # 2
            out2 = paddle.argmax(x, axis=0)
            print(out2)
            # [2, 2, 0, 1]
            out3 = paddle.argmax(x, axis=-1)
            print(out3)
            # [2, 3, 1]
            out4 = paddle.argmax(x, axis=0, keepdim=True)
            print(out4)
            # [[2, 2, 0, 1]]
    """
    if axis is not None and not isinstance(axis, (int, Variable)):
        raise TypeError(
            "The type of 'axis'  must be int or Tensor or None in argmax, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmax could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # When no axis is given, the kernel flattens `x` and searches along
    # axis 0 of the flattened tensor.
    flatten = False
    if axis is None:
        flatten = True
        axis = 0

    if in_dygraph_mode():
        return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        out = _legacy_C_ops.arg_max(x, 'axis', axis, 'dtype', var_dtype,
                                    'keepdims', keepdim, 'flatten', flatten)
        return out

    # Static-graph path.
    helper = LayerHelper("argmax", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmax')
    # BUG FIX: the op name passed to check_dtype previously said 'argmin',
    # which produced a misleading error message for paddle.argmax.
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmax')
    attrs = {}
    out = helper.create_variable_for_type_inference(var_dtype)
    attrs['keepdims'] = keepdim
    attrs['axis'] = axis
    attrs['flatten'] = flatten
    attrs['dtype'] = var_dtype
    helper.append_op(type='arg_max',
                     inputs={'X': x},
                     outputs={'Out': [out]},
                     attrs=attrs)
    out.stop_gradient = True
    return out


203
def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
    """
    Computes the indices of the min elements of the input tensor's
    element along the provided axis.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is None, the input `x` will be into the flatten tensor, and selecting the min value index.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        dtype(str, optional): Data type of the output tensor which can
                    be int32, int64. The default value is 'int64', and it will
                    return the int64 indices.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor, return the tensor of `int32` if set :attr:`dtype` is `int32`, otherwise return the tensor of `int64`.

    Examples:
        .. code-block:: python

            import paddle

            x =  paddle.to_tensor([[5,8,9,5],
                                     [0,0,1,7],
                                     [6,9,2,4]])
            out1 = paddle.argmin(x)
            print(out1) # 4
            out2 = paddle.argmin(x, axis=0)
            print(out2)
            # [1, 1, 1, 2]
            out3 = paddle.argmin(x, axis=-1)
            print(out3)
            # [0, 0, 2]
            out4 = paddle.argmin(x, axis=0, keepdim=True)
            print(out4)
            # [[1, 1, 1, 2]]
    """
    if axis is not None and not isinstance(axis, (int, Variable)):
        raise TypeError(
            "The type of 'axis'  must be int or Tensor or None in argmin, but received %s."
            % (type(axis)))

    if dtype is None:
        raise ValueError(
            "the value of 'dtype' in argmin could not be None, but received None"
        )

    var_dtype = convert_np_dtype_to_dtype_(dtype)
    # `axis=None` means: flatten the input and search along axis 0.
    flatten = axis is None
    if flatten:
        axis = 0

    if in_dygraph_mode():
        return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype)
    if _in_legacy_dygraph():
        return _legacy_C_ops.arg_min(x, 'axis', axis, 'dtype', var_dtype,
                                     'keepdims', keepdim, 'flatten', flatten)

    # Static-graph path: validate, then append an `arg_min` op.
    helper = LayerHelper("argmin", **locals())
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int16', 'int32', 'int64', 'uint8'],
        'paddle.argmin')
    check_dtype(var_dtype, 'dtype', ['int32', 'int64'], 'argmin')
    out = helper.create_variable_for_type_inference(var_dtype)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={
            'keepdims': keepdim,
            'axis': axis,
            'flatten': flatten,
            'dtype': var_dtype,
        })
    out.stop_gradient = True
    return out
283 284


285
def index_select(x, index, axis=0, name=None):
    """
    Gathers slices of ``x`` along dimension ``axis`` at the positions
    given by the 1-D integer tensor ``index``.

    The result has the same number of dimensions as ``x``; dimension
    ``axis`` has size ``len(index)`` while every other dimension keeps
    the size it has in ``x``.

    Args:
        x (Tensor): The input Tensor to be operated. The data of ``x`` can be one of float32, float64, int32, int64.
        index (Tensor): The 1-D Tensor containing the indices to index. The data type of ``index`` must be int32 or int64.
        axis (int, optional): The dimension in which we index. Default: if None, the ``axis`` is 0.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A Tensor with same data type as ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            index = paddle.to_tensor([0, 1, 1], dtype='int32')
            out_z1 = paddle.index_select(x=x, index=index)
            #[[1. 2. 3. 4.]
            # [5. 6. 7. 8.]
            # [5. 6. 7. 8.]]
            out_z2 = paddle.index_select(x=x, index=index, axis=1)
            #[[ 1.  2.  2.]
            # [ 5.  6.  6.]
            # [ 9. 10. 10.]]
    """
    if in_dygraph_mode():
        return _C_ops.index_select(x, index, axis)

    if _in_legacy_dygraph():
        return _legacy_C_ops.index_select(x, index, 'dim', axis)

    # Static-graph path: validate both tensors, then append the op.
    helper = LayerHelper("index_select", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_select')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_select')

    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='index_select',
        inputs={'X': x, 'Index': index},
        outputs={'Out': out},
        attrs={'dim': axis})
    return out


345
def nonzero(x, as_tuple=False):
    """
    Return the indices of all non-zero elements of ``x``.

    With ``as_tuple=False`` the result is one tensor of shape [z, n],
    where n is the rank of ``x`` and z the number of non-zero elements.
    With ``as_tuple=True`` the result is a tuple of n tensors, one per
    dimension, each of shape [z, 1].

    Args:
        x (Tensor): The input tensor variable.
        as_tuple (bool): Return type, Tensor or tuple of Tensor.

    Returns:
        Tensor. The data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            x1 = paddle.to_tensor([[1.0, 0.0, 0.0],
                                   [0.0, 2.0, 0.0],
                                   [0.0, 0.0, 3.0]])
            out_z1 = paddle.nonzero(x1)
            print(out_z1)
            #[[0 0]
            # [1 1]
            # [2 2]]
            out_z1_tuple = paddle.nonzero(x1, as_tuple=True)
            for out in out_z1_tuple:
                print(out)
            #[[0]
            # [1]
            # [2]]
            #[[0]
            # [1]
            # [2]]
    """
    rank = len(x.shape)

    # Dispatch: new dygraph, legacy dygraph, then static graph.
    if in_dygraph_mode():
        outs = _C_ops.where_index(x)
    elif paddle.in_dynamic_mode():
        outs = _legacy_C_ops.where_index(x)
    else:
        helper = LayerHelper("where_index", **locals())
        outs = helper.create_variable_for_type_inference(
            dtype=core.VarDesc.VarType.INT64)
        helper.append_op(type='where_index',
                         inputs={'Condition': x},
                         outputs={'Out': [outs]})

    if not as_tuple:
        return outs
    if rank == 1:
        return (outs,)
    # Split the [z, rank] index tensor into one [z, 1] tensor per axis.
    return tuple(
        paddle.slice(outs, axes=[1], starts=[i], ends=[i + 1])
        for i in range(rank))


426
def sort(x, axis=-1, descending=False, name=None):
    """
    Sorts ``x`` along ``axis`` and returns the sorted values. Ascending
    by default; set ``descending`` to True for descending order.

    Args:
        x(Tensor): An input N-D Tensor with type float32, float64, int16,
            int32, int64, uint8.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is Rank(x). when axis<0, it works the same way
            as axis+R. Default is -1.
        descending(bool, optional) : Descending is a flag, if set to true,
            algorithm will sort by descending order, else sort by
            ascending order. Default is false.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: sorted tensor(with the same shape and data type as ``x``).

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[5, 8, 9, 5],
                                  [0, 0, 1, 7],
                                  [6, 9, 2, 4]], dtype='float32')
            out = paddle.sort(x=x, axis=-1)
            print(out)
            # [[5. 5. 8. 9.]
            #  [0. 0. 1. 7.]
            #  [2. 4. 6. 9.]]
    """
    # Both dygraph paths reuse the argsort kernel, which returns
    # (sorted_values, indices); only the values are wanted here.
    if in_dygraph_mode():
        return _C_ops.argsort(x, axis, descending)[0]

    if _in_legacy_dygraph():
        return _legacy_C_ops.argsort(x, 'axis', axis, 'descending',
                                     descending)[0]

    # Static-graph path: append an `argsort` op and return its Out.
    helper = LayerHelper("sort", **locals())
    sorted_values = helper.create_variable_for_type_inference(
        dtype=x.dtype, stop_gradient=False)
    indices = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': x},
        outputs={'Out': sorted_values, 'Indices': indices},
        attrs={'axis': axis, 'descending': descending})
    return sorted_values
C
Chengmo 已提交
506 507


508 509
def mode(x, axis=-1, keepdim=False, name=None):
    """
    Used to find values and indices of the modes at the optional axis.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

           import paddle

           tensor = paddle.to_tensor([[[1,2,2],[2,3,3]],[[0,5,5],[9,9,0]]], dtype=paddle.float32)
           res = paddle.mode(tensor, 2)
           print(res)
           # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
           #   [[2., 3.],
           #    [5., 9.]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
           #   [[1, 1],
           #    [1, 0]]))
    """
    if in_dygraph_mode():
        return _C_ops.mode(x, axis, keepdim)
    if _in_legacy_dygraph():
        return _legacy_C_ops.mode(x, "axis", axis, "keepdim", keepdim)

    # Static-graph path: append a `mode` op producing values and indices.
    helper = LayerHelper("mode", **locals())
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")
    helper.append_op(
        type="mode",
        inputs={"X": [x]},
        outputs={"Out": [values], "Indices": [indices]},
        attrs={"axis": axis, "keepdim": keepdim})
    indices.stop_gradient = True
    return values, indices


R
ronnywang 已提交
564
def where(condition, x=None, y=None, name=None):
    r"""
    Return a Tensor of elements selected from either :attr:`x` or :attr:`y` according to corresponding elements of :attr:`condition`. Concretely,

    .. math::

        out_i =
        \begin{cases}
        x_i, & \text{if}  \ condition_i \  \text{is} \ True \\
        y_i, & \text{if}  \ condition_i \  \text{is} \ False \\
        \end{cases}.

    Notes:
        ``numpy.where(condition)`` is identical to ``paddle.nonzero(condition, as_tuple=True)``, please refer to :ref:`api_tensor_search_nonzero`.

    Args:
        condition (Tensor): The condition to choose x or y. When True (nonzero), yield x, otherwise yield y.
        x (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is True with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        y (Tensor|scalar, optional): A Tensor or scalar to choose when the condition is False with data type of float32, float64, int32 or int64. Either both or neither of x and y should be given.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A Tensor with the same shape as :attr:`condition` and same data type as :attr:`x` and :attr:`y`.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([0.9383, 0.1983, 3.2, 1.2])
            y = paddle.to_tensor([1.0, 1.0, 1.0, 1.0])

            out = paddle.where(x>1, x, y)
            print(out)
            #out: [1.0, 1.0, 3.2, 1.2]
    """
    # Promote Python scalars to 1-element tensors, inferring the dtype
    # through numpy.
    if np.isscalar(x):
        x = paddle.full([1], x, np.array([x]).dtype.name)
    if np.isscalar(y):
        y = paddle.full([1], y, np.array([y]).dtype.name)

    # `where(condition)` alone is an alias of nonzero(..., as_tuple=True).
    if x is None and y is None:
        return nonzero(condition, as_tuple=True)
    if x is None or y is None:
        raise ValueError("either both or neither of x and y should be given")

    if not paddle.in_dynamic_mode():
        check_variable_and_dtype(condition, 'condition', ['bool'], 'where')
        check_variable_and_dtype(x, 'x',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')
        check_variable_and_dtype(y, 'y',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'where')

    cond_shape = list(condition.shape)
    x_shape = list(x.shape)
    y_shape = list(y.shape)

    if x_shape == y_shape and cond_shape == x_shape:
        # Already aligned — no broadcasting required.
        bcast_cond, bcast_x, bcast_y = condition, x, y
    else:
        # Broadcast all three operands to a common shape by adding a
        # zeros tensor that carries the broadcast shape.
        zeros_x = paddle.zeros_like(x)
        zeros_y = paddle.zeros_like(y)
        zeros_cond = paddle.zeros_like(condition)
        zeros_cond = paddle.cast(zeros_cond, x.dtype)
        cond_as_num = paddle.cast(condition, x.dtype)

        bcast_zeros = paddle.add(zeros_x, zeros_y)
        bcast_zeros = paddle.add(bcast_zeros, zeros_cond)
        bcast_x = paddle.add(x, bcast_zeros)
        bcast_y = paddle.add(y, bcast_zeros)
        bcast_cond = paddle.add(cond_as_num, bcast_zeros)
        bcast_cond = paddle.cast(bcast_cond, 'bool')

    if in_dygraph_mode():
        return _C_ops.where(bcast_cond, bcast_x, bcast_y)
    if _in_legacy_dygraph():
        return _legacy_C_ops.where(bcast_cond, bcast_x, bcast_y)

    # Static-graph path.
    helper = LayerHelper("where", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='where',
        inputs={
            'Condition': bcast_cond,
            'X': bcast_x,
            'Y': bcast_y
        },
        outputs={'Out': [out]})
    return out
669 670


C
Chengmo 已提交
671 672 673 674
def index_sample(x, index):
    """
    **IndexSample Layer**

    For every row ``i`` of the 2-D tensor ``x``, gathers the entries
    ``x[i, index[i, j]]``, so the output has the same shape as ``index``.

    .. code-block:: text


                Given:

                X = [[1, 2, 3, 4, 5],
                     [6, 7, 8, 9, 10]]

                Index = [[0, 1, 3],
                         [0, 2, 4]]

                Then:

                Out = [[1, 2, 4],
                       [6, 8, 10]]

    Args:
        x (Tensor): The source input tensor with 2-D shape. Supported data type is
            int32, int64, float32, float64.
        index (Tensor): The index input tensor with 2-D shape, first dimension should be same with X.
            Data type is int32 or int64.

    Returns:
        output (Tensor): The output is a tensor with the same shape as index.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]], dtype='float32')
            index = paddle.to_tensor([[0, 1, 2],
                                      [1, 2, 3],
                                      [0, 0, 0]], dtype='int32')
            out_z1 = paddle.index_sample(x, index)
            print(out_z1)
            #[[1. 2. 3.]
            # [6. 7. 8.]
            # [9. 9. 9.]]
    """
    if in_dygraph_mode():
        return _C_ops.index_sample(x, index)

    if _in_legacy_dygraph():
        return _legacy_C_ops.index_sample(x, index)

    # Static-graph path: validate both inputs, then append the op.
    helper = LayerHelper("index_sample", **locals())
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'],
                             'paddle.tensor.search.index_sample')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='index_sample',
        inputs={'X': x, 'Index': index},
        outputs={'Out': out})
    return out
765 766 767 768


def masked_select(x, mask, name=None):
    """
    Returns a new 1-D tensor which indexes the input tensor according to the ``mask``
    which is a tensor with data type of bool.

    Args:
        x (Tensor): The input Tensor, the data type can be int32, int64, float32, float64.
        mask (Tensor): The Tensor containing the binary mask to index with, it's data type is bool.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        A 1-D Tensor which is the same data type  as ``x``.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                                  [5.0, 6.0, 7.0, 8.0],
                                  [9.0, 10.0, 11.0, 12.0]])
            mask = paddle.to_tensor([[True, False, False, False],
                                     [True, True, False, False],
                                     [True, False, False, False]])
            out = paddle.masked_select(x, mask)
            #[1.0 5.0 6.0 9.0]
    """

    # Fast paths: dispatch directly to the C++ op in dynamic-graph modes.
    if in_dygraph_mode():
        return _C_ops.masked_select(x, mask)

    if _in_legacy_dygraph():
        return _legacy_C_ops.masked_select(x, mask)

    # Static-graph path: validate dtypes, then append the op to the program.
    helper = LayerHelper("masked_select", **locals())
    # BUGFIX: the op-name string used in the error message previously said
    # 'mask_select'; corrected to 'masked_select' so diagnostics point to the
    # real API name.
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'paddle.tensor.search.masked_select')
    check_variable_and_dtype(mask, 'mask', ['bool'],
                             'paddle.tensor.search.masked_select')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='masked_select',
                     inputs={
                         'X': x,
                         'Mask': mask
                     },
                     outputs={'Y': out})
    return out
W
wawltor 已提交
815 816 817 818


def topk(x, k, axis=None, largest=True, sorted=True, name=None):
    """
    Return values and indices of the k largest or smallest at the optional axis.
    If the input is a 1-D Tensor, finds the k largest or smallest values and indices.
    If the input is a Tensor with higher rank, this operator computes the top k values and indices along the :attr:`axis`.

    Args:
        x(Tensor): Tensor, an input N-D Tensor with type float32, float64, int32, int64.
        k(int, Tensor): The number of top elements to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. Default is -1.
        largest(bool, optional) : largest is a flag, if set to true,
            algorithm will sort by descending order, otherwise sort by
            ascending order. Default is True.
        sorted(bool, optional): controls whether to return the elements in sorted order, default value is True. In gpu device, it always return the sorted value.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            data_1 = paddle.to_tensor([1, 4, 5, 7])
            value_1, indices_1 = paddle.topk(data_1, k=1)
            print(value_1) # [7]
            print(indices_1) # [3]

            data_2 = paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]])
            value_2, indices_2 = paddle.topk(data_2, k=1)
            print(value_2) # [[7], [6]]
            print(indices_2) # [[3], [1]]

            value_3, indices_3 = paddle.topk(data_2, k=1, axis=-1)
            print(value_3) # [[7], [6]]
            print(indices_3) # [[3], [1]]

            value_4, indices_4 = paddle.topk(data_2, k=1, axis=0)
            print(value_4) # [[2, 6, 5, 7]]
            print(indices_4) # [[1, 1, 0, 0]]

    """

    if in_dygraph_mode():
        # BUGFIX: was `axis == None` — identity comparison with `is` per
        # PEP 8 (E711); `==` could also dispatch to an object's __eq__.
        if axis is None:
            axis = -1
        out, indices = _C_ops.top_k(x, k, axis, largest, sorted)
        return out, indices

    if _non_static_mode():
        # Legacy dygraph: omit the 'axis' attr entirely when unspecified so
        # the op uses its own default.
        if axis is None:
            out, indices = _legacy_C_ops.top_k_v2(x, 'k', int(k), 'largest',
                                                  largest, 'sorted', sorted)
        else:
            out, indices = _legacy_C_ops.top_k_v2(x, 'k', int(k), 'axis', axis,
                                                  'largest', largest, 'sorted',
                                                  sorted)
        return out, indices

    # Static-graph path: k may itself be a Variable, in which case it is fed
    # as an op input instead of a compile-time attribute.
    helper = LayerHelper("top_k_v2", **locals())
    inputs = {"X": [x]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}
    attrs['largest'] = largest
    attrs['sorted'] = sorted
    if axis is not None:
        attrs['axis'] = axis

    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(type="top_k_v2",
                     inputs=inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=attrs)
    # Indices are positions, not differentiable quantities.
    indices.stop_gradient = True
    return values, indices
Y
Yanxing Shi 已提交
905 906


907 908 909 910 911 912
def bucketize(x, sorted_sequence, out_int32=False, right=False, name=None):
    """
    This API is used to find the index of the corresponding 1D tensor `sorted_sequence` in the innermost dimension based on the given `x`.

    Args:
        x(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
        sorted_sequence(Tensor): An input 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
        out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
        right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `x`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
                               The default value is False and it shows the lower bounds.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor(the same sizes of the `x`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.

    Examples:

        .. code-block:: python

            import paddle

            sorted_sequence = paddle.to_tensor([2, 4, 8, 16], dtype='int32')
            x = paddle.to_tensor([[0, 8, 4, 16], [-1, 2, 8, 4]], dtype='int32')
            out1 = paddle.bucketize(x, sorted_sequence)
            print(out1)
            # Tensor(shape=[2, 4], dtype=int64, place=CPUPlace, stop_gradient=True,
            #        [[0, 2, 1, 3],
            #         [0, 0, 2, 1]])
            out2 = paddle.bucketize(x, sorted_sequence, right=True)
            print(out2)
            # Tensor(shape=[2, 4], dtype=int64, place=CPUPlace, stop_gradient=True,
            #        [[0, 3, 2, 4],
            #         [0, 1, 3, 2]])
            out3 = x.bucketize(sorted_sequence)
            print(out3)
            # Tensor(shape=[2, 4], dtype=int64, place=CPUPlace, stop_gradient=True,
            #        [[0, 2, 1, 3],
            #         [0, 0, 2, 1]])
            out4 = x.bucketize(sorted_sequence, right=True)
            print(out4)
            # Tensor(shape=[2, 4], dtype=int64, place=CPUPlace, stop_gradient=True,
            #        [[0, 3, 2, 4],
            #         [0, 1, 3, 2]])

    """
    # Validate the boundary tensor's dtype first, then its rank; bucketize is
    # the 1-D-boundaries specialization of searchsorted.
    check_variable_and_dtype(sorted_sequence, 'SortedSequence',
                             ['float32', 'float64', 'int32', 'int64'],
                             'paddle.searchsorted')
    ndim = sorted_sequence.dim()
    if ndim != 1:
        raise ValueError(
            f"sorted_sequence tensor must be 1 dimension, but got dim {sorted_sequence.dim()}"
        )
    # Note the argument order swap: searchsorted takes (boundaries, values).
    return searchsorted(sorted_sequence, x, out_int32, right, name)


Y
Yanxing Shi 已提交
962 963 964 965 966 967
def searchsorted(sorted_sequence,
                 values,
                 out_int32=False,
                 right=False,
                 name=None):
    """
    Find the index of the corresponding `sorted_sequence` in the innermost dimension based on the given `values`.

    Args:
        sorted_sequence(Tensor): An input N-D or 1-D tensor with type int32, int64, float32, float64. The value of the tensor monotonically increases in the innermost dimension.
        values(Tensor): An input N-D tensor value with type int32, int64, float32, float64.
        out_int32(bool, optional): Data type of the output tensor which can be int32, int64. The default value is False, and it indicates that the output data type is int64.
        right(bool, optional): Find the upper or lower bounds of the sorted_sequence range in the innermost dimension based on the given `values`. If the value of the sorted_sequence is nan or inf, return the size of the innermost dimension.
                               The default value is False and it shows the lower bounds.
        name(str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor(the same sizes of the `values`), return the tensor of int32 if set :attr:`out_int32` is True, otherwise return the tensor of int64.

    Examples:

        .. code-block:: python

            import paddle

            sorted_sequence = paddle.to_tensor([[1, 3, 5, 7, 9, 11],
                                                [2, 4, 6, 8, 10, 12]], dtype='int32')
            values = paddle.to_tensor([[3, 6, 9, 10], [3, 6, 9, 10]], dtype='int32')
            out1 = paddle.searchsorted(sorted_sequence, values)
            print(out1)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 2, 4, 4]])
            out2 = paddle.searchsorted(sorted_sequence, values, right=True)
            print(out2)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[2, 3, 5, 5],
            #         [1, 3, 4, 5]])
            sorted_sequence_1d = paddle.to_tensor([1, 3, 5, 7, 9, 11, 13])
            out3 = paddle.searchsorted(sorted_sequence_1d, values)
            print(out3)
            # Tensor(shape=[2, 4], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #        [[1, 3, 4, 5],
            #         [1, 3, 4, 5]])

    """
    # Dynamic-graph fast paths: hand off to the C++ kernel immediately.
    if in_dygraph_mode():
        return _C_ops.searchsorted(sorted_sequence, values, out_int32, right)

    if _in_legacy_dygraph():
        return _legacy_C_ops.searchsorted(sorted_sequence, values, "out_int32",
                                          out_int32, "right", right)

    # Static-graph path: both tensors accept the same numeric dtypes, so
    # validate them with a single loop.
    for tensor_arg, slot_name in ((sorted_sequence, 'SortedSequence'),
                                  (values, 'Values')):
        check_variable_and_dtype(tensor_arg, slot_name,
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'paddle.searchsorted')

    helper = LayerHelper('searchsorted', **locals())
    result_dtype = 'int32' if out_int32 else 'int64'
    result = helper.create_variable_for_type_inference(dtype=result_dtype)
    helper.append_op(type='searchsorted',
                     inputs={
                         'SortedSequence': sorted_sequence,
                         "Values": values
                     },
                     outputs={'Out': result},
                     attrs={
                         "out_int32": out_int32,
                         "right": right
                     })
    return result
1037 1038 1039 1040


def kthvalue(x, k, axis=None, keepdim=False, name=None):
    """
    Find values and indices of the k-th smallest at the axis.

    Args:
        x(Tensor): A N-D Tensor with type float32, float64, int32, int64.
        k(int): The k for the k-th smallest number to look for along the axis.
        axis(int, optional): Axis to compute indices along. The effective range
            is [-R, R), where R is x.ndim. when axis < 0, it works the same way
            as axis + R. The default is None. And if the axis is None, it will computed as -1 by default.
        keepdim(bool, optional): Whether to keep the given axis in output. If it is True, the dimensions will be same as input x and with size one in the axis. Otherwise the output dimentions is one fewer than x since the axis is squeezed. Default is False.
        name (str, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        tuple(Tensor), return the values and indices. The value data type is the same as the input `x`. The indices data type is int64.

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn((2,3,2))
            # Tensor(shape=[2, 3, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            #       [[[ 0.22954939, -0.01296274],
            #         [ 1.17135799, -0.34493217],
            #         [-0.19550551, -0.17573971]],
            #
            #        [[ 0.15104349, -0.93965352],
            #         [ 0.14745511,  0.98209465],
            #         [ 0.10732264, -0.55859774]]])
            y = paddle.kthvalue(x, 2, 1)
            # (Tensor(shape=[2, 2], dtype=float32, place=CUDAPlace(0), stop_gradient=True,
            # [[ 0.22954939, -0.17573971],
            #  [ 0.14745511, -0.55859774]]), Tensor(shape=[2, 2], dtype=int64, place=CUDAPlace(0), stop_gradient=True,
            #  [[0, 2],
            #  [1, 2]]))
    """
    if _non_static_mode():
        if _in_legacy_dygraph():
            # Legacy op: the 'axis' attr is only passed when the caller gave
            # one, so the kernel falls back to its own default otherwise.
            if axis is None:
                return _legacy_C_ops.kthvalue(x, 'k', k, "keepdim", keepdim)
            return _legacy_C_ops.kthvalue(x, 'k', k, "axis", axis, "keepdim",
                                          keepdim)
        # New dygraph op takes an explicit axis; None maps to the last axis.
        resolved_axis = -1 if axis is None else axis
        return _C_ops.kthvalue(x, k, resolved_axis, keepdim)

    # Static-graph path: build the op with axis as an optional attribute.
    helper = LayerHelper("kthvalue", **locals())
    op_inputs = {"X": [x]}
    op_attrs = {'k': k}
    if axis is not None:
        op_attrs['axis'] = axis
    values = helper.create_variable_for_type_inference(dtype=x.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(type="kthvalue",
                     inputs=op_inputs,
                     outputs={
                         "Out": [values],
                         "Indices": [indices]
                     },
                     attrs=op_attrs)
    # Positional indices carry no gradient.
    indices.stop_gradient = True
    return values, indices