# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
All layers related to the neural network.
"""

from __future__ import print_function

import os
import inspect
import warnings

import numpy as np
import six

import paddle
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program, _varbase_creator, static_only, _global_flags
from .. import dygraph_utils
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign, fill_constant, zeros, tensor_array_to_tensor
from . import utils
from .. import unique_name
from functools import reduce
from .. import core
from ..data_feeder import convert_dtype, check_variable_and_dtype, check_type, check_dtype
from paddle.utils import deprecated
from paddle import _C_ops

__all__ = [
    'fc',
    'embedding',
    'linear_chain_crf',
    'crf_decoding',
    'cos_sim',
    'chunk_eval',
    'conv2d',
    'conv3d',
    'softmax',
    'pool2d',
    'pool3d',
    'adaptive_pool2d',
    'adaptive_pool3d',
    'batch_norm',
    'inplace_abn',
    'instance_norm',
    'data_norm',
    'conv2d_transpose',
    'conv3d_transpose',
    'reduce_sum',
    'reduce_mean',
    'reduce_max',
    'reduce_min',
    'reduce_prod',
    'reduce_all',
    'reduce_any',
    'dropout',
    'split',
    'ctc_greedy_decoder',
    'l2_normalize',
    'matmul',
    'topk',
    'transpose',
    'im2sequence',
    'row_conv',
    'multiplex',
    'layer_norm',
    'group_norm',
    'spectral_norm',
    'smooth_l1',
    'one_hot',
    'autoincreased_step_counter',
    'reshape',
    'squeeze',
    'unsqueeze',
    'lod_reset',
    'lod_append',
    'lrn',
    'pad',
    'pad_constant_like',
    'label_smooth',
    'roi_pool',
    'roi_align',
    'dice_loss',
    'image_resize',
    'image_resize_short',
    'resize_linear',
    'resize_bilinear',
    'resize_trilinear',
    'resize_nearest',
    'gather',
    'gather_nd',
    'scatter',
    'scatter_nd_add',
    'scatter_nd',
    'random_crop',
    'mean_iou',
    'relu',
    'selu',
    'log',
    'crop',
    'crop_tensor',
    'elu',
    'relu6',
    'pow',
    'stanh',
    'hard_sigmoid',
    'swish',
    'prelu',
    'brelu',
    'leaky_relu',
    'soft_relu',
    'flatten',
    'stack',
    'pad2d',
    'unstack',
    'unique',
    'unique_with_counts',
    'expand',
    'expand_as',
    'scale',
    'elementwise_add',
    'elementwise_div',
    'elementwise_sub',
    'elementwise_mul',
    'elementwise_max',
    'elementwise_min',
    'elementwise_pow',
    'elementwise_mod',
    'elementwise_floordiv',
    'uniform_random_batch_size_like',
    'gaussian_random',
    'sampling_id',
    'gaussian_random_batch_size_like',
    'sum',
    'slice',
    'strided_slice',
    'shape',
    'rank',
    'size',
    'logical_and',
    'logical_or',
    'logical_xor',
    'logical_not',
    'clip',
    'clip_by_norm',
    'mean',
    'mul',
    'maxout',
    'space_to_depth',
    'affine_grid',
    'affine_channel',
    'similarity_focus',
    'hash',
    'grid_sampler',
    'log_loss',
    'add_position_encoding',
    'bilinear_tensor_product',
    'merge_selected_rows',
    'get_tensor_from_selected_rows',
    'shuffle_channel',
    'temporal_shift',
    'py_func',
    'psroi_pool',
    'prroi_pool',
    'pixel_shuffle',
    'fsp_matrix',
    'continuous_value_model',
    'where',
    'sign',
    'deformable_conv',
    'unfold',
    'deformable_roi_pooling',
    'filter_by_instag',
    'shard_index',
    'hard_swish',
    'mish',
    'gather_tree',
    'uniform_random',
    'unbind',
]


@dygraph_only
def _elementwise_op_in_dygraph(x,
                               y,
                               axis=-1,
                               act=None,
                               use_mkldnn=False,
                               op_name=None):
    op = getattr(_C_ops, op_name)
    out = op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn)

    return dygraph_utils._append_activation_in_dygraph(
        out, act, use_mkldnn=use_mkldnn)


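# Illustrative sketch (hypothetical helper, not part of the module API): the
# _C_ops kernels used above take op attributes as a flat trailing list of
# name/value pairs, e.g. op(x, y, 'axis', axis, 'use_mkldnn', use_mkldnn).
def _flat_attrs_sketch(**attrs):
    """Flatten {'axis': -1, 'use_mkldnn': False} into ['axis', -1, 'use_mkldnn', False]."""
    flat = []
    for key, value in attrs.items():
        flat.extend([key, value])
    return flat

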
def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None):
    r"""
    :api_attr: Static Graph

    **Fully Connected Layer**

    This operator creates a fully connected layer in the network. It can take
    a Tensor(or LoDTensor) or a list of Tensor(or LoDTensor) as its inputs(see
    Args in detail). It creates a variable called weight for each input Tensor,
    which represents a fully connected weight matrix from each input unit to
    each output unit. The fully connected layer multiplies each input Tensor
    with its corresponding weight to produce an output Tensor with shape :math:`[M, size]` ,
    where M is batch size. If a list of Tensor is given, the results of
    multiple output Tensors with shape :math:`[M, size]` will be summed up. If :attr:`bias_attr`
    is not None, a bias variable will be created and added to the output.
    Finally, if :attr:`act` is not None, it will be applied to the output as well.

    When the input is a single Tensor(or LoDTensor):

    .. math::

        Out = Act({XW + b})

    When the input is a list of Tensor(or LoDTensor):

    .. math::

        Out = Act({\sum_{i=0}^{N-1}X_iW_i + b})

    In the above equation:

    * :math:`N`: The number of inputs. N equals len(input) if the input is a list of Variables.
    * :math:`X_i`: The i-th input tensor.
    * :math:`W_i`: The i-th weight matrix, corresponding to the i-th input tensor.
    * :math:`b`: The bias parameter created by this layer (if needed).
    * :math:`Act`: The activation function.
    * :math:`Out`: The output Tensor.

    .. code-block:: text

        Case 1:
        Given a single Tensor data_1, and num_flatten_dims = 2:
            data_1.data = [[[0.1, 0.2],
                            [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size

            out = fluid.layers.fc(input=data_1, size=1, num_flatten_dims=2)

        Then output is:
            out.data = [[0.83234344], [0.34936576]]
            out.shape = (1, 2, 1)

        Case 2:
        Given a list of Tensor:
            data_1.data = [[[0.1, 0.2],
                           [0.3, 0.4]]]
            data_1.shape = (1, 2, 2) # 1 is batch_size

            data_2 = [[[0.1, 0.2, 0.3]]]
            data_2.shape = (1, 1, 3)

            out = fluid.layers.fc(input=[data_1, data_2], size=2)

        Then:
            out.data = [[0.18669507, 0.1893476]]
            out.shape = (1, 2)

    Args:
        input (Variable|list of Variable): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` or
            a list of Tensor(or LoDTensor). The dimensions of the input Tensor are at least 2 and the data
            type should be float32 or float64.
        size(int): The number of output units in this layer, which also means the feature size of output
            Tensor(or LoDTensor).
        num_flatten_dims (int): The fc layer can accept an input Tensor with more than
            two dimensions. If this happens, the multidimensional tensor will first be flattened
            into a 2-D matrix. The parameter :attr:`num_flatten_dims` determines how the input
            Tensor is flattened: the first :attr:`num_flatten_dims` (inclusive, index starts from 1)
            dimensions will be flattened to form the first dimension of the final matrix (height of
            the matrix), and the rest :math:`rank(X) - num\_flatten\_dims` dimensions are flattened to
            form the second dimension of the final matrix (width of the matrix). For example, assuming that
            X is a 5-dimensional Tensor with a shape [2, 3, 4, 5, 6], and :attr:`num_flatten_dims` = 3.
            Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default: 1.
        param_attr (ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr): To specify the bias parameter property. Default: None, which means the
            default bias parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` .
        act (str): Activation to be applied to the output of this layer, such as tanh, softmax,
            sigmoid, relu. For more information, please refer to :ref:`api_guide_activations_en` . Default: None.
        name (str, optional): The default value is None. Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: Tensor or LoDTensor calculated by fc layer. The data type is the same as the input.

    Raises:
        ValueError: If the dimensions of the input Tensor are less than 2.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()

          # when input is a single tensor
          data = fluid.data(name="data", shape=[-1, 32], dtype="float32")
          fc = fluid.layers.fc(input=data, size=1000, act="tanh")

          # when input is a list of tensors
          data_1 = fluid.data(name="data_1", shape=[-1, 32], dtype="float32")
          data_2 = fluid.data(name="data_2", shape=[-1, 36], dtype="float32")
          fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
    """
    helper = LayerHelper("fc", **locals())
    check_type(input, 'input', (list, tuple, Variable), 'fc')
    if isinstance(input, (list, tuple)):
        for i, input_x in enumerate(input):
            check_type(input_x, 'input[' + str(i) + ']', Variable, 'fc')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'input', ['float16', 'uint16', 'float32', 'float64'],
                'fc')
    mul_results = []
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        if num_flatten_dims == -1:
            num_flatten_dims = len(input_shape) - 1
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
        ] + [size]

        w = helper.create_parameter(
            attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
        tmp = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="mul",
            inputs={"X": input_var,
                    "Y": w},
            outputs={"Out": tmp},
            attrs={"x_num_col_dims": num_flatten_dims,
                   "y_num_col_dims": 1})
        mul_results.append(tmp)

    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_variable_for_type_inference(dtype)
        helper.append_op(
            type="sum",
            inputs={"X": mul_results},
            outputs={"Out": pre_bias},
            attrs={"use_mkldnn": False})
    # add bias
    pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims)
    # add activation
    return helper.append_activation(pre_activation)


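# Illustrative sketch (hypothetical helper, not part of the module API) of the
# flattening arithmetic fc() performs: the first num_flatten_dims dimensions
# become the matrix height, the remaining dimensions become the width, and the
# created weight has shape [width, size], mirroring param_shape above.
def _fc_flatten_sketch(input_shape, size, num_flatten_dims=1):
    height = reduce(lambda a, b: a * b, input_shape[:num_flatten_dims], 1)
    width = reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
    return (height, width), [width, size]

# e.g. _fc_flatten_sketch([2, 3, 4, 5, 6], size=10, num_flatten_dims=3) returns
# ((24, 30), [30, 10]), matching the [2 x 3 x 4, 5 x 6] = [24, 30] example in
# the fc() docstring.

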
@deprecated(since="2.0.0", update_to="paddle.nn.functional.embedding")
def embedding(input,
              size,
              is_sparse=False,
              is_distributed=False,
              padding_idx=None,
              param_attr=None,
              dtype='float32'):
    r"""
    :api_attr: Static Graph

    **WARNING:** This OP will be deprecated in a future release. It requires the
    last dimension of the input Tensor shape to be 1. It is recommended to use
    fluid. :ref:`api_fluid_embedding` .

    This OP looks up the embedding vector of each id provided by :attr:`input` .
    It automatically constructs a 2D embedding matrix based on the
    input :attr:`size` (vocab_size, emb_size) and :attr:`dtype` .

    The last dimension of the input Tensor shape must be equal to 1. The shape
    of the output Tensor is generated by replacing that last dimension with emb_size.

    **Note:** Each id in :attr:`input` must satisfy :math:`0 <= id < size[0]` ,
    otherwise the program will throw an exception and exit.

    .. code-block:: text

        Case 1:

        input is a Tensor. padding_idx = -1
            input.data = [[[1], [3]], [[2], [4]], [[4], [127]]]
            input.shape = [3, 2, 1]
        Given size = [128, 16]
        output is a Tensor:
            out.shape = [3, 2, 16]
            out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654]],

                        [[0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365]],

                        [[0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]]  # padding data
        Since the given padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
        All-zero padding data is emitted whenever an id equals 127.

        Case 2:

        input is a LoDTensor with 1-level LoD. padding_idx = 0
            input.lod = [[2, 3]]
            input.data = [[1], [3], [2], [4], [0]]
            input.shape = [5, 1]
        Given size = [128, 16]
        output is a LoDTensor:
            out.lod = [[2, 3]]
            out.shape = [5, 16]
            out.data = [[0.129435295, 0.244512452, ..., 0.436322452],
                        [0.345421456, 0.524563927, ..., 0.144534654],
                        [0.345249859, 0.124939536, ..., 0.194353745],
                        [0.945345345, 0.435394634, ..., 0.435345365],
                        [0.0,         0.0,         ..., 0.0        ]]  # padding data
        All-zero padding data is emitted whenever an id equals 0.

    Args:
        input(Variable): A Tensor or LoDTensor with type int64, which contains the id information.
            The last dimension of the Tensor shape must be equal to 1. The value of each input id
            should satisfy :math:`0 <= id < size[0]` .
        size(tuple|list): The shape of the lookup table parameter. It should have two elements which
            indicate the size of the dictionary of embeddings and the size of each embedding vector respectively.
        is_sparse(bool): The flag indicating whether to use sparse update. This parameter only
            affects the performance of the backward gradient update. It is recommended to set
            it to True because sparse update is faster. However, some optimizers do not support sparse update,
            such as :ref:`api_fluid_optimizer_AdadeltaOptimizer` , :ref:`api_fluid_optimizer_AdamaxOptimizer` ,
            :ref:`api_fluid_optimizer_DecayedAdagradOptimizer` , :ref:`api_fluid_optimizer_FtrlOptimizer` ,
            :ref:`api_fluid_optimizer_LambOptimizer` and :ref:`api_fluid_optimizer_LarsMomentumOptimizer` .
            In these cases, is_sparse must be False. Default: False.
        is_distributed(bool): Whether to store the embedding matrix in a distributed manner. Only used
            in multi-machine distributed CPU training. Default: False.
        padding_idx(int|long|None): padding_idx needs to be in the interval [-vocab_size, vocab_size).
            If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
            to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever lookup
            encounters :math:`padding\_idx` in an id. The padding data will not be updated while training.
            If set to None, it has no effect on the output. Default: None.
        param_attr(ParamAttr): To specify the weight parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in :ref:`api_fluid_ParamAttr` . In addition,
            user-defined or pre-trained word vectors can be loaded with the :attr:`param_attr` parameter.
            The local word vectors need to be transformed into numpy format, and their shape
            should be consistent with :attr:`size` . Then :ref:`api_fluid_initializer_NumpyArrayInitializer`
            is used to load custom or pre-trained word vectors. See code example 2 for details.
        dtype(str|core.VarDesc.VarType): It refers to the data type of the output Tensor.
            It must be float32 or float64. Default: float32.

    Returns:
        Variable: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np
          import paddle
          paddle.enable_static()

          data = fluid.data(name='x', shape=[None, 1], dtype='int64')

          # example 1
          emb_1 = fluid.embedding(input=data, size=[128, 64])

          # example 2: load custom or pre-trained word vectors
          weight_data = np.random.random(size=(128, 100))  # word vectors with numpy format
          w_param_attrs = fluid.ParamAttr(
              name="emb_weight",
              learning_rate=0.5,
              initializer=fluid.initializer.NumpyArrayInitializer(weight_data),
              trainable=True)
          emb_2 = fluid.layers.embedding(input=data, size=(128, 100), param_attr=w_param_attrs, dtype='float32')
    """

    helper = LayerHelper('embedding', **locals())
    check_variable_and_dtype(input, 'input', ['int64'],
                             'fluid.layers.embedding')
    check_dtype(dtype, 'dtype', ['uint16', 'float16', 'float32', 'float64'],
                'fluid.layers.embedding')

    if is_distributed:
        is_distributed = False
        warnings.warn(
            "is_distributed is no longer in use; "
            "`fluid.contrib.layers.sparse_embedding` is what you need")

    remote_prefetch = True if is_sparse else False

    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
    tmp = helper.create_variable_for_type_inference(dtype)
    padding_idx = -1 if padding_idx is None else padding_idx if padding_idx >= 0 else (
        size[0] + padding_idx)
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': tmp},
        attrs={
            'is_sparse': is_sparse,
            'is_distributed': is_distributed,
            'remote_prefetch': remote_prefetch,
            'padding_idx': padding_idx
        })
    return tmp


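# Illustrative sketch (hypothetical helper, not part of the module API) of the
# padding_idx normalization performed in embedding() above: None maps to -1 and
# a negative index is shifted into [0, vocab_size).
def _normalize_padding_idx_sketch(padding_idx, vocab_size):
    if padding_idx is None:
        return -1
    return padding_idx if padding_idx >= 0 else vocab_size + padding_idx

# e.g. _normalize_padding_idx_sketch(-1, 128) == 127, matching Case 1 in the
# embedding() docstring.

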
def _pull_sparse(input,
                 size,
                 table_id,
                 accessor_class,
                 name="embedding",
                 ctr_label_name="",
                 padding_id=0,
                 dtype='float32',
                 scale_sparse_grad=True):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    the Fleet lookup table. The result of this lookup is the embedding of each ID
    in :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector.
        table_id(int): the fleet table id of this embedding.
        accessor_class(str): the pslib accessor of the table; default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup; default is 0.
        dtype(str): The data type of the output tensor. Only float32 is supported now.
        scale_sparse_grad(bool): whether to scale the sparse gradient with batch size; default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
    helper.append_op(
        type='pull_sparse',
        inputs={'Ids': inputs,
                'W': w},
        outputs={'Out': outs},
        attrs=attrs)
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_sparse_v2(input,
                    size,
                    table_id,
                    accessor_class,
                    name="embedding",
                    ctr_label_name="",
                    padding_id=0,
                    dtype='float32',
                    scale_sparse_grad=True):
    r"""
    **Pull Fleet Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    the Fleet lookup table. The result of this lookup is the embedding of each ID
    in :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector.
        table_id(int): the pslib table id of this embedding.
        accessor_class(str): the fleet accessor of the table; default is DownpourCtrAccessor.
        ctr_label_name(str): the layer name of click.
        padding_id(int): the padding id during lookup; default is 0.
        dtype(str): The data type of the output tensor. Only float32 is supported now.
        scale_sparse_grad(bool): whether to scale the sparse gradient with batch size; default
            is True.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.nn._pull_sparse_v2(
              input=data, size=11, table_id=0, accessor_class="DownpourCtrAccessor")
    """
    helper = LayerHelper(name, **locals())
    inputs = helper.multiple_input()
    outs = [helper.create_variable_for_type_inference(dtype)]
    input_names = [i.name for i in inputs]
    attrs = {
        'EmbeddingDim': size,
        'TableId': table_id,
        'AccessorClass': accessor_class,
        'CtrLabelName': ctr_label_name,
        'PaddingId': padding_id,
        'ScaleSparseGrad': scale_sparse_grad,
        'InputNames': input_names,
        # this is only for compatibility with the embedding op
        'is_distributed': True
    }
    # this is only for compatibility with the embedding op
    w, _ = helper.create_or_get_global_variable(
        name=name, shape=[size], dtype=dtype, is_bias=False, persistable=True)
    helper.append_op(
        type='pull_sparse_v2',
        inputs={'Ids': inputs,
                'W': w},
        outputs={'Out': outs},
        attrs=attrs)
    if len(outs) == 1:
        return outs[0]
    return outs


def _pull_box_sparse(input,
                     size,
                     dtype='float32',
                     is_distributed=False,
                     is_sparse=False):
    r"""
    **Pull Box Sparse Layer**

    This layer is used to look up embeddings of IDs, provided by :attr:`input`, in
    the BoxPS lookup table. The result of this lookup is the embedding of each ID
    in :attr:`input`.

    Args:
        input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
            contains the IDs information.
        size(int): The embedding size parameter, which indicates the size of
            each embedding vector.
        dtype(str): The data type of the output tensor. Only float32 is supported now.

    Returns:
        Variable|list of Variable: The tensor variable storing the embeddings of the \
                  supplied inputs.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          data = fluid.layers.data(name='sequence', shape=[1], dtype='int64', lod_level=1)
          emb = fluid.layers.pull_box_sparse(input=data, size=[11])
    """
    helper = LayerHelper('pull_box_sparse', **locals())
    if dtype != 'float32':
        raise ValueError(
            "BoxPS only supports float32 embeddings now, and your type is: " +
            dtype)
    helper.input_dtype()
    inputs = helper.multiple_input()
    outs = [
        helper.create_variable_for_type_inference(dtype)
        for i in range(len(inputs))
    ]
    w = helper.create_parameter(
        attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False)
    helper.append_op(
        type='pull_box_sparse',
        inputs={'Ids': inputs,
                'W': w},
        outputs={'Out': outs},
        attrs={
            'size': size,
            'is_distributed': is_distributed,
            'is_sparse': is_sparse
        })
    if len(outs) == 1:
        return outs[0]
    return outs


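# Illustrative note (hypothetical helper, not part of the module API): the
# _pull_sparse, _pull_sparse_v2 and _pull_box_sparse helpers above all share
# the same return convention, sketched here: one output per input Variable,
# with a bare Variable returned when there is exactly one.
def _single_or_list_sketch(outs):
    return outs[0] if len(outs) == 1 else outs

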
@templatedoc()
def linear_chain_crf(input, label, param_attr=None, length=None):
    """
    :api_attr: Static Graph

    Linear Chain CRF.

    ${comment}

    Args:
        input(${emission_type}): ${emission_comment}
        label(${label_type}): ${label_comment}
        length(${length_type}): ${length_comment}
        param_attr(ParamAttr): The attribute of the learnable parameter for the transition matrix.

    Returns:
        output(${emission_exps_type}): ${emission_exps_comment} \n
        output(${transition_exps_type}): ${transition_exps_comment} \n
        output(${log_likelihood_type}): ${log_likelihood_comment} \n

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            #define net structure, using LoDTensor
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                input_data = fluid.data(name='input_data', shape=[-1,10], dtype='float32')
                label = fluid.data(name='label', shape=[-1,1], dtype='int')
                emission = fluid.layers.fc(input=input_data, size=10, act="tanh")
                crf_cost = fluid.layers.linear_chain_crf(
                    input=emission,
                    label=label,
                    param_attr=fluid.ParamAttr(
                    name='crfw',
                    learning_rate=0.01))
            use_cuda = False
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(startup_program)
            #define data, using LoDTensor
            a = fluid.create_lod_tensor(np.random.rand(12,10).astype('float32'), [[3,3,4,2]], place)
            b = fluid.create_lod_tensor(np.array([[1],[1],[2],[3],[1],[1],[1],[3],[1],[1],[1],[1]]),[[3,3,4,2]] , place)
            feed1 = {'input_data':a,'label':b}
            loss = exe.run(train_program,feed=feed1, fetch_list=[crf_cost])
            print(loss)

            #define net structure, using padding
            train_program = fluid.Program()
            startup_program = fluid.Program()
            with fluid.program_guard(train_program, startup_program):
                input_data2 = fluid.data(name='input_data2', shape=[-1,10,10], dtype='float32')
                label2 = fluid.data(name='label2', shape=[-1,10,1], dtype='int')
                label_length = fluid.data(name='length', shape=[-1,1], dtype='int')
                emission2 = fluid.layers.fc(input=input_data2, size=10, act="tanh", num_flatten_dims=2)
                crf_cost2 = fluid.layers.linear_chain_crf(
                    input=emission2,
                    label=label2,
                    length=label_length,
                    param_attr=fluid.ParamAttr(
                     name='crfw',
                     learning_rate=0.01))

            use_cuda = False
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(startup_program)

            #define data, using padding
            cc = np.random.rand(4,10,10).astype('float32')
            dd = np.random.rand(4,10,1).astype('int64')
            ll = np.array([[3],[3],[4],[2]])
            feed2 = {'input_data2':cc,'label2':dd,'length':ll}
            loss2 = exe.run(train_program,feed=feed2, fetch_list=[crf_cost2])
            print(loss2)
            #[array([[ 7.8902354],
            #        [ 7.3602567],
            #        [ 10.004011],
            #        [ 5.86721  ]], dtype=float32)]

            #you can use find_var to get the transition parameter.
            transition = np.array(fluid.global_scope().find_var('crfw').get_tensor())
            print(transition)

    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'linear_chain_crf')
    check_variable_and_dtype(label, 'label', ['int64'], 'linear_chain_crf')
    helper = LayerHelper('linear_chain_crf', **locals())
    size = input.shape[2] if length else input.shape[1]
    transition = helper.create_parameter(
        attr=helper.param_attr,
        shape=[size + 2, size],
        dtype=helper.input_dtype())
    alpha = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    emission_exps = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    transition_exps = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    log_likelihood = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype())
    this_inputs = {
        "Emission": [input],
        "Transition": transition,
        "Label": [label]
    }
    if length:
        this_inputs['Length'] = [length]
    helper.append_op(
        type='linear_chain_crf',
        inputs=this_inputs,
        outputs={
            "Alpha": [alpha],
            "EmissionExps": [emission_exps],
            "TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })

    return log_likelihood


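# Illustrative sketch (hypothetical helper, not part of the module API):
# linear_chain_crf() above creates a transition parameter of shape
# [num_labels + 2, num_labels]; the two extra rows are conventionally reserved
# for the start and end transition weights.
def _crf_transition_shape_sketch(num_labels):
    return [num_labels + 2, num_labels]  # mirrors shape=[size + 2, size] above

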
@templatedoc()
def crf_decoding(input, param_attr, label=None, length=None):
    """
    :api_attr: Static Graph

    ${comment}

    Args:
        input(Tensor): ${emission_comment}

        param_attr (ParamAttr|None): To specify the weight parameter attribute.
            Default: None, which means the default weight parameter property is
            used. See usage for details in :ref:`api_paddle_fluid_param_attr_ParamAttr` .

        label(${label_type}, optional): ${label_comment}

        length(${length_type}, optional): ${length_comment}

    Returns:
        Tensor: ${viterbi_path_comment}

    Examples:
        .. code-block:: python

           import paddle
           paddle.enable_static()

           # LoDTensor-based example
           num_labels = 10
           feature = paddle.static.data(name='word_emb', shape=[-1, 784], dtype='float32', lod_level=1)
           label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64', lod_level=1)
           emission = paddle.static.nn.fc(feature, size=num_labels)

           crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label,
                     param_attr=paddle.ParamAttr(name="crfw"))
           crf_decode = paddle.static.nn.crf_decoding(input=emission,
                     param_attr=paddle.ParamAttr(name="crfw"))

           # Common tensor example
           num_labels, max_len = 10, 20
           feature = paddle.static.data(name='word_emb_pad', shape=[-1, max_len, 784], dtype='float32')
           label = paddle.static.data(name='label_pad', shape=[-1, max_len, 1], dtype='int64')
           length = paddle.static.data(name='length', shape=[-1, 1], dtype='int64')
           emission = paddle.static.nn.fc(feature, size=num_labels,
                                      num_flatten_dims=2)

           crf_cost = paddle.fluid.layers.linear_chain_crf(input=emission, label=label, length=length,
                     param_attr=paddle.ParamAttr(name="crfw_pad"))
           crf_decode = paddle.static.nn.crf_decoding(input=emission, length=length,
                     param_attr=paddle.ParamAttr(name="crfw_pad"))
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'crf_decoding')
    helper = LayerHelper('crf_decoding', **locals())
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)
    inputs = {"Emission": [input], "Transition": transition, "Label": label}
    if length:
        inputs['Length'] = length
    helper.append_op(
        type='crf_decoding',
        inputs=inputs,
        outputs={"ViterbiPath": [viterbi_path]})

    return viterbi_path


@templatedoc()
def cos_sim(X, Y):
    """
    ${comment}

    Args:
        X (Tensor): ${x_comment}.
        Y (Tensor): ${y_comment}.

    Returns:
        A Tensor representing the output of cosine(X, Y).

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.rand(shape=[3, 7], dtype='float32')
            y = paddle.rand(shape=[1, 7], dtype='float32')
            out = paddle.fluid.layers.cos_sim(x, y)
            print(out)

    """
    check_variable_and_dtype(X, 'X', ['float32'], 'cos_sim')
    check_variable_and_dtype(Y, 'Y', ['float32'], 'cos_sim')
    helper = LayerHelper('cos_sim', **locals())
    out = helper.create_variable_for_type_inference(dtype=X.dtype)
    xnorm = helper.create_variable_for_type_inference(dtype=X.dtype)
    ynorm = helper.create_variable_for_type_inference(dtype=X.dtype)
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X],
                'Y': [Y]},
        outputs={'Out': [out],
                 'XNorm': [xnorm],
                 'YNorm': [ynorm]})
    return out


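# Illustrative numpy sketch (hypothetical helper, not part of the module API)
# of what the cos_sim op above computes row-wise; y broadcasts against x when
# it has a single row:
def _cos_sim_sketch(x, y):
    xnorm = np.sqrt((x * x).sum(axis=1))
    ynorm = np.sqrt((y * y).sum(axis=1))
    return (x * y).sum(axis=1) / (xnorm * ynorm)

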
@deprecated(since="2.0.0", update_to="paddle.nn.functional.dropout")
def dropout(x,
            dropout_prob,
            is_test=None,
            seed=None,
            name=None,
            dropout_implementation="downgrade_in_infer"):
    """
969

970 971 972 973
    Computes dropout.

    Drop or keep each element of `x` independently. Dropout is a regularization
    technique for reducing overfitting by preventing neuron co-adaption during
974
    training. The dropout operator randomly sets (according to the given dropout
975 976 977
    probability) the outputs of some units to zero, while others are remain
    unchanged.

H
haowang101779990 已提交
978 979
    dropout op can be removed from the program to make the program more efficient.

980
    Args:
L
lvmengsi 已提交
981
        x (Variable): The input tensor variable. The data type is float16 or float32 or float64.
982
        dropout_prob (float): Probability of setting units to zero.
983 984
        is_test (bool): A flag indicating whether it is in test phrase or not. 
                        Default None, in dynamic graph, it use global tracer mode; in static graph, it means False.
985 986 987
        seed (int): A Python integer used to create random seeds. If this
                    parameter is set to None, a random seed is used.
                    NOTE: If an integer seed is given, always the same output
L
lvmengsi 已提交
988
                    units will be dropped. DO NOT use a fixed seed in training.Default: None.
989 990
        name (str|None): A name for this layer(optional). If set None, the layer
                         will be named automatically.
H
haowang101779990 已提交
991 992
        dropout_implementation(string): ['downgrade_in_infer'(default)|'upscale_in_train']

P
phlrain 已提交
993
                                        1. downgrade_in_infer(default), downgrade the outcome at inference
H
haowang101779990 已提交
994 995

                                           - train: out = input * mask
C
ceci3 已提交
996
                                           - inference: out = input * (1.0 - dropout_prob)
H
haowang101779990 已提交
997 998 999

                                           (mask is a tensor same shape with input, value is 0 or 1
                                           ratio of 0 is dropout_prob)
P
phlrain 已提交
1000
                                        2. upscale_in_train, upscale the outcome at training time
1001

H
haowang101779990 已提交
1002 1003
                                           - train: out = input * mask / ( 1.0 - dropout_prob )
                                           - inference: out = input
P
phlrain 已提交
1004

H
haowang101779990 已提交
1005 1006
                                           (mask is a tensor same shape with input, value is 0 or 1
                                           ratio of 0 is dropout_prob)
1007

M
minqiyang 已提交
1008

1009
    Returns:
L
lvmengsi 已提交
1010
        A Variable holding Tensor representing the dropout, has same shape and data type with `x`.
1011 1012

    Examples:
1013

1014 1015
        .. code-block:: python

1016
            import paddle
1017
            import paddle.fluid as fluid
1018 1019
            
            paddle.enable_static()
L
lvmengsi 已提交
1020
            x = fluid.data(name="data", shape=[None, 32, 32], dtype="float32")
T
tianshuo78520a 已提交
1021
            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
1022
    """
    # fast return for p == 0
    if dropout_prob == 0:
        return x

    if in_dygraph_mode():
        if (seed is None or
                seed == 0) and default_main_program().random_seed != 0:
            seed = default_main_program().random_seed
        if is_test is None:
            is_test = not _dygraph_tracer()._train_mode
        out, mask = _C_ops.dropout(
            x, 'dropout_prob', dropout_prob, 'is_test', is_test, 'fix_seed',
            seed is not None, 'seed', seed if seed is not None else 0,
            'dropout_implementation', dropout_implementation)
        return out

    def get_attrs(prog, dropout_prob, is_test, seed):
        if (seed is None or seed == 0) and prog.random_seed != 0:
            seed = prog.random_seed
        attrs = {
            'dropout_prob': dropout_prob,
            'is_test': is_test,
            'fix_seed': seed is not None,
            'seed': seed if seed is not None else 0,
            'dropout_implementation': dropout_implementation,
        }
        return attrs

    helper = LayerHelper('dropout', **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'dropout')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    mask = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.UINT8, stop_gradient=True)

    attrs = get_attrs(helper.main_program, dropout_prob, is_test, seed)

    helper.append_op(
        type='dropout',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'Mask': [mask]},
        attrs=attrs)
    return out


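# Illustrative numpy sketch (hypothetical helper, not part of the module API)
# of the two dropout_implementation modes documented in dropout() above:
def _dropout_modes_sketch(x, dropout_prob, training, mode):
    mask = (np.random.uniform(size=x.shape) >= dropout_prob).astype(x.dtype)
    if mode == 'downgrade_in_infer':
        return x * mask if training else x * (1.0 - dropout_prob)
    # mode == 'upscale_in_train'
    return x * mask / (1.0 - dropout_prob) if training else x

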
@templatedoc()
def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               seq_length=None):
    r"""
    This operator computes the precision, recall and F1-score for chunk detection.
    It is often used in sequence tagging tasks, such as Named Entity Recognition (NER).

    For some basics of chunking, please refer to
    `Chunking with Support Vector Machines <https://aclanthology.info/pdf/N/N01/N01-1025.pdf>`_ .

    This operator supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
    Here is a NER example for the usage of these tagging schemes:

    .. code-block:: python

       ====== ====== ======  =====  ==  ============   =====  ===== =====  ==  =========
              Li     Ming    works  at  Agricultural   Bank   of    China  in  Beijing.
       ====== ====== ======  =====  ==  ============   =====  ===== =====  ==  =========
       IO     I-PER  I-PER   O      O   I-ORG          I-ORG  I-ORG I-ORG  O   I-LOC
       IOB    B-PER  I-PER   O      O   B-ORG          I-ORG  I-ORG I-ORG  O   B-LOC
       IOE    I-PER  E-PER   O      O   I-ORG          I-ORG  I-ORG E-ORG  O   E-LOC
       IOBES  B-PER  E-PER   O      O   I-ORG          I-ORG  I-ORG E-ORG  O   S-LOC
       ====== ====== ======  =====  ==  ============   =====  ===== =====  ==  =========

    There are three chunk types (named entity types) here: PER (person), ORG (organization)
    and LOC (location), and we can see that the labels have the form `<tag type>-<chunk type>` .

    Since the implementation of this operator actually uses label ids rather than
    label strings, to make it work there should be a way to map label ids to
    tag types and chunk types. This operator uses the following way to do the mapping:

    .. code-block:: python

       tag_type = label % num_tag_type
       chunk_type = label / num_tag_type

    where `num_tag_type` is the number of tag types in the tagging scheme, `num_chunk_type`
    is the number of chunk types, and `tag_type` gets its value from the following table.

    .. code-block:: python

       Scheme Begin Inside End   Single
        plain   0     -      -     -
        IOB     0     1      -     -
        IOE     -     0      1     -
        IOBES   0     1      2     3

    Accordingly, in the above NER example, if the tagging scheme is IOB and the chunk
    types are ORG, PER and LOC, then the label ids would be as follows:

    .. code-block:: python

       B-ORG  0
       I-ORG  1
       B-PER  2
       I-PER  3
       B-LOC  4
       I-LOC  5
       O      6

    With this we can map each label id to the corresponding tag type and chunk
    type correctly.

    Args:
        input (Tensor): A Tensor representing the predicted labels
            from the network. Its shape would be `[N, M, 1]`,
            where `N` stands for batch size, `M` for sequence length.
            The data type should be int64.
        label (Tensor): A Tensor representing the ground-truth labels.
            It should have the same shape, lod and data type as ``input`` .
        chunk_scheme (str): Indicate the tagging schemes used here. The value must
            be IOB, IOE, IOBES or plain.
        num_chunk_types (int): The number of chunk types.
        excluded_chunk_types (list, optional): Indicate the chunk types that shouldn't
            be taken into account. It should be a list of chunk type ids (integer).
            Default None.
        seq_length (Tensor, optional): A 1D Tensor containing the length of each
            sequence when ``input`` and ``label`` are Tensor. Default None.

    Returns:
        tuple: A tuple including precision, recall, F1-score, number of chunks detected, \
            number of chunks in ground-truth, and number of chunks correctly detected. Each \
            is a Tensor with shape `[1]`. The data type of precision, recall and \
            F1-score is float32, and the others' data type is int64.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            dict_size = 10000
            label_dict_len = 7
            sequence = fluid.data(
                name='id', shape=[None, 1], lod_level=1, dtype='int64')
            embedding = fluid.embedding(
                input=sequence, size=[dict_size, 512])
            hidden = fluid.layers.fc(input=embedding, size=512)
            label = fluid.data(
                name='label', shape=[None, 1], lod_level=1, dtype='int64')
            crf = fluid.layers.linear_chain_crf(
                input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
            crf_decode = fluid.layers.crf_decoding(
                input=hidden, param_attr=fluid.ParamAttr(name="crfw"))
            fluid.layers.chunk_eval(
                input=crf_decode,
                label=label,
                chunk_scheme="IOB",
                num_chunk_types=int((label_dict_len - 1) / 2))
    """
    helper = LayerHelper("chunk_eval", **locals())

    check_variable_and_dtype(input, 'input', ['int64'], 'chunk_eval')
    check_variable_and_dtype(label, 'label', ['int64'], 'chunk_eval')

    # prepare output
    precision = helper.create_variable_for_type_inference(dtype="float32")
    recall = helper.create_variable_for_type_inference(dtype="float32")
    f1_score = helper.create_variable_for_type_inference(dtype="float32")
    num_infer_chunks = helper.create_variable_for_type_inference(dtype="int64")
    num_label_chunks = helper.create_variable_for_type_inference(dtype="int64")
    num_correct_chunks = helper.create_variable_for_type_inference(
        dtype="int64")

    this_input = {"Inference": [input], "Label": [label]}

    if seq_length is not None:
        this_input["SeqLength"] = [seq_length]

    helper.append_op(
        type="chunk_eval",
        inputs=this_input,
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score],
            "NumInferChunks": [num_infer_chunks],
            "NumLabelChunks": [num_label_chunks],
            "NumCorrectChunks": [num_correct_chunks]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            "chunk_scheme": chunk_scheme,
            "excluded_chunk_types": excluded_chunk_types or []
        })
    return (precision, recall, f1_score, num_infer_chunks, num_label_chunks,
            num_correct_chunks)


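# Illustrative sketch (hypothetical helper, not part of the module API) of the
# label-id mapping described in the chunk_eval() docstring above: an id splits
# into a tag type and a chunk type.
def _chunk_label_mapping_sketch(label, num_tag_types):
    return label % num_tag_types, label // num_tag_types

# e.g. with the IOB scheme (num_tag_types=2) and the docstring's label ids,
# I-PER = 3 maps to (tag_type=1, i.e. Inside; chunk_type=1, i.e. PER).

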
@deprecated(since="2.0.0", update_to="paddle.nn.functional.softmax")
def softmax(input, use_cudnn=True, name=None, axis=-1):
    r"""
1225
    This operator implements the softmax layer. The calculation process is as follows:
1226

1227
    1. The dimension :attr:`axis` of the ``input`` will be permuted to the last.
1228

1229 1230 1231 1232 1233 1234 1235
    2. Then the input tensor will be logically flattened to a 2-D matrix. The matrix's
    second dimension(row length) is the same as the dimension :attr:`axis` of the input
    tensor, and the first dimension(column length) is the product of all other
    dimensions of the input tensor. For each row of the matrix, the softmax operator
    squashes the K-dimensional(K is the width of the matrix, which is also the size
    of the input tensor's dimension :attr:`axis`) vector of arbitrary real values to a
    K-dimensional vector of real values in the range [0, 1] that add up to 1.
1236

1237
    3. After the softmax operation is completed, the inverse operations of steps 1 and 2
1238
    are performed to restore the two-dimensional matrix to the same dimension as the ``input``.
1239

1240 1241 1242 1243 1244
    It computes the exponential of the given dimension and the sum of exponential
    values of all the other dimensions in the K-dimensional vector input.
    Then the ratio of the exponential of the given dimension and the sum of
    exponential values of all the other dimensions is the output of the softmax
    operator.
1245

1246
    For each row :math:`i` and each column :math:`j` in the matrix, we have:

    .. math::

        Out[i, j] = \\frac{\\exp(X[i, j])}{\\sum_j \\exp(X[i, j])}

    Example:

    .. code-block:: text

        Case 1:
          Input:
            X.shape = [2, 3, 4]
            X.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]

          Attrs:
            axis = -1

          Output:
            Out.shape = [2, 3, 4]
            Out.data = [[[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.07232949, 0.19661193, 0.19661193, 0.53444665]],
                        [[0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426],
                         [0.0320586 , 0.08714432, 0.23688282, 0.64391426]]]

        Case 2:
          Input:
            X.shape = [2, 3, 4]
            X.data = [[[2.0, 3.0, 4.0, 5.0],
                       [3.0, 4.0, 5.0, 6.0],
                       [7.0, 8.0, 8.0, 9.0]],
                      [[1.0, 2.0, 3.0, 4.0],
                       [5.0, 6.0, 7.0, 8.0],
                       [6.0, 7.0, 8.0, 9.0]]]
          Attrs:
            axis = 1

          Output:
            Out.shape = [2, 3, 4]
            Out.data = [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
                         [0.01786798, 0.01786798, 0.04661262, 0.04661262],
                         [0.97555875, 0.97555875, 0.93623955, 0.93623955]],
                        [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
                         [0.26762315, 0.26762315, 0.26762315, 0.26762315],
                         [0.72747516, 0.72747516, 0.72747516, 0.72747516]]]

    Args:
        input (Tensor): The input tensor. A multi-dimension ``Tensor`` with type float32 or float64.
        use_cudnn (bool, optional): Use cudnn kernel or not, it is valid only when the cudnn \
            library is installed. To improve performance, set use_cudnn to True by default.
        name (str, optional): Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name`. Default: None.
        axis (int, optional): The index of dimension to perform softmax calculations, it should
            be in range :math:`[-1, rank - 1]`, while :math:`rank` is the rank of
            input tensor. Default: -1. -1 means the last dimension.

    Returns:
        Tensor: ``Tensor`` indicates the output of softmax. The data type and shape are the same as ``input`` .

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.to_tensor([[[2.0, 3.0, 4.0, 5.0],
                                   [3.0, 4.0, 5.0, 6.0],
                                   [7.0, 8.0, 8.0, 9.0]],
                                  [[1.0, 2.0, 3.0, 4.0],
                                   [5.0, 6.0, 7.0, 8.0],
                                   [6.0, 7.0, 8.0, 9.0]]], dtype='float32')
            y = F.softmax(x, axis=1)
            print(y)
            # [[[0.00657326, 0.00657326, 0.01714783, 0.01714783],
            #   [0.01786798, 0.01786798, 0.04661262, 0.04661262],
            #   [0.97555870, 0.97555870, 0.93623954, 0.93623954]],
            #  [[0.00490169, 0.00490169, 0.00490169, 0.00490169],
            #   [0.26762316, 0.26762316, 0.26762316, 0.26762316],
            #   [0.72747517, 0.72747517, 0.72747517, 0.72747517]]]
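
            # a minimal numpy cross-check of the formula above (an illustrative
            # sketch, assuming numpy is installed; `ref` is our own name)
            import numpy as np
            ref = np.exp(x.numpy()) / np.exp(x.numpy()).sum(axis=1, keepdims=True)
            print(np.allclose(y.numpy(), ref))  # True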

    """

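    # fast path: in dygraph mode, dispatch directly to the compiled softmax op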
    if in_dygraph_mode():
        return _C_ops.softmax(input, 'axis', axis, 'use_cudnn', use_cudnn)

    inputs = {"X": [input]}
    attrs = {"axis": axis, "use_cudnn": use_cudnn}

    helper = LayerHelper('softmax', **locals())
    check_variable_and_dtype(input, 'input/x',
                             ['float16', 'float32', 'float64'], 'softmax')

    dtype = helper.input_dtype()
    softmax_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="softmax",
        inputs={"X": input},
        outputs={"Out": softmax_out},
        attrs=attrs)
    return softmax_out


def conv2d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCHW"):
    r"""
    :api_attr: Static Graph

    The convolution2D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input and
    Output are in NCHW or NHWC format, where N is batch size, C is the number of
    channels, H is the height of the feature, and W is the width of the feature.
    Filter is in MCHW format, where M is the number of output image channels,
    C is the number of input image channels, H is the height of the filter,
    and W is the width of the filter. If the groups is greater than 1,
    C will equal the number of input image channels divided by the groups.
    Please refer to UFLDL's `convolution
    <http://ufldl.stanford.edu/tutorial/supervised/FeatureExtractionUsingConvolution/>`_
    for more details.
    If bias attribution and activation type are provided, bias is added to the
    output of the convolution, and the corresponding activation function is
    applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    Where:

    * :math:`X`: Input value, a tensor with NCHW or NHWC format.
    * :math:`W`: Filter value, a tensor with MCHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
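
        For example, with a 32 x 32 input, a 3 x 3 filter, padding 0, stride 1
        and dilation 1, each formula above gives (32 + 0 - 3) / 1 + 1 = 30, which
        matches the output shape [-1, 2, 30, 30] printed in the Examples below.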

    Args:
        input (Tensor): The input is 4-D Tensor with shape [N, C, H, W], the data type
            of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
        filter_size (int|tuple): The filter size. If filter_size
            is a tuple, it must contain two integers, (filter_size_height,
            filter_size_width). Otherwise, filter_size_height = filter_size_width =\
            filter_size.
        stride (int|tuple): The stride size. It means the stride in convolution.
            If stride is a tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when
            `data_format` is `"NCHW"`, `padding` can be in the form `[[0,0], [0,0],
            [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|tuple): The dilation size. It means the spacing between the kernel
            points. If dilation is a tuple, it must contain two integers, (dilation_height,
            dilation_width). Otherwise, dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv2d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of conv2d. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with :math:`Normal(0.0, std)`,
            and the :math:`std` is :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv2d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str): Activation type, if it is set to None, activation is not appended.
            Default: None
        name(str|None): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and
           None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        A Tensor representing the conv2d, whose data type is the
        same with input. If act is None, the tensor storing the convolution
        result, and if act is not None, the tensor storing convolution
        and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not a 4-D Tensor.
        ShapeError: If the input's dimension size and the filter's dimension size are not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not divisible by groups.

    Examples:
        .. code-block:: python

          import paddle
          paddle.enable_static()
          
          data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
          print(conv2d.shape) # [-1, 2, 30, 30]
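
          # a further illustrative sketch (`conv2d_same` is our own name):
          # 'SAME' padding with stride 1 preserves the spatial size
          conv2d_same = paddle.static.nn.conv2d(
              input=data, num_filters=2, filter_size=3, padding="SAME")
          print(conv2d_same.shape) # [-1, 2, 32, 32]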
    """

    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'conv2d')
    if len(input.shape) != 4:
        raise ValueError("Input size should be 4, "
                         "but received {}".format(len(input.shape)))
    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    channel_last = (data_format == "NHWC")
    num_channels = input.shape[3] if channel_last else input.shape[1]
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))
    assert param_attr is not False, "param_attr should not be False here."

    if groups is None:
        num_filter_channels = num_channels
    elif groups <= 0:
        raise ValueError("the groups of input must be greater than 0, "
                         "but received the groups of input is {}".format(
                             groups))
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "the channel of input must be divisible by groups,"
                "received: the channel of input is {}, the shape of input is {}"
                ", the groups is {}".format(num_channels, input.shape, groups))
        num_filter_channels = num_channels // groups

    l_type = 'conv2d'
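    # use the native depthwise kernel when every input channel forms its own
    # group (depthwise convolution) and cuDNN was not requested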
    if (num_channels == groups and num_filters % num_channels == 0 and
            not use_cudnn):
        l_type = 'depthwise_conv2d'

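    # ROCm provides a dedicated depthwise kernel, so prefer it regardless of
    # use_cudnn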
    if (num_channels == groups and num_filters % num_channels == 0 and
            core.is_compiled_with_rocm()):
        l_type = 'depthwise_conv2d'

    # NPU only supports depthwise_conv2d when "input_channel = output_channel = groups"
    if core.is_compiled_with_npu():
        if (num_channels == groups and num_channels == num_filters):
            l_type = 'depthwise_conv2d'
        else:
            l_type = 'conv2d'

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    # padding
    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
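            # symmetric [top, bottom, left, right] padding collapses to
            # [pad_h, pad_w]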
            if utils._is_symmetric_padding(padding, 2):
                padding = [padding[0], padding[2]]

        else:
            padding = utils.convert_to_list(padding, 2, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0]

    padding = _update_padding(padding, data_format)

    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
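        # Kaiming-style default: std = sqrt(2 / fan_in), where fan_in is
        # filter_height * filter_width * input_channels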
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, excepted number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

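    # honor the global flag that force-disables cuDNN for conv2d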
    if (core.is_compiled_with_cuda() and paddle.fluid.get_flags(
            "FLAGS_conv2d_disable_cudnn")["FLAGS_conv2d_disable_cudnn"]):
        use_cudnn = False

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            'fuse_relu_before_depthwise_conv': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

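    # the bias is added along the channel axis: dim 1 for NCHW, dim 3 for NHWC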
    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)

    return helper.append_activation(pre_act)


def conv3d(input,
           num_filters,
           filter_size,
           stride=1,
           padding=0,
           dilation=1,
           groups=None,
           param_attr=None,
           bias_attr=None,
           use_cudnn=True,
           act=None,
           name=None,
           data_format="NCDHW"):
    r"""
    :api_attr: Static Graph

    The convolution3D layer calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are in NCDHW or NDHWC format, where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. Convolution3D is similar to Convolution2D
    but adds one dimension (depth). If bias attribution and activation type are
    provided, bias is added to the output of the convolution, and the
    corresponding activation function is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \\ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
    * :math:`\\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)`

        - Output:
          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

            D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\
            H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\
            W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1
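
        For example, with the Examples input of shape [None, 3, 12, 32, 32], a
        3 x 3 x 3 filter, padding 0, stride 1 and dilation 1, the formulas above
        give D_out = (12 - 3) / 1 + 1 = 10 and H_out = W_out = (32 - 3) / 1 + 1 = 30,
        so the output shape is [-1, 2, 10, 30, 30].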

    Args:
        input (Tensor): The input is 5-D Tensor with shape [N, C, D, H, W], the data
            type of input is float16 or float32 or float64.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
        filter_size (int|tuple): The filter size. If filter_size is a tuple,
            it must contain three integers, (filter_size_depth, filter_size_height,
            filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
            filter_size_width = filter_size.
        stride (int|tuple): The stride size. It means the stride in convolution. If stride is a
            tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|tuple): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int): The groups number of the Conv3d Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1
        param_attr (ParamAttr|None): The parameter attribute for learnable parameters/weights
            of conv3d. If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as param_attr. If it is set to None, the parameter
            is initialized with :math:`Normal(0.0, std)`, and the :math:`std` is
            :math:`(\\frac{2.0 }{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool|None): The parameter attribute for the bias of conv3d.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str|None): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and
           None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv3d, whose data type is
        the same with input. If act is None, the tensor variable storing the
        convolution result, and if act is not None, the tensor variable storing
        convolution and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If the channel dimension of the input is less than or equal to zero.
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ShapeError: If the input is not a 5-D Tensor.
        ShapeError: If the input's dimension size and the filter's dimension size are not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels * groups.
        ShapeError: If the number of output channels is not divisible by groups.

    Examples:
        .. code-block:: python

          import paddle
          import numpy as np

          paddle.enable_static()
          data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
          param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
          res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
          place = paddle.CPUPlace()
          exe = paddle.static.Executor(place)
          exe.run(paddle.static.default_startup_program())
          x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
          output = exe.run(feed={"data": x}, fetch_list=[res])
          print(output)
    """

    l_type = 'conv3d'
    assert param_attr is not False, "param_attr should not be False here."
    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()

    if not isinstance(use_cudnn, bool):
        raise ValueError("Attr(use_cudnn) should be True or False. Received "
                         "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    channel_last = (data_format == "NDHWC")
    if len(input.shape) != 5:
        raise ValueError(
            "Input should be 5D tensor, but received input with the shape of {}".
            format(input.shape))
    num_channels = input.shape[4] if channel_last else input.shape[1]
    if num_channels < 0:
        raise ValueError(
            "The channel dimmention of the input(%s) should be defined. "
            "Received: %s." % (str(input.shape), str(num_channels)))

    if groups is None:
        num_filter_channels = num_channels
    elif groups <= 0:
        raise ValueError(
            "the groups of conv3d should be greater than 0. Received groups: {}".
            format(groups))
    else:
        if num_channels % groups != 0:
            raise ValueError(
                "The number of input channels must be divisible by Attr(groups). "
                "Received: number of channels(%s), groups(%s)." %
                (str(num_channels), str(groups)))
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 3, 'filter_size')
    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')

    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0]

    padding = _update_padding(padding, data_format)

    input_shape = input.shape
    filter_shape = [num_filters, num_filter_channels] + filter_size

    def _get_default_param_initializer():
        filter_elem_num = filter_size[0] * filter_size[1] * filter_size[
            2] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, excepted number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))

        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=l_type,
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={
            'strides': stride,
            'paddings': padding,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'use_mkldnn': False,
            "padding_algorithm": padding_algorithm,
            "data_format": data_format,
        })

1944 1945 1946 1947
    if data_format == 'NCDHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)

    return helper.append_activation(pre_act)


@templatedoc()
def pool2d(input,
           pool_size=-1,
           pool_type="max",
           pool_stride=1,
           pool_padding=0,
           global_pooling=False,
           use_cudnn=True,
           ceil_mode=False,
           name=None,
           exclusive=True,
           data_format="NCHW"):
    """

    ${comment}

    Args:
        input (Variable): The input tensor of pooling operator which is a 4-D tensor with
                          shape [N, C, H, W]. The format of input tensor is `"NCHW"` or
                          `"NHWC"`, where `N` is batch size, `C` is the number of channels,
                          `H` is the height of the feature, and `W` is the width of the
                          feature. The data type is float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (pool_size_Height, pool_size_Width).
            Otherwise, the pool kernel size will be a square of an int.
        pool_type (string): ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain two integers, (pool_stride_Height, pool_stride_Width).
            Otherwise, the pool stride size will be a square of an int.
        pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`,
            `pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Otherwise, the pool padding size will be a square of an int.
        global_pooling (bool): ${global_pooling_comment}
        use_cudnn (bool): ${use_cudnn_comment}
        ceil_mode (bool): ${ceil_mode_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is `True`.
        data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`.
                The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
                `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        Variable: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `pool_type` is not "max" nor "avg".
        ValueError: If `global_pooling` is False and `pool_size` is -1.
        TypeError: If `use_cudnn` is not a bool value.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
        ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
        ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
        ShapeError: If the input is not a 4-D Tensor.
        ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
        ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
        ShapeError: If the output's shape calculated is not greater than 0.


    Examples:

        .. code-block:: python

          import paddle.fluid as fluid
          import paddle

          paddle.enable_static()

          data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')

          # max pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "max",
            pool_stride = 1,
            global_pooling=False)

          # average pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=False)

          # global average pool2d
          pool2d = fluid.layers.pool2d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=True)

          # Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW".
          out_1 = fluid.layers.pool2d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = [1, 2, 1, 0],
            data_format = "NCHW")

          # Attr(pool_padding) is a string, Attr(data_format) is "NCHW".
          out_2 = fluid.layers.pool2d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = "VALID",
            data_format = "NCHW")
    """
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if global_pooling is False and pool_size == -1:
        raise ValueError(
            "When Attr(global_pooling) is False, Attr(pool_size) must be passed "
            "and be a valid value. Received pool_size: %s." % str(pool_size))

    if not isinstance(use_cudnn, bool):
        raise TypeError("Attr(use_cudnn) should be True or False. Received "
                        "Attr(use_cudnn): %s." % str(use_cudnn))

    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
            "Attr(data_format): %s." % str(data_format))

    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')
    pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride')

    def update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')

            if utils._is_symmetric_padding(padding, 2):
                padding = [padding[0], padding[2]]
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')

        return padding

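    # a string pool_padding selects a padding algorithm ("SAME"/"VALID");
    # numeric paddings are applied as given ("EXPLICIT")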
    padding_algorithm = "EXPLICIT"
    if isinstance(pool_padding, str):
        pool_padding = pool_padding.upper()
        if pool_padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
                % str(pool_padding))
        if pool_padding == "VALID":
            padding_algorithm = "VALID"
            pool_padding = [0, 0]
            if ceil_mode != False:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. "
                    "Received ceil_mode: True.")
        elif pool_padding == "SAME":
            padding_algorithm = "SAME"
            pool_padding = [0, 0]

    pool_padding = update_padding(pool_padding, data_format)

    op_type = 'pool2d'
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": use_cudnn,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    return pool_out


@templatedoc()
def pool3d(input,
           pool_size=-1,
           pool_type="max",
           pool_stride=1,
           pool_padding=0,
           global_pooling=False,
           use_cudnn=True,
           ceil_mode=False,
           name=None,
           exclusive=True,
           data_format="NCDHW"):
    """

    ${comment}

    Args:
        input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of
                          input tensor is `"NCDHW"` or `"NDHWC"`, where `N` is batch size, `C` is
                          the number of channels, `D` is the depth of the feature,
                          `H` is the height of the feature, and `W` is the width
                          of the feature.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size
            is a tuple or list, it must contain three integers,
            (pool_size_Depth, pool_size_Height, pool_size_Width).
            Otherwise, the pool kernel size will be the cube of an int.
        pool_type (string): ${pooling_type_comment}
        pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list,
            it must contain three integers, `[stride_Depth, stride_Height, stride_Width]`.
            Otherwise, the pool stride size will be a cube of an int.
        pool_padding (string|int|list|tuple): The pool padding size. If `pool_padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If pool padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `pool_padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `pool_padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
        global_pooling (bool): ${global_pooling_comment}
        use_cudnn (bool): ${use_cudnn_comment}
        ceil_mode (bool): ${ceil_mode_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.
        exclusive (bool): Whether to exclude padding points in average pooling
                          mode, default is True.
        data_format (string): The data format of the input and output data. An optional string from: `"NCDHW"`, `"NDHWC"`.
                The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
                `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        Variable: The output tensor of pooling result. The data type is same as input tensor.

    Raises:
        ValueError: If `pool_type` is not "max" nor "avg".
        ValueError: If `global_pooling` is False and `pool_size` is -1.
        TypeError: If `use_cudnn` is not a bool value.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If `pool_padding` is a string, but not "SAME" or "VALID".
        ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True.
        ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero.
        ShapeError: If the input is not a 5-D Tensor.
        ShapeError: If the dimension of input minus the size of `pool_stride` is not 2.
        ShapeError: If the size of `pool_size` and `pool_stride` is not equal.
        ShapeError: If the output's shape calculated is not greater than 0.

    Examples:

        .. code-block:: python

          import paddle.fluid as fluid
          import paddle

          paddle.enable_static()

          data = fluid.data(name='data', shape=[None, 3, 32, 32, 32], dtype='float32')

          # max pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "max",
            pool_stride = 1,
            global_pooling=False)

          # average pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=False)

          # global average pool3d
          pool3d = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            global_pooling=True)

          # example 1:
          # Attr(pool_padding) is a list with 6 elements, Attr(data_format) is "NCDHW".
          out_1 = fluid.layers.pool3d(
            input = data,
            pool_size = 2,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = [1, 2, 1, 0, 1, 2],
            global_pooling = False,
            data_format = "NCDHW")

          # example 2:
          # Attr(pool_padding) is a string, Attr(data_format) is "NCDHW".
          out_2 = fluid.layers.pool3d(
            input = data,
            pool_size = 3,
            pool_type = "avg",
            pool_stride = 1,
            pool_padding = "VALID",
            global_pooling = False,
            data_format = "NCDHW")

    """
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if global_pooling is False and pool_size == -1:
        raise ValueError(
            "When Attr(global_pooling) is False, Attr(pool_size) must be passed "
            "and be a valid value. Received Attr(pool_size): %s." %
            str(pool_size))

    if not isinstance(use_cudnn, bool):
        raise TypeError("Attr(use_cudnn) should be True or False. Received "
                        "Attr(use_cudnn): %s. " % str(use_cudnn))

    if data_format not in ["NCDHW", "NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
            "Attr(data_format): %s" % str(data_format))

    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')
    pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride')

    def update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, (list, tuple)):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero pool_padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]

        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')
            if utils._is_symmetric_padding(padding, 3):
                padding = [padding[0], padding[2], padding[4]]
        else:
            padding = utils.convert_to_list(padding, 3, 'padding')

        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(pool_padding, str):
        pool_padding = pool_padding.upper()
        if pool_padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'."
                % str(pool_padding))
        if pool_padding == "VALID":
            padding_algorithm = "VALID"
            pool_padding = [0, 0, 0]
            if ceil_mode != False:
                raise ValueError(
                    "When Attr(pool_padding) is \"VALID\", ceil_mode must be False. "
                    "Received ceil_mode: True.")
        elif pool_padding == "SAME":
            padding_algorithm = "SAME"
            pool_padding = [0, 0, 0]

    pool_padding = update_padding(pool_padding, data_format)

    op_type = "pool3d"
    helper = LayerHelper(op_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type=op_type,
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding,
            "padding_algorithm": padding_algorithm,
            "use_cudnn": use_cudnn,
            "ceil_mode": ceil_mode,
            "use_mkldnn": False,
            "exclusive": exclusive,
            "data_format": data_format,
        })

    return pool_out


@deprecated(since="2.0.0")
@templatedoc(op_type="pool2d")
def adaptive_pool2d(input,
                    pool_size,
                    pool_type="max",
                    require_index=False,
                    name=None):
    r"""

    This operation calculates the output based on the input, pool_size and
    pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
    size, C is the number of channels, H is the height of the feature, and W is
    the width of the feature. Parameters(pool_size) should contain two elements which
    represent height and width, respectively. Also the H and W dimensions of output(Out)
    are the same as Parameters(pool_size). The output tensor shape will be [N, C, pool_size[0], pool_size[1]].

    For average adaptive pool2d:

    ..  math::

       hstart &= floor(i * H_{in} / H_{out})

       hend &= ceil((i + 1) * H_{in} / H_{out})

       wstart &= floor(j * W_{in} / W_{out})

       wend &= ceil((j + 1) * W_{in} / W_{out})

       Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}

    Args:
        input (Tensor): The input tensor of pooling operator, which is a 4-D tensor
                          with shape [N, C, H, W].  The format of input tensor is NCHW,
                          where N is batch size, C is the number of channels, H is the
                          height of the feature, and W is the width of the feature.
                          The data type is float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain two integers, (pool_size_Height, pool_size_Width).
        pool_type: ${pooling_type_comment}
        require_index (bool): If true, the index of max pooling point will be returned along
            with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Tensor: The output tensor of adaptive pooling result. The data type is the same
                  as the input tensor.

    Raises:
        ValueError: 'pool_type' is not 'max' nor 'avg'.
        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 2.

    Examples:
        .. code-block:: python

          # average adaptive pool2d
          # suppose input data is in shape of [N, C, H, W], `pool_size` is [m, n],
          # output shape is [N, C, m, n], adaptive pooling divides the H and W dimensions
          # of the input data evenly into m * n grids and performs pooling in each
          # grid to get the output.
          # adaptive average pooling performs calculations as follows:
          #
          #     for i in range(m):
          #         for j in range(n):
          #             hstart = floor(i * H / m)
          #             hend = ceil((i + 1) * H / m)
          #             wstart = floor(j * W / n)
          #             wend = ceil((j + 1) * W / n)
          #             output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle
          paddle.enable_static()
          data = paddle.rand(shape=[1,3,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='avg')

          # max adaptive pool2d
          # suppose input data is in shape of [N, C, H, W], `pool_size` is [m, n],
          # output shape is [N, C, m, n], adaptive pooling divides the H and W dimensions
          # of the input data evenly into m * n grids and performs pooling in each
          # grid to get the output.
          # adaptive max pooling performs calculations as follows:
          #
          #     for i in range(m):
          #         for j in range(n):
          #             hstart = floor(i * H / m)
          #             hend = ceil((i + 1) * H / m)
          #             wstart = floor(j * W / n)
          #             wend = ceil((j + 1) * W / n)
          #             output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
          #
          import paddle
          data = paddle.rand(shape=[1,3,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='max')
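
          # The grid arithmetic above can be reproduced with plain NumPy;
          # this is an illustrative sketch only, not part of the Paddle API:
          import numpy as np
          x = np.random.rand(1, 3, 32, 32).astype('float32')
          m, n = 3, 3
          N, C, H, W = x.shape
          out = np.zeros((N, C, m, n), dtype=x.dtype)
          for i in range(m):
              for j in range(n):
                  hs, he = i * H // m, -((-(i + 1) * H) // m)  # floor, ceil
                  ws, we = j * W // n, -((-(j + 1) * W) // n)
                  out[:, :, i, j] = x[:, :, hs:he, ws:we].max(axis=(2, 3))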
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'adaptive_pool2d')
    check_type(pool_type, 'pool_type', str, 'adaptive_pool2d')
    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool2d')
    check_type(require_index, 'require_index', bool, 'adaptive_pool2d')
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if pool_type == "avg" and require_index:
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")

    pool_size = utils.convert_to_list(pool_size, 2, 'pool_size')

    if pool_type == "max":
        l_type = 'max_pool2d_with_index'
    else:
        l_type = "pool2d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    if pool_type == "max":
        mask = helper.create_variable_for_type_inference(dtype)
        outputs["Mask"] = mask

    helper.append_op(
        type=l_type,
        inputs={"X": input},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return (pool_out, mask) if require_index else pool_out


@deprecated(since="2.0.0")
@templatedoc(op_type="pool3d")
def adaptive_pool3d(input,
                    pool_size,
                    pool_type="max",
                    require_index=False,
                    name=None):
    r"""

    This operation calculates the output based on the input, pool_size and
    pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
    size, C is the number of channels, D is the depth of the feature, H is the height of
    the feature, and W is the width of the feature. Parameters(pool_size) should contain
    three elements which represent depth, height and width, respectively. Also the D, H and W
    dimensions of output(Out) are the same as Parameters(pool_size). The output tensor shape
    will be [N, C, pool_size[0], pool_size[1], pool_size[2]].

    For average adaptive pool3d:

    ..  math::

      dstart &= floor(i * D_{in} / D_{out})

      dend &= ceil((i + 1) * D_{in} / D_{out})

      hstart &= floor(j * H_{in} / H_{out})

      hend &= ceil((j + 1) * H_{in} / H_{out})

      wstart &= floor(k * W_{in} / W_{out})

      wend &= ceil((k + 1) * W_{in} / W_{out})

      Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}

    Args:
        input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                          shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
                          N is batch size, C is the number of channels, D is the depth of the feature,
                          H is the height of the feature, and W is the width of the feature.
                          The data type is float32 or float64.
        pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list,
            it must contain three integers, (Depth, Height, Width).
        pool_type: ${pooling_type_comment}
        require_index (bool): If true, the index of max pooling point will be returned along
            with outputs. It cannot be set in average pooling type. Default False.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Tensor: The output tensor of adaptive pooling result. The data type is the same as the input tensor.

    Raises:
        ValueError: 'pool_type' is not 'max' nor 'avg'.
        ValueError: invalid setting 'require_index' true when 'pool_type' is 'avg'.
        ValueError: 'pool_size' should be a list or tuple with length as 3.

    Examples:
        .. code-block:: python

          # average adaptive pool3d
          # suppose input data is in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
          # output shape is [N, C, l, m, n], adaptive pooling divides the D, H and W dimensions
          # of the input data evenly into l * m * n grids and performs pooling in each
          # grid to get the output.
          # adaptive average pooling performs calculations as follows:
          #
          #     for i in range(l):
          #         for j in range(m):
          #             for k in range(n):
          #                 dstart = floor(i * D / l)
          #                 dend = ceil((i + 1) * D / l)
          #                 hstart = floor(j * H / m)
          #                 hend = ceil((j + 1) * H / m)
          #                 wstart = floor(k * W / n)
          #                 wend = ceil((k + 1) * W / n)
          #                 output[:, :, i, j, k] =
          #                     avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #

          import paddle
          paddle.enable_static()
          data = paddle.rand(shape=[1,3,32,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='avg')

          # max adaptive pool3d
          # suppose input data is in shape of [N, C, D, H, W], `pool_size` is [l, m, n],
          # output shape is [N, C, l, m, n], adaptive pooling divides the D, H and W dimensions
          # of the input data evenly into l * m * n grids and performs pooling in each
          # grid to get the output.
          # adaptive max pooling performs calculations as follows:
          #
          #     for i in range(l):
          #         for j in range(m):
          #             for k in range(n):
          #                 dstart = floor(i * D / l)
          #                 dend = ceil((i + 1) * D / l)
          #                 hstart = floor(j * H / m)
          #                 hend = ceil((j + 1) * H / m)
          #                 wstart = floor(k * W / n)
          #                 wend = ceil((k + 1) * W / n)
          #                 output[:, :, i, j, k] =
          #                     max(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #

          import paddle
          data = paddle.rand(shape=[1,3,32,32,32])
          pool_out = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='max')
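
          # with `require_index=True` (max pooling only), the index mask is
          # returned together with the output, as described in Args above:
          pool_out, mask = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='max',
                            require_index=True)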
    """
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'adaptive_pool3d')
    check_type(pool_type, 'pool_type', str, 'adaptive_pool3d')
    check_type(pool_size, 'pool_size', (int, list, tuple), 'adaptive_pool3d')
    check_type(require_index, 'require_index', bool, 'adaptive_pool3d')
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'." %
            str(pool_type))

    if pool_type == "avg" and require_index:
        raise ValueError(
            "invalid setting 'require_index' true when 'pool_type' is 'avg'.")

    pool_size = utils.convert_to_list(pool_size, 3, 'pool_size')

    if pool_type == "max":
        l_type = 'max_pool3d_with_index'
    else:
        l_type = "pool3d"

    helper = LayerHelper(l_type, **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)

    outputs = {"Out": pool_out}
    if pool_type == "max":
        mask = helper.create_variable_for_type_inference(dtype)
        outputs["Mask"] = mask

    helper.append_op(
        type=l_type,
        inputs={"X": input},
        outputs=outputs,
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "adaptive": True,
        })

    return (pool_out, mask) if require_index else pool_out


def batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               data_layout='NCHW',
               in_place=False,
               name=None,
               moving_mean_name=None,
               moving_variance_name=None,
               do_model_average_for_mean_and_var=True,
               use_global_stats=False):
    r"""
    :api_attr: Static Graph

    **Batch Normalization Layer**

    Can be used as a normalizer function for convolution or fully_connected operations.
    The required data format for this layer is one of the following:

    1. NHWC `[batch, in_height, in_width, in_channels]`

    2. NCHW `[batch, in_channels, in_height, in_width]`

    Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing
    Internal Covariate Shift <https://arxiv.org/pdf/1502.03167.pdf>`_
    for more details.

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

        moving\_mean = moving\_mean * momentum + mini-batch\_mean * (1. - momentum) \\\\
        moving\_var = moving\_var * momentum + mini-batch\_var * (1. - momentum)


    moving_mean is global mean and moving_var is global variance.

    When use_global_stats = True, the :math:`\\mu_{\\beta}`
    and :math:`\\sigma_{\\beta}^{2}` are not the statistics of one mini-batch.
    They are global (or running) statistics. (It usually got from the
    pre-trained model.)
    The training and testing (or inference) have the same behavior:

    ..  math::

        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}}  \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta

    Note:
        if build_strategy.sync_batch_norm=True, the batch_norm in network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test or inference programs. `is_test` CANNOT be set to True in a train program; if you want to use global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.

    Args:
        input(Tensor): The rank of input Tensor can be 2, 3, 4, 5. The data type
            is float16 or float32 or float64.
        act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
        momentum(float|Tensor, Default 0.9): The value used for the moving_mean and
            moving_var computation. This should be a float number or a Tensor with
            shape [1] and data type as float32. The updated formula is:
            :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
            :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
            Default is 0.9.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
             If the Initializer of the param_attr is not set, the parameter is initialized
             with Xavier. Default: None.
        bias_attr(ParamAttr|None): The parameter attribute for the bias of batch_norm.
             If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             Default: None.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
             will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
             The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
             `[batch_size, input_channels, input_height, input_width]`.
        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
        name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
            is set to None, batch_norm will save global mean with a random name, otherwise, batch_norm
            will save global mean with the string.
        moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
            If it is set to None, batch_norm will save global variance with a random name, otherwise, batch_norm
            will save global variance with the string.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
            average when model average is enabled.
        use_global_stats(bool, Default False): Whether to use global mean and
            variance. In inference or test mode, set use_global_stats to true
            or is_test to true, and the behavior is equivalent.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during train period.
    Returns:
        A Tensor which is the result after applying batch normalization on the input,
        has the same shape and data type as the input.

    Examples:

        .. code-block:: python

            import paddle

            paddle.enable_static()
            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = paddle.static.nn.fc(x=x, size=200)
            print(hidden1.shape)
            # [3, 200]
            hidden2 = paddle.static.nn.batch_norm(input=hidden1)
            print(hidden2.shape)
            # [3, 200]
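
            # a sketch of the typical use after a convolution (NCHW assumed);
            # the layer choices here are illustrative only:
            conv = paddle.static.nn.conv2d(input=x, num_filters=16, filter_size=3)
            hidden3 = paddle.static.nn.batch_norm(input=conv, act='relu')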
    """
    assert bias_attr is not False, "bias_attr should not be False in batch_norm."
    helper = LayerHelper('batch_norm', **locals())

    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'batch_norm')
    dtype = helper.input_dtype()

    # use fp32 for bn parameter
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance_out share the same memory
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    reserve_space = None
    if not is_test:
        reserve_space = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype(), stop_gradient=True)

    batch_norm_out = input if in_place else \
            helper.create_variable_for_type_inference(dtype)

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats
    }
    if isinstance(momentum, Variable):
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum

    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space

    helper.append_op(
        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs)

    return helper.append_activation(batch_norm_out)


def inplace_abn(input,
                act=None,
                is_test=False,
                momentum=0.9,
                epsilon=1e-05,
                param_attr=None,
                bias_attr=None,
                data_layout='NCHW',
                name=None,
                moving_mean_name=None,
                moving_variance_name=None,
                do_model_average_for_mean_and_var=True,
                use_global_stats=False,
                act_alpha=1.0):
    r"""
    **In-place Activation Batch Normalization Layer**

    This layer calculates batch normalization and activation with in-place memory.
    For batch normalization calculations, see `fluid.layers.batch_norm`.
    For in-place activation batch normalization, see `In-Place Activated BatchNorm for
    Memory-Optimized Training of DNNs <https://arxiv.org/abs/1712.02616>`_

    `inplace_abn` currently only supports the activation types `None`, `identity`,
    `leaky_relu`, and `elu`.
    `inplace_abn` currently only supports the data types `float32` and `float64`.

    Note:
        if build_strategy.sync_batch_norm=True, the batch_norm in network will use
        sync_batch_norm automatically.
        `is_test = True` can only be used in test or inference programs. `is_test` CANNOT be set to True in a train program; if you want to use global statistics from a pre-trained model in a train program, please set `use_global_stats = True`.

    Args:
        input(Variable): The rank of input variable can be 2, 3, 4, 5. The data type
            is float16 or float32 or float64.
        act(string, Default None): Activation type, linear|relu|prelu|...
        is_test (bool, Default False): A flag indicating whether it is in
            test phase or not.
        momentum(float|Variable, Default 0.9): The value used for the moving_mean and
            moving_var computation. This should be a float number or a Variable with
            shape [1] and data type as float32. The updated formula is:
            :math:`moving\_mean = moving\_mean * momentum + new\_mean * (1. - momentum)`
            :math:`moving\_var = moving\_var * momentum + new\_var * (1. - momentum)`
            Default is 0.9.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None): The parameter attribute for Parameter `scale`
             of inplace_abn. If it is set to None or one attribute of ParamAttr, inplace_abn
             will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
             If the Initializer of the param_attr is not set, the parameter is initialized
             with Xavier. Default: None.
        bias_attr(ParamAttr|None): The parameter attribute for the bias of inplace_abn.
             If it is set to None or one attribute of ParamAttr, inplace_abn
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             Default: None.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
             will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
             The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
             `[batch_size, input_channels, input_height, input_width]`.
        name(str|None): For detailed information, please refer to :ref:`api_guide_Name`.
            Usually name is no need to set and None by default.
        moving_mean_name(str, Default None): The name of moving_mean which store the global Mean. If it
            is set to None, inplace_abn will save global mean with a random name, otherwise, inplace_abn
            will save global mean with the string.
        moving_variance_name(str, Default None): The name of the moving_variance which store the global Variance.
            If it is set to None, inplace_abn will save global variance with a random name, otherwise, inplace_abn
            will save global variance with the string.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance should do model
            average when model average is enabled.
        use_global_stats(bool, Default False): Whether to use global mean and
            variance. In inference or test mode, set use_global_stats to true
            or is_test to true, and the behavior is equivalent.
            In train mode, when setting use_global_stats True, the global mean
            and variance are also used during train period.
        act_alpha(float, Default 1.0): when activation is in ['elu', 'identity', 'leaky_relu'],
            in-place activation batch normalization will be used, and the alpha parameter for the
            activation can be given by this parameter.
    Returns:
        A Variable holding a Tensor which is the result after applying batch normalization and activation on the input,
        has the same shape and data type as the input.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w')
            hidden2 = fluid.layers.inplace_abn(input=hidden1)
            hidden3 = fluid.layers.inplace_abn(input=hidden2, act='leaky_relu', act_alpha=0.2)
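            # `act` may also be 'elu' or 'identity'; `act_alpha` supplies the
            # activation's alpha (the values here are illustrative):
            hidden4 = fluid.layers.inplace_abn(input=hidden3, act='elu', act_alpha=2.0)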

    """
    assert act in [None, 'identity', 'leaky_relu', 'elu'], \
        "inplace_abn only supports act as None, 'identity', " \
        "'leaky_relu', 'elu' currently"
    assert bias_attr is not False, "bias_attr should not be False in inplace_abn."
    helper = LayerHelper('inplace_abn', **locals())

    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'inplace_abn')
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    mean = helper.create_parameter(
        attr=ParamAttr(
            name=moving_mean_name,
            initializer=Constant(0.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    mean.stop_gradient = True

    variance = helper.create_parameter(
        attr=ParamAttr(
            name=moving_variance_name,
            initializer=Constant(1.0),
            trainable=False,
            do_model_average=do_model_average_for_mean_and_var),
        shape=param_shape,
        dtype=dtype)
    variance.stop_gradient = True

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance_out share the same memory
    variance_out = variance
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    reserve_space = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    batch_norm_out = input

    inputs = {
        "X": input,
        "Scale": scale,
        "Bias": bias,
        "Mean": mean,
        "Variance": variance
    }
    attrs = {
        "epsilon": epsilon,
        "is_test": is_test,
        "data_layout": data_layout,
        "use_mkldnn": False,
        "fuse_with_relu": False,
        "use_global_stats": use_global_stats,
        "activation": act,
        "alpha": act_alpha,
    }
    if isinstance(momentum, Variable):
        inputs['MomemtumTensor'] = momentum
    else:
        attrs['momentum'] = momentum
    outputs = {
        "Y": batch_norm_out,
        "MeanOut": mean_out,
        "VarianceOut": variance_out,
        "SavedMean": saved_mean,
        "SavedVariance": saved_variance
    }
    if reserve_space is not None:
        outputs["ReserveSpace"] = reserve_space

    helper.append_op(
        type="inplace_abn", inputs=inputs, outputs=outputs, attrs=attrs)

    return batch_norm_out


def instance_norm(input,
                  epsilon=1e-05,
                  param_attr=None,
                  bias_attr=None,
                  name=None):
    r"""
    :api_attr: Static Graph

    **Instance Normalization Layer**

    Can be used as a normalizer function for convolution or fully_connected operations.
    The required data format for this layer is one of the following:

    DataLayout: NCHW `[batch, in_channels, in_height, in_width]`

    Refer to `Instance Normalization: The Missing Ingredient for
    Fast Stylization <https://arxiv.org/pdf/1607.08022.pdf>`_
    for more details.

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW} x_i \\qquad &//\\
        \\ mean\ of\ one\ feature\ map\ in\ mini-batch \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{HW} \\sum_{i=1}^{HW}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ variance\ of\ one\ feature\ map\ in\ mini-batch \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Note:
        `H` means height of feature map, `W` means width of feature map.

    Args:
        input(Tensor): The rank of input tensor can be 2, 3, 4, 5.
            The data type is float32 or float64.
        epsilon(float, Default 1e-05): A value added to the denominator for
            numerical stability. Default is 1e-5.
        param_attr(ParamAttr|None|bool, optional): The parameter attribute for Parameter `scale`
             of instance_norm. If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as param_attr, the name of scale can be set in ParamAttr.
             If the Initializer of the param_attr is not set, the parameter is initialized
             with Xavier. If the param_attr is set to False, instance_norm will not create param_attr.
             Default: None.
        bias_attr(ParamAttr|None|bool, optional): The parameter attribute for the bias of instance_norm.
             If it is set to None or one attribute of ParamAttr, instance_norm
             will create ParamAttr as bias_attr, the name of bias can be set in ParamAttr.
             If the Initializer of the bias_attr is not set, the bias is initialized zero.
             If the bias_attr is set to False, instance_norm will not create bias_attr.
             Default: None.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        A Tensor which is the result after applying instance normalization on the input,
        has the same shape and data type as the input.

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()
            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
            hidden1 = paddle.static.nn.fc(x, size=200)
            hidden2 = paddle.static.nn.instance_norm(hidden1)
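
            # a sketch of the common use after a convolution (NCHW input assumed):
            conv = paddle.static.nn.conv2d(input=x, num_filters=16, filter_size=3)
            conv_norm = paddle.static.nn.instance_norm(conv)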
    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'instance_norm')
    if param_attr is False:
        assert bias_attr is False, "param_attr and bias_attr must be set to False at the same time in instance_norm"

    helper = LayerHelper('instance_norm', **locals())
    dtype = helper.input_dtype()

    # use fp32 for in parameter
    if dtype == core.VarDesc.VarType.FP16:
        dtype = core.VarDesc.VarType.FP32

    input_shape = input.shape
    if len(input.shape) < 2 or len(input.shape) > 5:
        raise ValueError(
            'expected 2D or 3D or 4D or 5D input (got {}D input, input shape is: {})'.
            format(len(input.shape), input_shape))
    channel_num = input_shape[1]

    param_shape = [channel_num]

    if param_attr != False and bias_attr != False:
        # create parameter
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        bias = helper.create_parameter(
            attr=helper.bias_attr,
            shape=param_shape,
            dtype=dtype,
            is_bias=True,
            default_initializer=Constant(0.0))

    # create output
    saved_mean = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    saved_variance = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)

    instance_norm_out = helper.create_variable_for_type_inference(dtype)

    inputs = {"X": input}
    if param_attr != False and bias_attr != False:
        inputs["Scale"] = scale
        inputs["Bias"] = bias

    helper.append_op(
        type="instance_norm",
        inputs=inputs,
        outputs={
            "Y": instance_norm_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"epsilon": epsilon, })

    return instance_norm_out


@static_only
def data_norm(input,
              act=None,
              epsilon=1e-05,
              param_attr=None,
              data_layout='NCHW',
              in_place=False,
              name=None,
              moving_mean_name=None,
              moving_variance_name=None,
              do_model_average_for_mean_and_var=True,
              slot_dim=-1,
              sync_stats=False,
              summary_decay_rate=0.9999999,
              enable_scale_and_shift=False):
    r"""
    :api_attr: Static Graph

    **Data Normalization Layer**

    This op can be used as a normalizer function for conv2d and fully_connected operations.
    The required data format for this layer is one of the following:

    1. NHWC `[batch, in_height, in_width, in_channels]`

    2. NCHW `[batch, in_channels, in_height, in_width]`

    :math:`input` is the input features over a mini-batch.

    ..  math::

        \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\
        \ mini-batch\ mean \\\\
        \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\
        \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\
        \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\
        \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\
        y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift

    Args:
        input(Tensor): The input Tensor.
        act(string, Default None): Activation type, linear|relu|prelu|...
        epsilon(float, Default 1e-05): A value added to the denominator for numerical stability.
        param_attr(ParamAttr): The parameter attribute for Parameter `scale`.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
        in_place(bool, Default False): Make the input and output of batch norm reuse memory.
        name(string, Default None): A name for this layer(optional). If set None, the layer
            will be named automatically.
        moving_mean_name(string, Default None): The name of moving_mean which store the global Mean.
        moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance.
        do_model_average_for_mean_and_var(bool, Default True): Whether parameter mean and variance
            should do model average when model average is enabled.
        slot_dim(int): The embedding dimension of one slot. A slot is a set of one specific feature. In pslib mode, we
            distinguish feature ids by slot and pull their embeddings from the parameter server (pslib). The first
            place of the embedding is the historical show number (occurrence count of this feature id with a label 0).
            If the input of this op is concatenated from slot-wise embeddings, and the show number is zero when this slot
            is new or empty, the normalization result may be impractical. To avoid this, we add slot_dim to locate
            the show number and judge if the show number is zero. If so, we choose to skip normalization on this
            embedding.
        sync_stats(bool, Default False): When running with multiple GPU cards, using allreduce to sync the
            summary messages.
        summary_decay_rate(float, Default 0.9999999): The decay rate when updating summary.
        enable_scale_and_shift(bool, Default False): do scale&shift after normalization.

    Returns:
        Tensor: A tensor which is the result after applying data normalization on the input.

    Examples:

        .. code-block:: python

            import paddle
            paddle.enable_static()

            x = paddle.randn(shape=[32,100])
            hidden2 = paddle.static.nn.data_norm(input=x)
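
            # a sketch of the optional scale-and-shift path described in Args;
            # the layer name "dn2" is illustrative only:
            hidden3 = paddle.static.nn.data_norm(
                input=x, name="dn2", enable_scale_and_shift=True)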
    """
    helper = LayerHelper('data_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    batch_size_default = 1e4
    batch_sum_default = 0.0
    batch_square_sum_default = 1e4
    scale_w_default = 1.0
    bias_default = 0.0

    if param_attr and isinstance(param_attr, dict):
        batch_size_default = param_attr.get("batch_size", 1e4)
        batch_sum_default = param_attr.get("batch_sum", 0.0)
        batch_square_sum_default = param_attr.get("batch_square", 1e4)
    # look up scale/shift overrides only when param_attr is a dict;
    # otherwise keep the defaults initialized above
    if enable_scale_and_shift and param_attr and isinstance(param_attr, dict):
        scale_w_default = param_attr.get("scale_w", 1.0)
        bias_default = param_attr.get("bias", 0.0)

    # create scale and shift(bias) when enable_scale_and_shift is True
    if name is None:
        name = "dn"
    if enable_scale_and_shift:
        scale_w = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.scale_w',
                initializer=Constant(value=float(scale_w_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
        bias = helper.create_parameter(
            attr=ParamAttr(
                name=name + '.bias',
                initializer=Constant(value=float(bias_default)),
                trainable=True),
            shape=param_shape,
            dtype=input.dtype)
    # create parameter
    batch_size = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_size',
            initializer=Constant(value=float(batch_size_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_sum',
            initializer=Constant(value=float(batch_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    batch_square_sum = helper.create_parameter(
        attr=ParamAttr(
            name=name + '.batch_square_sum',
            initializer=Constant(value=float(batch_square_sum_default)),
            trainable=True),
        shape=param_shape,
        dtype=input.dtype)

    means = helper.create_variable(dtype=dtype, stop_gradient=True)
    scales = helper.create_variable(dtype=dtype, stop_gradient=True)

    data_norm_out = input if in_place else helper.create_variable(dtype=dtype)

    inputs = {
        "X": input,
        "BatchSize": batch_size,
        "BatchSum": batch_sum,
        "BatchSquareSum": batch_square_sum
    }
    attrs = {
        "epsilon": epsilon,
        "data_layout": data_layout,
        "sync_stats": sync_stats,
        "summary_decay_rate": summary_decay_rate,
    }
    if slot_dim > 0:
        attrs["slot_dim"] = slot_dim
    if enable_scale_and_shift:
        attrs["enable_scale_and_shift"] = enable_scale_and_shift
    if enable_scale_and_shift:
        inputs["scale_w"] = scale_w
        inputs["bias"] = bias
    helper.append_op(
        type="data_norm",
        inputs=inputs,
        outputs={
            "Y": data_norm_out,
            "Means": means,
            "Scales": scales,
            "BatchSize": batch_size,
            "BatchSum": batch_sum,
            "BatchSquareSum": batch_square_sum
        },
        attrs=attrs)

    return helper.append_activation(data_norm_out)


@templatedoc()
def layer_norm(input,
               scale=True,
               shift=True,
               begin_norm_axis=1,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               name=None):
    r"""
    :api_attr: Static Graph

    **Layer Normalization Layer**

    The API implements the function of the Layer Normalization Layer and can be applied to mini-batch input data.
    Refer to `Layer Normalization <https://arxiv.org/pdf/1607.06450v1.pdf>`_

    The formula is as follows:

    ..  math::

        \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} x_i

        \\sigma & = \\sqrt{\\frac{1}{H}\\sum_{i=1}^{H}{(x_i - \\mu)^2} + \\epsilon}

        y & = f(\\frac{g}{\\sigma}(x - \\mu) + b)

    - :math:`x`: the vector representation of the summed inputs to the neurons in that layer.
    - :math:`H`: the number of hidden units in a layer
    - :math:`\\epsilon`: the small value added to the variance to prevent division by zero.
    - :math:`g`: the trainable scale parameter.
    - :math:`b`: the trainable bias parameter.

    Args:
        input(Tensor): A multi-dimension ``Tensor`` , and the data type is float32 or float64.
        scale(bool, optional): Whether to learn the adaptive gain :math:`g` after
            normalization. Default: True.
        shift(bool, optional): Whether to learn the adaptive bias :math:`b` after
            normalization. Default: True.
        begin_norm_axis(int, optional): The normalization will be performed along
            dimensions from :attr:`begin_norm_axis` to :attr:`rank(input)`.
            Default: 1.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero. Default: 1e-05.
        param_attr(ParamAttr, optional): The parameter attribute for the learnable
            gain :math:`g`. If :attr:`scale` is False, :attr:`param_attr` is
            omitted. If :attr:`scale` is True and :attr:`param_attr` is None,
            a default :code:`ParamAttr` would be added as scale. The
            :attr:`param_attr` is initialized as 1 if it is added. Default: None.
        bias_attr(ParamAttr, optional): The parameter attribute for the learnable
            bias :math:`b`. If :attr:`shift` is False, :attr:`bias_attr` is
            omitted. If :attr:`shift` is True and :attr:`bias_attr` is None,
            a default :code:`ParamAttr` would be added as bias. The
            :attr:`bias_attr` is initialized as 0 if it is added. Default: None.
        act(str, optional): Activation to be applied to the output of layer normalization.
                  Default: None.
        name(str): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: ``Tensor``  indicating the normalized result, the data type is the same as  ``input`` , and the return dimension is the same as  ``input`` .

    Examples:

3511 3512
        .. code-block:: python

3513 3514
            import paddle
            paddle.enable_static()
3515 3516 3517
            x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
            output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
            print(output.shape)  # [8, 32, 32]
G
guosheng 已提交
3518
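
            # A further sketch: with begin_norm_axis=2 the statistics are
            # computed over the last axis only; the output shape is unchanged.
            output2 = paddle.static.nn.layer_norm(input=x, begin_norm_axis=2)
            print(output2.shape)  # [8, 32, 32]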
    """
    assert in_dygraph_mode(
    ) is not True, "please use LayerNorm instead of layer_norm in dygraph mode!"
    helper = LayerHelper('layer_norm', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'layer_norm')
    dtype = helper.input_dtype()

    # create input and parameters
    inputs = {'X': input}
    input_shape = input.shape
    param_shape = [reduce(lambda x, y: x * y, input_shape[begin_norm_axis:])]
    if scale:
        assert param_attr is not False, "param_attr should not be False when using scale."
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    else:
        if param_attr:
            warnings.warn("param_attr is only available when scale is True.")
    if shift:
        assert bias_attr is not False, "bias_attr should not be False when using shift."
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias
    else:
        if bias_attr:
            warnings.warn("bias_attr is only available when shift is True.")

    # create output
    mean_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    layer_norm_out = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type="layer_norm",
        inputs=inputs,
        outputs={
            "Y": layer_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={"epsilon": epsilon,
               "begin_norm_axis": begin_norm_axis})

    return helper.append_activation(layer_norm_out)


@templatedoc()
def group_norm(input,
               groups,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               act=None,
               data_layout='NCHW',
               name=None):
    """
    :api_attr: Static Graph

    **Group Normalization Layer**

    Refer to `Group Normalization <https://arxiv.org/abs/1803.08494>`_ .

    Parameters:
        input(Tensor): Tensor with dimension greater than 1, the data type is float32 or float64.
        groups(int): The number of groups into which the channels are divided, the data type
            is int32.
        epsilon(float, optional): The small value added to the variance to prevent
            division by zero, the data type is float32. Default: 1e-05.
        param_attr(ParamAttr|bool, optional): ParamAttr object that specifies weight parameter
            attribute. If a bool type, only False is supported, which means there is no weight parameter.
            Default: None, the default weight parameter attribute is used. For more information, please
            refer to :ref:`api_guide_ParamAttr` .
        bias_attr(ParamAttr|bool, optional): ParamAttr object that specifies bias parameter
            attribute. If a bool type, only False is supported, which means there is no bias parameter.
            Default: None, the default bias parameter attribute is used. For more information, please
            refer to :ref:`api_guide_ParamAttr` .
        act(str, optional): Activation to be applied to the output of group normalization. Default: None.
        data_layout(str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, *]`.
        name (str, optional): The default value is None. Normally there is no need for user to set this
            property. For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Tensor: A Tensor with the same data type and data format as `input`.

    Examples:
       .. code-block:: python

            import paddle
            paddle.enable_static()

            data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
            x = paddle.static.nn.group_norm(input=data, groups=4)
            print(x.shape) # [2, 8, 32, 32]
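
            # A further sketch: groups must evenly divide the channel
            # dimension (8 here); groups=8 reduces to instance-norm-style
            # statistics and groups=1 normalizes jointly over [C, H, W].
            y = paddle.static.nn.group_norm(input=data, groups=8)
            print(y.shape) # [2, 8, 32, 32]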
    """
    helper = LayerHelper('group_norm', **locals())
    dtype = helper.input_dtype()
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'group_norm')
    # create input and parameters
    inputs = {'X': input}
    input_shape = input.shape
    if len(input_shape) < 2:
        raise ValueError(
            f"The dimensions of Op(fluid.layers.group_norm)'s input should be more than 1. But received {len(input_shape)}"
        )
    if data_layout != 'NCHW' and data_layout != 'NHWC':
        raise ValueError(
            "Param(data_layout) of Op(fluid.layers.group_norm) got wrong value: received "
            + data_layout + " but only NCHW or NHWC supported.")
    channel_num = input_shape[1] if data_layout == 'NCHW' else input_shape[-1]
    param_shape = [channel_num]
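    # e.g. an NCHW input of shape [2, 8, 32, 32] gives channel_num = 8, so the
    # optional scale/bias parameters below are vectors of length 8.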
    if param_attr:
        scale = helper.create_parameter(
            attr=helper.param_attr,
            shape=param_shape,
            dtype=dtype,
            default_initializer=Constant(1.0))
        inputs['Scale'] = scale
    if bias_attr:
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)
        inputs['Bias'] = bias

    # create output
    mean_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    variance_out = helper.create_variable(dtype=dtype, stop_gradient=True)
    group_norm_out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="group_norm",
        inputs=inputs,
        outputs={
            "Y": group_norm_out,
            "Mean": mean_out,
            "Variance": variance_out,
        },
        attrs={
            "epsilon": epsilon,
            "groups": groups,
            "data_layout": data_layout
        })

    return helper.append_activation(group_norm_out)


@templatedoc()
def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
    r"""
    :api_attr: Static Graph

    **Spectral Normalization Layer**

    This operation calculates the spectral normalization value of the weight parameters of
    fc, conv1d, conv2d and conv3d layers, which should be 2-D, 3-D, 4-D or 5-D
    Parameters. The output tensor will have the same shape as the input tensor.
    Calculations are as follows.

    Step 1:
    Generate vector U in shape of [H], and V in shape of [W].
    Where H is the :attr:`dim` th dimension of the input weights,
    and W is the product of the remaining dimensions.

    Step 2:
    :attr:`power_iters` should be a positive integer. Perform the following
    calculations with U and V for :attr:`power_iters` rounds:

    .. math::

        \mathbf{v} := \frac{\mathbf{W}^{T} \mathbf{u}}{\|\mathbf{W}^{T} \mathbf{u}\|_2}

        \mathbf{u} := \frac{\mathbf{W} \mathbf{v}}{\|\mathbf{W} \mathbf{v}\|_2}

    Step 3:
    Calculate :math:`\sigma(\mathbf{W})` and normalize weight values.

    .. math::

        \sigma(\mathbf{W}) = \mathbf{u}^{T} \mathbf{W} \mathbf{v}

        \mathbf{W} = \frac{\mathbf{W}}{\sigma(\mathbf{W})}


    Refer to `Spectral Normalization <https://arxiv.org/abs/1802.05957>`_ .

    Args:
        weight(Tensor): ${weight_comment}
        dim(int): ${dim_comment}
        power_iters(int): ${power_iters_comment}
        eps(float): ${eps_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Tensor: A tensor of weight parameters after spectral normalization.
                The data type and shape are the same as the input tensor.

    Examples:
       .. code-block:: python

            import paddle

            paddle.enable_static()
            weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
            x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
            print(x.shape) # [2, 8, 32, 32]
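
            # A NumPy sketch of the power iteration described in Steps 1-3
            # (illustrative only; assumes dim=0 and an already 2-D weight):
            import numpy as np
            w = np.random.randn(4, 6).astype('float32')   # H = 4, W = 6
            u = np.random.randn(4).astype('float32')
            for _ in range(2):                            # power_iters rounds
                v = w.T @ u / (np.linalg.norm(w.T @ u) + 1e-12)
                u = w @ v / (np.linalg.norm(w @ v) + 1e-12)
            sigma = u @ w @ v                             # sigma(W)
            w_sn = w / sigma                              # normalized weight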
    """
    helper = LayerHelper('spectral_norm', **locals())
    check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                             'spectral_norm')
    check_type(dim, 'dim', int, 'spectral_norm')
    check_type(power_iters, 'power_iters', int, 'spectral_norm')
    check_type(eps, 'eps', float, 'spectral_norm')
    dtype = weight.dtype

    # create input and parameters
    inputs = {'Weight': weight}
    input_shape = weight.shape
    assert weight.numel() > 0, "Any dimension of input cannot be equal to 0."
    assert dim < len(input_shape), ("The input `dim` should be less than the "
                                    "rank of `weight`, but received dim="
                                    "{}".format(dim))
    h = input_shape[dim]
    w = np.prod(input_shape) // h
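    # e.g. a weight of shape [2, 8, 32, 32] with dim=1 gives h = 8 and
    # w = 2 * 32 * 32 = 2048 for the implicit 2-D view used above.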

    u = helper.create_parameter(
        attr=ParamAttr(),
        shape=[h],
        dtype=dtype,
        default_initializer=Normal(0., 1.))
    u.stop_gradient = True
    inputs['U'] = u
    v = helper.create_parameter(
        attr=ParamAttr(),
        shape=[w],
        dtype=dtype,
        default_initializer=Normal(0., 1.))
    inputs['V'] = v
    v.stop_gradient = True

    # create output
    out = helper.create_variable(dtype=dtype)

    helper.append_op(
        type="spectral_norm",
        inputs=inputs,
        outputs={"Out": out, },
        attrs={
            "dim": dim,
            "power_iters": power_iters,
            "eps": eps,
        })

    return out


def conv2d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCHW'):
    r"""
    :api_attr: Static Graph

    The convolution2D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCHW or NHWC format. Where N is batch size, C is the number of channels,
    H is the height of the feature, and W is the width of the feature.
    Parameters(dilations, strides, paddings) are two elements. These two elements
    represent height and width, respectively. For details of the convolution
    transpose layer, please refer to the following explanation and the references
    `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    Where:

    * :math:`X`: Input value, a 4-D Tensor with NCHW or NHWC format.
    * :math:`W`: Filter value, a 4-D Tensor with MCHW format.
    * :math:`\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\sigma`: Activation function.
    * :math:`Out`: Output value, a 4-D Tensor with data format 'NCHW' or 'NHWC', the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

           H^\prime_{out} &= (H_{in} - 1) * strides[0] - pad\_height\_top - pad\_height\_bottom + dilations[0] * (H_f - 1) + 1 \\
           W^\prime_{out} &= (W_{in} - 1) * strides[1] - pad\_width\_left - pad\_width\_right + dilations[1] * (W_f - 1) + 1 \\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[0] ] \\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[1] ]

    Note:
          The conv2d_transpose can be seen as the backward of the conv2d. For conv2d,
          when stride > 1, conv2d maps multiple input shapes to the same output shape,
          so for conv2d_transpose, when stride > 1, one input shape maps to multiple output shapes.
          If output_size is None, :math:`H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
          else, the :math:`H_{out}` of the output size must be between :math:`H^\prime_{out}`
          and :math:`H^\prime_{out} + strides[0]`, and the :math:`W_{out}` of the output size must be
          between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[1]`;
          in this case, conv2d_transpose computes the kernel size automatically.

    Args:
        input(Tensor): 4-D Tensor with [N, C, H, W] or [N, H, W, C] format,
                         its data type is float32 or float64.
        num_filters(int): The number of the filter. It is the same as the output
            image channel.
        output_size(int|tuple, optional): The output image size. If output size is a
            tuple, it must contain two integers, (image_height, image_width). None if
            filter_size, padding, and stride are used to calculate output_size.
            If output_size and filter_size are specified at the same time, they
            should follow the formula above. Default: None. output_size and filter_size
            should not be None at the same time.
        filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_height, filter_size_width).
            Otherwise, filter_size_height = filter_size_width = filter_size. None if
            output size is used to calculate filter_size. Default: None. filter_size and
            output_size should not be None at the same time.
        stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding(str|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If `padding` is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NHWC"`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain two integers, (dilation_height, dilation_width).
            Otherwise, dilation_height = dilation_width = dilation. Default: dilation = 1.
        groups(int, optional): The groups number of the Conv2d transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups = 1.
        param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv2d_transpose. If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv2d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv2d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True.
        act (str, optional): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and
           None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.

    Returns:
        A Tensor representing the conv2d_transpose, whose
        data type is the same as the input and shape is (num_batches, channels, out_h,
        out_w) or (num_batches, out_h, out_w, channels). If act is None, the tensor
        storing the transposed convolution result, and if act is not None, the
        tensor storing transposed convolution and non-linearity activation
        result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCHW" or "NHWC".
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ValueError: If `output_size` and filter_size are None at the same time.
        ShapeError: If the input is not 4-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size are not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels.
        ShapeError: If the size of `output_size` is not equal to that of `stride`.

    Examples:
       .. code-block:: python

          import paddle
          paddle.enable_static()

          data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
          conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
          print(conv2d_transpose.shape) # [-1, 2, 34, 34]
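
          # A further sketch: with stride=2 the output height/width is
          # ambiguous inside [H', H' + stride); output_size pins it down.
          up2 = paddle.static.nn.conv2d_transpose(
              input=data, num_filters=2, filter_size=3, stride=2,
              padding=1, output_size=[64, 64])
          print(up2.shape) # [-1, 2, 64, 64]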
    """
    assert param_attr is not False, "param_attr should not be False in conv2d_transpose."
    if len(input.shape) != 4:
        raise ValueError("Input size should be 4, "
                         "but received {}".format(len(input.shape)))

    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(fluid.layers.conv2d_transpose) got wrong value: received "
            + data_format + " but only NCHW or NHWC supported.")

    input_channel = input.shape[1] if data_format == 'NCHW' else input.shape[-1]
    op_type = 'conv2d_transpose'
    if (input_channel == groups and num_filters == input_channel and
            not use_cudnn):
        op_type = 'depthwise_conv2d_transpose'

    helper = LayerHelper(op_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv2d_transpose must be Variable")

    stride = utils.convert_to_list(stride, 2, 'stride')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 4:
            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:4]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
                if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:3]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 4, 'padding')
        else:
            padding = utils.convert_to_list(padding, 2, 'padding')
            padding = [padding[0], padding[0], padding[1], padding[1]]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0]

    padding = _update_padding(padding, data_format)
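    # As an illustration: padding=[[0, 0], [0, 0], [1, 2], [3, 4]] with NCHW
    # collapses to [1, 2, 3, 4]; a scalar padding=1 expands to [1, 1, 1, 1].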

    if filter_size is None:
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size]

        h_in = input.shape[2] if data_format == 'NCHW' else input.shape[1]
        w_in = input.shape[3] if data_format == 'NCHW' else input.shape[2]

        filter_size_h = (output_size[0] - (h_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_w = (output_size[1] - (w_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size = [filter_size_h, filter_size_w]
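        # e.g. output_size=[64, 64], stride=2, padding=[1, 1, 1, 1] and
        # dilation=1 on a 32x32 input give
        # filter_size_h = (64 - 62 + 1 + 1 - 1) // 1 + 1 = 4.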
    else:
        filter_size = utils.convert_to_list(filter_size, 2,
                                            'conv2d_transpose.filter_size')

    if len(padding) == 4 and utils._is_symmetric_padding(padding, 2):
        padding = [padding[0], padding[2]]

    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 2, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")

    if groups is None:
        groups = 1
    elif groups <= 0:
        raise ValueError("the groups of input must be greater than 0, "
                         "but received the groups of input is {}".format(
                             groups))

    filter_shape = [input_channel, num_filters // groups] + filter_size

    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })

    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=3, dim_end=4)
    out = helper.append_activation(pre_act)
    return out


def conv3d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=0,
                     stride=1,
                     dilation=1,
                     groups=None,
                     param_attr=None,
                     bias_attr=None,
                     use_cudnn=True,
                     act=None,
                     name=None,
                     data_format='NCDHW'):
    r"""
    :api_attr: Static Graph

    The convolution3D transpose layer calculates the output based on the input,
    filter, and dilations, strides, paddings. Input(Input) and output(Output)
    are in NCDHW or NDHWC format. Where N is batch size, C is the number of channels,
    D is the depth of the feature, H is the height of the feature, and W
    is the width of the feature. Parameters(dilations, strides, paddings) are
    three elements. These three elements represent depth, height and width,
    respectively. For details of the convolution transpose layer, please refer to
    the following explanation and the references `therein <https://arxiv.org/pdf/1603.07285.pdf>`_.
    If bias attribution and activation type are provided, bias is added to
    the output of the convolution, and the corresponding activation function
    is applied to the final result.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    In the above equation:

    * :math:`X`: Input value, a Tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a Tensor with MCDHW format.
    * :math:`\ast`: Convolution operation.
    * :math:`b`: Bias value, a 2-D Tensor with shape [M, 1].
    * :math:`\sigma`: Activation function.
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Example:

        - Input:

          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`

        - Output:

          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`

        Where

        .. math::

           D^\prime_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\
           H^\prime_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\
           W^\prime_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1 \\
           D_{out} &\in [ D^\prime_{out}, D^\prime_{out} + strides[0] ] \\
           H_{out} &\in [ H^\prime_{out}, H^\prime_{out} + strides[1] ] \\
           W_{out} &\in [ W^\prime_{out}, W^\prime_{out} + strides[2] ]

    Note:
          The conv3d_transpose can be seen as the backward of the conv3d. For conv3d,
          when stride > 1, conv3d maps multiple input shapes to the same output shape,
          so for conv3d_transpose, when stride > 1, one input shape maps to multiple output shapes.
          If output_size is None, :math:`D_{out} = D^\prime_{out}, H_{out} = H^\prime_{out}, W_{out} = W^\prime_{out}`;
          else, the :math:`D_{out}` of the output size must be between :math:`D^\prime_{out}` and
          :math:`D^\prime_{out} + strides[0]`, the :math:`H_{out}` of the output size must be between
          :math:`H^\prime_{out}` and :math:`H^\prime_{out} + strides[1]`, and the :math:`W_{out}` of the
          output size must be between :math:`W^\prime_{out}` and :math:`W^\prime_{out} + strides[2]`;
          in this case, conv3d_transpose computes the kernel size automatically.

    Args:
        input(Tensor): The input is 5-D Tensor with shape [N, C, D, H, W] or [N, D, H, W, C], the data type
            of input is float32 or float64.
        num_filters(int): The number of the filter. It is the same as the output
            image channel.
        output_size(int|tuple, optional): The output image size. If output size is a
            tuple, it must contain three integers, (image_depth, image_height, image_width). This
            parameter only works when filter_size is None. If output_size and filter_size are
            specified at the same time, they should follow the formula above. Default: None.
            output_size and filter_size should not be None at the same time.
        filter_size(int|tuple, optional): The filter size. If filter_size is a tuple,
            it must contain three integers, (filter_size_depth, filter_size_height,
            filter_size_width). Otherwise, filter_size_depth = filter_size_height = \
            filter_size_width = filter_size. None if output size is used to
            calculate filter_size. Default: None. filter_size and output_size should not be
            None at the same time.
        padding(int|list|str|tuple, optional): The padding size. The padding argument effectively
            adds `dilation * (kernel - 1)` amount of zero-padding on both sides of input. If `padding` is a string,
            either 'VALID' or 'SAME' supported, which is the padding algorithm. If `padding`
            is a tuple or list, it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `'NCDHW'`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `'NDHWC'`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        stride(int|tuple, optional): The stride size. It means the stride in transposed convolution.
            If stride is a tuple, it must contain three integers, (stride_depth, stride_height,
            stride_width). Otherwise, stride_depth = stride_height = stride_width = stride.
            Default: stride = 1.
        dilation(int|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups(int, optional): The groups number of the Conv3d transpose layer. Inspired by
            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
            when group=2, the first half of the filters is only connected to the
            first half of the input channels, while the second half of the
            filters is only connected to the second half of the input channels.
            Default: groups=1
        param_attr (ParamAttr, optional): The parameter attribute for learnable parameters/weights
            of conv3d_transpose. If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as param_attr. If the Initializer of the param_attr
            is not set, the parameter is initialized with Xavier. Default: None.
        bias_attr (ParamAttr|bool, optional): The parameter attribute for the bias of conv3d_transpose.
            If it is set to False, no bias will be added to the output units.
            If it is set to None or one attribute of ParamAttr, conv3d_transpose
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        use_cudnn(bool, optional): Use cudnn kernel or not, it is valid only when the cudnn
            library is installed. Default: True
        act (str, optional): Activation type, if it is set to None, activation is not appended.
            Default: None.
        name(str, optional): For detailed information, please refer
           to :ref:`api_guide_Name`. Usually name is no need to set and
           None by default.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.

    Returns:
        A Variable holding Tensor representing the conv3d_transpose, whose data
        type is the same as the input and shape is (num_batches, channels, out_d, out_h,
        out_w) or (num_batches, out_d, out_h, out_w, channels). If act is None, the tensor
        variable storing the transposed convolution result, and if act is not None, the tensor
        variable storing transposed convolution and non-linearity activation result.

    Raises:
        ValueError: If the type of `use_cudnn` is not bool.
        ValueError: If `data_format` is not "NCDHW" or "NDHWC".
        ValueError: If `padding` is a string, but not "SAME" or "VALID".
        ValueError: If `padding` is a tuple, but the element corresponding to the input's batch size is not 0
            or the element corresponding to the input's channel is not 0.
        ValueError: If `output_size` and filter_size are None at the same time.
        ShapeError: If the input is not 5-D Tensor.
        ShapeError: If the input's dimension size and filter's dimension size are not equal.
        ShapeError: If the dimension size of input minus the size of `stride` is not 2.
        ShapeError: If the number of input channels is not equal to filter's channels.
        ShapeError: If the size of `output_size` is not equal to that of `stride`.

    Examples:
       .. code-block:: python

          import paddle
          import numpy as np

          paddle.enable_static()
          data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
          param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
          res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
          place = paddle.CPUPlace()
          exe = paddle.static.Executor(place)
          exe.run(paddle.static.default_startup_program())
          x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
          output = exe.run(feed={"data": x}, fetch_list=[res])
          print(output)
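
          # A further note: with the defaults (stride=1, padding=0,
          # dilation=1) the formula above gives D' = (12-1) + (3-1) + 1 = 14
          # and H' = W' = (32-1) + (3-1) + 1 = 34, so res has shape
          # [1, 2, 14, 34, 34].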
    """
    assert param_attr is not False, "param_attr should not be False in conv3d_transpose."
    if data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Param(data_format) of Op(fluid.layers.conv3d_transpose) got wrong value: received "
            + data_format + " but only NCDHW or NDHWC supported.")

    l_type = "conv3d_transpose"
    helper = LayerHelper(l_type, **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv3d_transpose must be Variable")
    if len(input.shape) != 5:
        raise ValueError(
            "Input should be 5D tensor, but received input with the shape of {}".
            format(input.shape))
    input_channel = input.shape[1] if data_format == 'NCDHW' else input.shape[
        -1]

    stride = utils.convert_to_list(stride, 3, 'stride')
    dilation = utils.convert_to_list(dilation, 3, 'dilation')

    if not isinstance(use_cudnn, bool):
        raise ValueError("use_cudnn should be True or False")

    def _update_padding(padding, data_format):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        if is_list_or_tuple(padding) and len(padding) == 5:
            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
                if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[2:5]
                padding = [ele for a_list in padding for ele in a_list]
            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
                if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                    raise ValueError(
                        "Non-zero padding(%s) in the batch or channel dimensions "
                        "is not supported." % str(padding))
                padding = padding[1:4]
                padding = [ele for a_list in padding for ele in a_list]
            padding = utils.convert_to_list(padding, 6, 'padding')

        elif is_list_or_tuple(padding) and len(padding) == 6:
            padding = utils.convert_to_list(padding, 6, 'padding')

        else:
            padding = utils.convert_to_list(padding, 3, 'padding')
            padding = [
                padding[0], padding[0], padding[1], padding[1], padding[2],
                padding[2]
            ]
        return padding

    padding_algorithm = "EXPLICIT"
    if isinstance(padding, str):
        padding = padding.upper()
        if padding not in ["SAME", "VALID"]:
            raise ValueError(
                "Unknown padding: '%s'. It can only be 'SAME' or 'VALID'." %
                str(padding))
        if padding == "VALID":
            padding_algorithm = "VALID"
            padding = [0, 0, 0, 0, 0, 0]
        elif padding == "SAME":
            padding_algorithm = "SAME"
            padding = [0, 0, 0, 0, 0, 0]
    padding = _update_padding(padding, data_format)

    if filter_size is None:
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size, output_size]

        d_in = input.shape[2] if data_format == 'NCDHW' else input.shape[1]
        h_in = input.shape[3] if data_format == 'NCDHW' else input.shape[2]
        w_in = input.shape[4] if data_format == 'NCDHW' else input.shape[3]

        filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + padding[0] +
                         padding[1] - 1) // dilation[0] + 1
        filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + padding[2] +
                         padding[3] - 1) // dilation[1] + 1
        filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + padding[4] +
                         padding[5] - 1) // dilation[2] + 1
        filter_size = [filter_size_d, filter_size_h, filter_size_w]
    else:
        filter_size = utils.convert_to_list(filter_size, 3,
                                            'conv3d_transpose.filter_size')

    if len(padding) == 6 and utils._is_symmetric_padding(padding, 3):
        padding = [padding[0], padding[2], padding[4]]

    if output_size is None:
        output_size = []
    elif isinstance(output_size, (list, tuple, int)):
        output_size = utils.convert_to_list(output_size, 3, 'output_size')
    else:
        raise ValueError("output_size should be int, list[int] or tuple[int]")

    groups = 1 if groups is None else groups
    if groups <= 0:
        raise ValueError(
            "the groups of conv3d_transpose should be greater than 0. Received groups: {}".
            format(groups))
    if num_filters % groups != 0:
        raise ValueError("Attr(num_filters) must be divisible by groups,"
                         "Received: Attr(num_filters) is {}, the groups is {}".
                         format(num_filters, groups))

    filter_shape = [input_channel, num_filters // groups] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

    if data_format == 'NCDHW':
        data_format = 'NCHW'
    if data_format == 'NDHWC':
        data_format = 'NHWC'

    pre_bias = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=l_type,
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': pre_bias},
        attrs={
            'output_size': output_size,
            'strides': stride,
            'paddings': padding,
            'padding_algorithm': padding_algorithm,
            'dilations': dilation,
            'groups': groups,
            'use_cudnn': use_cudnn,
            'data_format': data_format
        })

    if data_format == 'NCHW':
        pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    else:
        pre_act = helper.append_bias_op(pre_bias, dim_start=4, dim_end=5)
    out = helper.append_activation(pre_act)
    return out


def reduce_sum(input, dim=None, keep_dim=False, name=None):
    """
    Computes the sum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the sum is performed. If
            :attr:`None`, sum all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of summation operation on the specified dim of input tensor,
        its data type is the same as the input Tensor's.

    Raises:
        TypeError: If the output data type is different from the input data type.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_sum(x)  # [3.5]
            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1, 2], [3, 4]],
            #      [[5, 6], [7, 8]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_sum(y, dim=[1, 2]) # [10, 26]
            fluid.layers.reduce_sum(y, dim=[0, 1]) # [16, 20]
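
            # A further sketch: keep_dim retains the reduced axes with size 1.
            fluid.layers.reduce_sum(y, dim=[1, 2], keep_dim=True)  # [[[10]], [[26]]]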
    """
    if dim is not None and not isinstance(dim, list):
        dim = [dim]

    if in_dygraph_mode():
        reduce_all = True if dim == None or dim == [] or len(dim) == len(
            input.shape) else False
        dim = dim if dim != None and dim != [] else [0]
        return _C_ops.reduce_sum(input, 'dim', dim, 'keep_dim', keep_dim,
                                 'reduce_all', reduce_all)
    attrs = {
        'dim': dim if dim != None and dim != [] else [0],
        'keep_dim': keep_dim,
        'reduce_all': True
        if dim == None or dim == [] or len(dim) == len(input.shape) else False
    }
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        'reduce_sum')
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs=attrs)
    return out


@deprecated(since="2.0.0", update_to="paddle.mean")
def reduce_mean(input, dim=None, keep_dim=False, name=None):
    """
    Computes the mean of the input tensor's elements along the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimension along which the mean is computed. If
            `None`, compute the mean over all elements of :attr:`input`
            and return a variable with a single element, otherwise it
            must be in the range :math:`[-rank(input), rank(input))`. If
            :math:`dim[i] < 0`, the dimension to reduce is
            :math:`rank(input) + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of average on the specified dim of input tensor,
        its data type is the same as the input Tensor's.

    Raises:
        TypeError: If the output data type is different from the input data type.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_mean(x)  # [0.4375]
            fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
            fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
            fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_mean(y, dim=[1, 2]) # [2.5, 6.5]
            fluid.layers.reduce_mean(y, dim=[0, 1]) # [4.0, 5.0]
    """
4546

4547
    return paddle.mean(x=input, axis=dim, keepdim=keep_dim, name=name)
4548 4549
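

# Illustrative sketch (added commentary, not original fluid code; the helper
# name is hypothetical). Since `reduce_mean` now simply forwards to
# `paddle.mean`, the two calls below are equivalent in dygraph mode; only the
# argument names differ (dim/keep_dim versus axis/keepdim).
def _reduce_mean_equivalence_sketch():
    data = paddle.to_tensor(
        np.array([[0.2, 0.3], [0.5, 0.9]], dtype='float32'))
    legacy = reduce_mean(data, dim=1, keep_dim=True)
    current = paddle.mean(data, axis=1, keepdim=True)
    return legacy, current  # both: [[0.25], [0.7]]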


def reduce_max(input, dim=None, keep_dim=False, name=None):
    """

    Computes the maximum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimension along which the maximum is computed.
            If :attr:`None`, compute the maximum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, results of maximum on the specified dim of input tensor,
        its data type is the same as the input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_max(x)  # [0.9]
            fluid.layers.reduce_max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
            fluid.layers.reduce_max(x, dim=-1)  # [0.9, 0.7]
            fluid.layers.reduce_max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_max(y, dim=[1, 2]) # [4.0, 8.0]
            fluid.layers.reduce_max(y, dim=[0, 1]) # [7.0, 8.0]
    """
    helper = LayerHelper('reduce_max', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out
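

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). Reducing over dim=[1, 2] matches numpy's max
# over the same axes, which is a convenient way to sanity-check the docstring
# values above; the fluid call assumes an active dygraph session.
def _reduce_max_numpy_check():
    arr = np.array([[[1.0, 2.0], [3.0, 4.0]],
                    [[5.0, 6.0], [7.0, 8.0]]], dtype='float32')
    expected = arr.max(axis=(1, 2))                      # [4.0, 8.0]
    got = reduce_max(paddle.to_tensor(arr), dim=[1, 2])  # same values
    return expected, got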


def reduce_min(input, dim=None, keep_dim=False, name=None):
    """

    Computes the minimum of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (list|int, optional): The dimensions along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of minimum on the specified dim of input tensor,
        its data type is the same as the input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_min(x)  # [0.1]
            fluid.layers.reduce_min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
            fluid.layers.reduce_min(x, dim=-1)  # [0.2, 0.1]
            fluid.layers.reduce_min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_min(y, dim=[1, 2]) # [1.0, 5.0]
            fluid.layers.reduce_min(y, dim=[0, 1]) # [1.0, 2.0]
    """
    helper = LayerHelper('reduce_min', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_min',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out
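

# Illustrative sketch (added commentary, not original fluid code; the helper
# name is hypothetical). keep_dim mirrors numpy's `keepdims`: with
# keep_dim=True the reduced axis stays in the result with size 1 instead of
# being squeezed away. Dygraph mode is assumed for the fluid calls.
def _reduce_min_keep_dim_sketch():
    arr = np.array([[0.2, 0.3, 0.5, 0.9],
                    [0.1, 0.2, 0.6, 0.7]], dtype='float32')
    squeezed = reduce_min(paddle.to_tensor(arr), dim=1)             # shape [2]
    kept = reduce_min(paddle.to_tensor(arr), dim=1, keep_dim=True)  # shape [2, 1]
    return squeezed, kept  # values match np.min(arr, axis=1, keepdims=...)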


def reduce_prod(input, dim=None, keep_dim=False, name=None):
    """

    Computes the product of tensor elements over the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor, the data type is float32,
            float64, int32, int64.
        dim (int|list|tuple, optional): The dimensions along which the product is performed. If
            :attr:`None`, multiply all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool, optional): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true, default
            value is False.
        name(str, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: Tensor, result of product on the specified dim of input tensor,
        its data type is the same as the input's.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            x = fluid.data(name='x', shape=[2, 4], dtype='float32')
            fluid.layers.reduce_prod(x)  # [0.0002268]
            fluid.layers.reduce_prod(x, dim=0)  # [0.02, 0.06, 0.3, 0.63]
            fluid.layers.reduce_prod(x, dim=-1)  # [0.027, 0.0084]
            fluid.layers.reduce_prod(x, dim=1,
                                     keep_dim=True)  # [[0.027], [0.0084]]

            # y is a Tensor variable with shape [2, 2, 2] and elements as below:
            #      [[[1.0, 2.0], [3.0, 4.0]],
            #      [[5.0, 6.0], [7.0, 8.0]]]
            # Each example is followed by the corresponding output tensor.
            y = fluid.data(name='y', shape=[2, 2, 2], dtype='float32')
            fluid.layers.reduce_prod(y, dim=[1, 2]) # [24.0, 1680.0]
            fluid.layers.reduce_prod(y, dim=[0, 1]) # [105.0, 384.0]
    """
    helper = LayerHelper('reduce_prod', **locals())
    if dim is not None and not isinstance(dim, list):
        if isinstance(dim, tuple):
            dim = list(dim)
        elif isinstance(dim, int):
            dim = [dim]
        else:
            raise TypeError(
                "The type of axis must be int, list or tuple, but received {}".
                format(type(dim)))
    check_variable_and_dtype(
        input, 'input', ['float32', 'float64', 'int32', 'int64'], 'reduce_prod')
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_prod',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out
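

# Illustrative sketch (added commentary, not original fluid code; the helper
# name is hypothetical). Unlike the other reduce ops above, `reduce_prod`
# also accepts a tuple for `dim` and converts it to a list internally, so the
# two calls below are equivalent. Dygraph mode is assumed.
def _reduce_prod_tuple_dim_sketch():
    arr = np.array([[[1.0, 2.0], [3.0, 4.0]],
                    [[5.0, 6.0], [7.0, 8.0]]], dtype='float32')
    data = paddle.to_tensor(arr)
    as_list = reduce_prod(data, dim=[1, 2])   # [24.0, 1680.0]
    as_tuple = reduce_prod(data, dim=(1, 2))  # identical result
    return as_list, as_tuple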


def reduce_all(input, dim=None, keep_dim=False, name=None):
    """

    This OP computes the ``logical and`` of tensor elements over the given dimension, and outputs the result.

    Args:
        input (Tensor): the input tensor, its data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical and is computed.
            If :attr:`None`, compute the logical and over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically. The default value is None.

    Returns:
        Tensor: The reduced tensor variable with ``logical and`` in the given dims. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [True, True]]
            x = fluid.layers.assign(np.array([[1, 0], [1, 1]], dtype='int32'))
            x = fluid.layers.cast(x, 'bool')

            out = fluid.layers.reduce_all(x)  # False
            out = fluid.layers.reduce_all(x, dim=0)  # [True, False]
            out = fluid.layers.reduce_all(x, dim=-1)  # [False, True]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

            out = fluid.layers.reduce_all(x, dim=1, keep_dim=True)  # [[False], [True]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

    """
    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
    helper = LayerHelper('reduce_all', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_all',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out


def reduce_any(input, dim=None, keep_dim=False, name=None):
    """
    This OP computes the ``logical or`` of tensor elements over the given dimension, and outputs the result.

    Args:
        input (Tensor): the input tensor, its data type should be `bool`.
        dim (list|int, optional): The dimension along which the logical or is computed.
            If :attr:`None`, compute the logical or over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`. The default value is None.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true. The default value is False.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The reduced tensor variable with ``logical or`` in the given dims. The output data type is bool.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            import numpy as np

            # x is a bool Tensor variable with following elements:
            #    [[True, False]
            #     [False, False]]
            x = fluid.layers.assign(np.array([[1, 0], [0, 0]], dtype='int32'))
            x = fluid.layers.cast(x, 'bool')

            out = fluid.layers.reduce_any(x)  # True
            out = fluid.layers.reduce_any(x, dim=0)  # [True, False]
            out = fluid.layers.reduce_any(x, dim=-1)  # [True, False]
            # keep_dim=False, x.shape=(2,2), out.shape=(2,)

            out = fluid.layers.reduce_any(x, dim=1,
                                     keep_dim=True)  # [[True], [False]]
            # keep_dim=True, x.shape=(2,2), out.shape=(2,1)

    """
    check_variable_and_dtype(input, 'input', ('bool'), 'reduce_any')
    helper = LayerHelper('reduce_any', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    if dim is not None and not isinstance(dim, list):
        dim = [dim]
    helper.append_op(
        type='reduce_any',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None and dim != [] else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim is None or dim == [] or
            len(dim) == len(input.shape) else False
        })
    return out
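

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). `reduce_all` and `reduce_any` follow numpy's
# `all`/`any` semantics on a boolean array, which makes the docstring outputs
# above easy to verify without building a program.
def _reduce_all_any_numpy_check():
    mask = np.array([[True, False], [False, False]])
    return (np.all(mask, axis=0),  # [False, False]
            np.any(mask, axis=0),  # [True, False]
            np.any(mask))          # True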


def split(input, num_or_sections, dim=-1, name=None):
    """
    Split the input tensor into multiple sub-Tensors.

    Args:
        input (Tensor): A N-D Tensor. The data type is bool, float16, float32, float64, int32 or int64.
        num_or_sections (int|list|tuple): If ``num_or_sections`` is int, then the ``num_or_sections``
            indicates the number of equal sized sub-Tensors that the ``input``
            will be divided into. If ``num_or_sections`` is a list or tuple, the length of it
            indicates the number of sub-Tensors and the elements in it indicate the sizes of sub-Tensors'
            dimension orderly. The length of the list mustn't be larger than the ``input`` 's size of specified dim.
        dim (int|Tensor, optional): The dimension along which to split, it can be a scalar with type ``int`` or
            a ``Tensor`` with shape [1] and data type ``int32`` or ``int64``. If :math:`dim < 0`,
            the dimension to split along is :math:`rank(input) + dim`. Default is -1.
        name (str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        list(Tensor): The list of segmented Tensors.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            # input is a Tensor whose shape is [3, 9, 5]
            input = fluid.data(
                 name="input", shape=[3, 9, 5], dtype="float32")

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=1)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, 4], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            out0, out1, out2 = fluid.layers.split(input, num_or_sections=[2, 3, -1], dim=1)
            # out0.shape [3, 2, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 4, 5]

            # dim is negative, so the real dim to split along is
            # rank(input) + dim = 3 + (-2) = 1.
            out0, out1, out2 = fluid.layers.split(input, num_or_sections=3, dim=-2)
            # out0.shape [3, 3, 5]
            # out1.shape [3, 3, 5]
            # out2.shape [3, 3, 5]

    """
    if in_dygraph_mode():
        num = None
        attrs = ()

        if isinstance(dim, Variable):
            dim = dim.numpy()
            dim = dim.item(0)
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input.shape) + dim) if dim < 0 else dim
        attrs += ('axis', dim)

        if isinstance(num_or_sections, int):
            num = num_or_sections
            attrs += ('num', num_or_sections)
        elif isinstance(num_or_sections, (list, tuple)):
            num = len(num_or_sections)
            if utils._contain_var(num_or_sections):
                for index, item in enumerate(num_or_sections):
                    if isinstance(item, Variable):
                        num_or_sections[index] = num_or_sections[index].numpy()[
                            0]
                attrs += ('sections', list(num_or_sections))
            else:
                attrs += ('sections', list(num_or_sections))
        else:
            raise TypeError(
                "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                "received %s." % (type(num_or_sections)))
        return _C_ops.split(input, num, *attrs)

    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], 'split')
    check_type(num_or_sections, 'num_or_sections', (list, int, tuple), 'split')
    check_type(dim, 'dim', (int, Variable), 'split')
    if isinstance(dim, Variable):
        check_dtype(dim.dtype, 'dim', ['int32', 'int64'], 'split')

    helper = LayerHelper('split', **locals())

    input_shape = input.shape
    inputs = {'X': input}
    attrs = {'num': num_or_sections if isinstance(num_or_sections, int) else 0}

    def _get_SectionsTensorList(one_list):
        tensor_list = []
        unk_dim_idx = -1
        for idx, dim_size in enumerate(one_list):
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                tensor_list.append(dim_size)
            else:
                assert (isinstance(dim_size, int))
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one value of 'num_or_section' in split can "
                        "be -1. But received num_or_section[%d] is also -1." %
                        idx)
                    unk_dim_idx = idx
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                tensor_list.append(temp_out)
        return tensor_list

    if isinstance(dim, Variable):
        dim.stop_gradient = True
        inputs['AxisTensor'] = dim
    else:
        assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0"
        dim = (len(input_shape) + dim) if dim < 0 else dim
        attrs['axis'] = dim

    if isinstance(num_or_sections, int):
        assert num_or_sections > 1, 'num_or_sections must be more than 1.'
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert input_shape[dim] % num_or_sections == 0, \
                "The input's size along the split dimension " \
                "must be evenly divisible by Attr(num_or_sections). " \
                "But %d is not evenly divisible by %d. " % (num_or_sections, input_shape[dim])
        num = num_or_sections
    else:
        if isinstance(dim, int) and input_shape[dim] > 0:
            assert len(num_or_sections) <= input_shape[
                dim], 'len(num_or_sections) must not be more than input.shape[dim].'
        num = len(num_or_sections)
        attrs['sections'] = list(
            map(lambda ele: -1 if isinstance(ele, Variable) else ele,
                num_or_sections))
        if utils._contain_var(num_or_sections):
            inputs['SectionsTensorList'] = _get_SectionsTensorList(
                num_or_sections)

    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]
    helper.append_op(
        type='split', inputs=inputs, outputs={'Out': outs}, attrs=attrs)
    return outs
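

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). For concrete section sizes, `split` matches
# numpy's split at the corresponding offsets: sections [2, 3, 4] along axis 1
# cut at offsets 2 and 2 + 3 = 5.
def _split_numpy_check():
    arr = np.zeros((3, 9, 5), dtype='float32')
    parts = np.split(arr, [2, 5], axis=1)
    return [p.shape for p in parts]  # [(3, 2, 5), (3, 3, 5), (3, 4, 5)]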


def l2_normalize(x, axis, epsilon=1e-12, name=None):
    r"""

    This op normalizes `x` along dimension `axis` using an L2
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

    .. math::

        y = \frac{x}{\sqrt{\sum {x^2} + epsilon}}

    For `x` with more dimensions, this layer independently normalizes each 1-D
    slice along dimension `axis`.

    Args:
        x(Variable|list): The input tensor could be N-D tensor, and the input data type could be float16, float32 or float64.
        axis(int): The axis on which to apply normalization. If `axis < 0`, \
            the dimension to normalize is rank(X) + axis. -1 is the
            last dimension.
        epsilon(float): The epsilon value is used to avoid division by zero, \
            the default value is 1e-12.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The output has the same shape and data type with `x`.

    Examples:

    .. code-block:: python
        :name: code-example1

        import paddle

        X = paddle.randn(shape=[3, 5], dtype='float64')
        out = paddle.fluid.layers.l2_normalize(X, axis=-1)
        print(out)

        # [[ 0.21558504  0.56360189  0.47466096  0.46269539 -0.44326736]
        #  [-0.70602414 -0.52745777  0.37771788 -0.2804768  -0.04449922]
        #  [-0.33972208 -0.43014923  0.31772556  0.76617881 -0.10761525]]

    """
    if len(x.shape) == 1:
        axis = 0
    if in_dygraph_mode():
        _, out = _C_ops.norm(x, 'axis', 1
                             if axis is None else axis, 'epsilon', epsilon)
        return out

    check_variable_and_dtype(x, "X", ("float16", "float32", "float64"), "norm")

    helper = LayerHelper("l2_normalize", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    norm = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="norm",
        inputs={"X": x},
        outputs={"Out": out,
                 "Norm": norm},
        attrs={
            "axis": 1 if axis is None else axis,
            "epsilon": epsilon,
        })
    return out
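

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). The docstring formula
# y = x / sqrt(sum(x^2) + epsilon) can be reproduced with plain numpy, one
# 1-D slice at a time along `axis`.
def _l2_normalize_numpy_check(arr, axis=-1, epsilon=1e-12):
    arr = np.asarray(arr, dtype='float64')
    norm = np.sqrt(np.sum(np.square(arr), axis=axis, keepdims=True) + epsilon)
    return arr / norm  # same shape as the input; slices have unit L2 norm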


@deprecated(since="2.0.0", update_to="paddle.matmul")
def matmul(x, y, transpose_x=False, transpose_y=False, alpha=1.0, name=None):
    """
    Applies matrix multiplication to two tensors.

    Currently, the input tensors can be of any rank, but when the rank of
    either input is larger than 3, the two inputs must have the same rank.

    The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
    flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:

    - If a transpose flag is specified, the last two dimensions of the tensor
      are transposed. If the tensor is rank-1 of shape :math:`[D]`, then for
      :math:`x` it is treated as :math:`[1, D]` in nontransposed form and as
      :math:`[D, 1]` in transposed form, whereas for :math:`y` it is the
      opposite: It is treated as :math:`[D, 1]` in nontransposed form and as
      :math:`[1, D]` in transposed form.

    - After transpose, the two tensors are 2-D or n-D and matrix multiplication
      performs in the following way.

      - If both are 2-D, they are multiplied like conventional matrices.
      - If either is n-D, it is treated as a stack of matrices residing in the
        last two dimensions and a batched matrix multiply supporting broadcast
        applies on the two tensors.

    Also note that if the raw tensor :math:`x` or :math:`y` is rank-1 and
    nontransposed, the prepended or appended dimension :math:`1` will be
    removed after matrix multiplication.

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a Tensor or LoDTensor.
        transpose_x (bool): Whether to transpose :math:`x` before multiplication.
        transpose_y (bool): Whether to transpose :math:`y` before multiplication.
        alpha (float): The scale of output. Default 1.0.
        name(str|None): A name for this layer(optional). If set None, the layer
            will be named automatically.

    Returns:
        Variable: The product Tensor (or LoDTensor) variable.

    Examples:
        .. code-block:: python

            # Examples to clarify shapes of the inputs and output
            # x: [B, ..., M, K], y: [B, ..., K, N]
            # fluid.layers.matmul(x, y)  # out: [B, ..., M, N]

            # x: [B, M, K], y: [B, K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]

            # x: [B, M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [B, M, N]

            # x: [M, K], y: [K, N]
            # fluid.layers.matmul(x, y)  # out: [M, N]

            # x: [B, M, K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [B, M]

            # x: [K], y: [K]
            # fluid.layers.matmul(x, y)  # out: [1]

            # x: [M], y: [N]
            # fluid.layers.matmul(x, y, True, True)  # out: [M, N]

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            x = fluid.layers.data(name='x', shape=[2, 3], dtype='float32')
            y = fluid.layers.data(name='y', shape=[3, 2], dtype='float32')
            out = fluid.layers.matmul(x, y, True, True)
    """
    if in_dygraph_mode():
        out = _varbase_creator(dtype=x.dtype)
        _C_ops.matmul(x, y, out, 'transpose_X', transpose_x, 'transpose_Y',
                      transpose_y, 'alpha', float(alpha))
        return out

    def __check_input(x, y):
        var_names = {'x': x, 'y': y}
        for name, val in var_names.items():
            check_variable_and_dtype(
                val, name, ['float16', 'float32', 'float64'], 'matmul')
        x_shape = list(x.shape)
        y_shape = list(y.shape)
        if len(x_shape) == 1:
            x_shape = [1] + x_shape
        if len(y_shape) == 1:
            y_shape = y_shape + [1]

        # check the inner 2 dimensions
        if transpose_x:
            x_shape[-2], x_shape[-1] = x_shape[-1], x_shape[-2]
        if transpose_y:
            y_shape[-2], y_shape[-1] = y_shape[-1], y_shape[-2]
        if x_shape[-1] != y_shape[-2]:
            assert (x_shape[-1] == -1) or (y_shape[-2] == -1),                         \
                "After performing an optional transpose, Input X's width should be "   \
                "equal to Y's width for multiplication "                               \
                "prerequisites. But received X's shape: %s, Y's shape: %s\n" %         \
                (x_shape, y_shape)

        if len(y_shape) > 2 and len(x_shape) > 2:
            for i, dim_x in enumerate(x_shape[:-2]):
                # don't check neg shape
                if dim_x < 0 or y_shape[i] < 0:
                    continue
                if dim_x != y_shape[i]:
                    raise ValueError(
                        "When the matrix is larger than 2 dimensions, the higher "
                        "dimensional values of the two matrices need to be equal. "
                        "But received x_shape[%d] != y_shape[%d]. X's shape: %s, "
                        "Y's shape: %s.\n" % (i, i, x_shape, y_shape))

    attrs = {
        'transpose_X': transpose_x,
        'transpose_Y': transpose_y,
        'alpha': float(alpha),
    }

    __check_input(x, y)

    helper = LayerHelper('matmul', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='matmul',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs=attrs)
    return out
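

# Illustrative sketch (added commentary, not original fluid code; the helper
# name is hypothetical). It restates the docstring's shape rules for the 2-D
# case with transpose flags, mirroring what `__check_input` validates.
def _matmul_2d_out_shape(x_shape, y_shape, transpose_x=False,
                         transpose_y=False):
    m, k = (x_shape[1], x_shape[0]) if transpose_x else x_shape
    k2, n = (y_shape[1], y_shape[0]) if transpose_y else y_shape
    assert k == k2, "inner dimensions must agree after optional transposes"
    return [m, n]  # e.g. ([2, 3], [3, 2], True, True) -> [3, 3]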


def topk(input, k, name=None):
    """
    :alias_main: paddle.topk
    :alias: paddle.topk,paddle.tensor.topk,paddle.tensor.search.topk
    :old_api: paddle.fluid.layers.topk

    This OP is used to find values and indices of the k largest entries
    for the last dimension.

    If the input is a 1-D Tensor, finds the k largest entries and outputs
    their values and indices.

    If the input is a Tensor with higher rank, this operator computes the top k
    entries along the last dimension.

    .. code-block:: text

        Case 1:

          Input:
            input.shape = [3, 4]
            input.data = [[5, 4, 2, 3],
                     [9, 7, 10, 25],
                     [6, 2, 10, 1]]
            k = 2

          Output:
            The first output:
            values.shape = [3, 2]
            values.data = [[5, 4],
                      [10, 25],
                      [6, 10]]

            The second output:
            indices.shape = [3, 2]
            indices.data = [[0, 1],
                       [2, 3],
                       [0, 2]]

    Args:
        input(Variable): The input tensor. Support data types: float32, float64.
        k(int | Variable): The number of top elements to look for along the last dimension
                           of input tensor.
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

    Returns:
        Values (Variable): Input tensor's k largest elements along each last dimensional slice. The dimension is: :math:`input.shape[:-1]+[k]`.
        Indices (Variable): Indices of k largest elements along the last dimension of input. The dimension is the same as values.

    Raises:
        ValueError: If :math:`k < 1` or :math:`k > last dimension of input`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            input = fluid.data(name="input", shape=[None, 13, 11], dtype='float32')
            top5_values, top5_indices = layers.topk(input, k=5) # top5_values.shape=[None, 13, 5], top5_indices.shape=[None, 13, 5]

            # 1D Tensor
            input1 = fluid.data(name="input1", shape=[None, 13], dtype='float32')
            top5_values, top5_indices = layers.topk(input1, k=5) # top5_values.shape=[None, 5], top5_indices.shape=[None, 5]

            # k=Variable
            input2 = fluid.data(name="input2", shape=[None, 13, 11], dtype='float32')
            vk = fluid.data(name="vk", shape=[None, 1], dtype='int32') # save k in vk.data[0]
            vk_values, vk_indices = layers.topk(input2, k=vk) # vk_values.shape=[None, 13, k], vk_indices.shape=[None, 13, k]

    """
    if in_dygraph_mode():
        _k = k.numpy().item(0) if isinstance(k, Variable) else k
        out, indices = _C_ops.top_k(input, 'k', _k)
        out.stop_gradient = True
        indices.stop_gradient = True
        return out, indices

    inputs = {"X": [input]}
    attrs = {}
    if isinstance(k, Variable):
        inputs['K'] = [k]
    else:
        attrs = {'k': k}

    helper = LayerHelper("top_k", **locals())
    values = helper.create_variable_for_type_inference(dtype=input.dtype)
    indices = helper.create_variable_for_type_inference(dtype="int64")

    helper.append_op(
        type="top_k",
        inputs=inputs,
        outputs={"Out": [values],
                 "Indices": [indices]},
        attrs=attrs)
    values.stop_gradient = True
    indices.stop_gradient = True
    return values, indices
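

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). A top-k over the last dimension can be
# reproduced with numpy's argsort in descending order, which is handy for
# verifying `topk` outputs in tests.
def _topk_numpy_check(arr, k):
    arr = np.asarray(arr)
    indices = np.argsort(-arr, axis=-1)[..., :k]
    values = np.take_along_axis(arr, indices, axis=-1)
    return values, indices  # values sorted from largest to smallest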


def ctc_greedy_decoder(input,
                       blank,
                       input_length=None,
                       padding_value=0,
                       name=None):
    r"""
    This op is used to decode sequences by greedy policy in the following steps:

    1. Get the indexes of maximum value for each row in input, i.e.
       numpy.argmax(input, axis=1).
    2. For each sequence in result of step1, merge repeated tokens between two
       blanks and delete all blanks.

    This op is implemented in two modes: lod and padding, either of them can be used.
    The input can be either LoDTensor or Tensor, corresponding to lod and padding
    mode respectively.

    A simple example as below:

    .. code-block:: text

        Given:
        (1) for lod mode:

        input.data = [[0.6, 0.1, 0.3, 0.1],
                      [0.3, 0.2, 0.4, 0.1],
                      [0.1, 0.5, 0.1, 0.3],
                      [0.5, 0.1, 0.3, 0.1],

                      [0.5, 0.1, 0.3, 0.1],
                      [0.2, 0.2, 0.2, 0.4],
                      [0.2, 0.2, 0.1, 0.5],
                      [0.5, 0.1, 0.3, 0.1]]

        input.lod = [[4, 4]]

        Computation:

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]]
        step2: merge repeated tokens and remove blank which is 0. Then we get first output sequence:
               [[2], [1]]

        Finally:

        output.data = [[2],
                       [1],
                       [3]]

        output.lod = [[2, 1]]

        (2) for padding mode:

         input.data = [[[0.6, 0.1, 0.3, 0.1],
                        [0.3, 0.2, 0.4, 0.1],
                        [0.1, 0.5, 0.1, 0.3],
                        [0.5, 0.1, 0.3, 0.1]],

                       [[0.5, 0.1, 0.3, 0.1],
                        [0.2, 0.2, 0.2, 0.4],
                        [0.2, 0.2, 0.1, 0.5],
                        [0.5, 0.1, 0.3, 0.1]]]

        input_length.data = [[4], [4]]
        input.shape = [2, 4, 4]

        step1: Apply argmax to first input sequence which is input.data[0:4]. Then we get:
               [[0], [2], [1], [0]], for input.data[4:8] is [[0], [3], [3], [0]], shape is [2,4,1]
        step2: Change the argmax result to use padding mode, then argmax result is
                [[0, 2, 1, 0], [0, 3, 3, 0]], shape is [2, 4], lod is [], input_length is [[4], [4]]
        step3: Apply ctc_align to padding argmax result, padding_value is 0

        Finally:
        output.data = [[2, 1, 0, 0],
                       [3, 0, 0, 0]]
        output_length.data = [[2], [1]]


    Parameters:

        input(Variable): the probabilities of variable-length sequences. When in lod mode,
                         it is a 2-D LoDTensor with LoD information. Its shape is [Lp, num_classes + 1]
                         where Lp is the sum of all input sequences' length and
                         num_classes is the true number of classes. When in padding mode,
                         it is a 3-D Tensor with padding, whose shape is [batch_size, N, num_classes + 1].
                         (not including the blank label). The data type can be float32 or float64.
        blank(int): the blank label index of Connectionist Temporal
                    Classification (CTC) loss, which is in the half-opened
                    interval [0, num_classes + 1).
        input_length(Variable, optional): 2-D LoDTensor, shape is [batch_size, 1], data type is int64.
                                 It is used for padding mode. In lod mode, input_length is None.
        padding_value(int): padding value.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        For lod mode, returns the result of CTC greedy decoder, 2-D LoDTensor, shape is [Lp, 1], \
        data type is int64. 'Lp' is the sum of all output sequences' length. If all the sequences \
        in result were empty, the result LoDTensor will be [-1] with empty \
        LoD [[]].

        For padding mode, returns a tuple of (output, output_length), as described below:

        output, 2-D Tensor, shape is [batch_size, N], data type is int64.

        output_length, 2-D Tensor, shape is [batch_size, 1], data type is int64. It is the length of \
                           each sequence of output for padding mode.

    Return type:
        For lod mode: Variable

        For padding mode: tuple of two Variables (output, output_length).


    Examples:
        .. code-block:: python

            # for lod mode
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 8], dtype='float32', lod_level=1)
            cost = fluid.layers.ctc_greedy_decoder(input=x, blank=0)

            # for padding mode
            x_pad = fluid.data(name='x_pad', shape=[10, 4, 8], dtype='float32')
            x_pad_len = fluid.data(name='x_pad_len', shape=[10, 1], dtype='int64')
            out, out_len = fluid.layers.ctc_greedy_decoder(input=x_pad, blank=0,
                            input_length=x_pad_len)

    """
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'ctc_greedy_decoder')

    helper = LayerHelper("ctc_greedy_decoder", **locals())
    _, topk_indices = topk(input, k=1)

    # ctc align op
    ctc_out = helper.create_variable_for_type_inference(dtype="int64")

    if input_length is None:
        helper.append_op(
            type="ctc_align",
            inputs={"Input": [topk_indices]},
            outputs={"Output": [ctc_out]},
            attrs={"merge_repeated": True,
                   "blank": blank})
        return ctc_out
    else:
        ctc_out_len = helper.create_variable_for_type_inference(dtype="int64")
        ctc_input = squeeze(topk_indices, [2])

        helper.append_op(
            type="ctc_align",
            inputs={"Input": [ctc_input],
                    "InputLength": [input_length]},
            outputs={"Output": [ctc_out],
                     "OutputLength": [ctc_out_len]},
            attrs={
                "merge_repeated": True,
                "blank": blank,
                "padding_value": padding_value
            })
        return ctc_out, ctc_out_len
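

# Illustrative sketch (added commentary, not original fluid code; the helper
# name is hypothetical). The two docstring steps (argmax per time step, then
# merge repeats and drop blanks) written out in plain Python for a single
# unpadded sequence.
def _ctc_greedy_decode_sketch(probs, blank=0):
    # probs: numpy array of shape [T, num_classes + 1]
    best_path = np.argmax(probs, axis=1)
    decoded = []
    previous = None
    for token in best_path:
        if token != blank and token != previous:
            decoded.append(int(token))
        previous = token
    return decoded  # e.g. the docstring's first sequence yields [2, 1]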


def transpose(x, perm, name=None):
    """
    Permute the data dimensions of `x` according to `perm`.

    The `i`-th dimension of the returned tensor will correspond to the
    perm[i]-th dimension of `x`.

    Args:
        x (Tensor): The input Tensor. It is a N-D Tensor of data types bool, float32, float64, int32.
        perm (list|tuple): The permutation applied to the dimensions of `x`.
        name (str): The name of this layer. It is optional.

    Returns:
        Tensor: A transposed n-D Tensor, with data type being bool, float32, float64, int32, int64.

    For Example:

        .. code-block:: text

         x = [[[ 1  2  3  4] [ 5  6  7  8] [ 9 10 11 12]]
             [[13 14 15 16] [17 18 19 20] [21 22 23 24]]]
         shape(x) =  [2,3,4]

         # Example 1
         perm0 = [1,0,2]
         y_perm0 = [[[ 1  2  3  4] [13 14 15 16]]
                   [[ 5  6  7  8]  [17 18 19 20]]
                   [[ 9 10 11 12]  [21 22 23 24]]]
         shape(y_perm0) = [3,2,4]

         # Example 2
         perm1 = [2,1,0]
         y_perm1 = [[[ 1 13] [ 5 17] [ 9 21]]
                   [[ 2 14] [ 6 18] [10 22]]
                   [[ 3 15]  [ 7 19]  [11 23]]
                   [[ 4 16]  [ 8 20]  [12 24]]]
         shape(y_perm1) = [4,3,2]

    Examples:

        .. code-block:: python

            import paddle

            x = paddle.randn([2, 3, 4])
            x_transposed = paddle.transpose(x, perm=[1, 0, 2])
            print(x_transposed.shape)
            # [3, 2, 4]

    """
    if in_dygraph_mode():
        out, _ = _C_ops.transpose2(x, 'axis', perm)
        return out

    check_variable_and_dtype(
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'transpose')
    check_type(perm, 'perm', (list, tuple), 'transpose')
    if isinstance(perm, tuple):
        perm = list(perm)
    if len(perm) != len(x.shape):
        raise ValueError(
            "Input(perm) is the permutation of dimensions of Input(x), "
            "its length should be equal to dimensions of Input(x), "
            "but received dimension of Input(x) is %s, "
            "the length of Input(perm) is %s." % (len(x.shape), len(perm)))
    for idx, dim in enumerate(perm):
        if dim >= len(x.shape):
            raise ValueError(
                "Each element in Input(perm) should be less than Input(x)'s dimension, "
                "but %d-th element in Input(perm) is %d which exceeds Input(x)'s "
                "dimension %d." % (idx, perm[idx], len(x.shape)))

    helper = LayerHelper('transpose', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    x_shape = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(
        type='transpose2',
        inputs={'X': [x]},
        outputs={'Out': [out],
                 'XShape': [x_shape]},
        attrs={'axis': perm})
    return out
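

# Illustrative cross-check (added commentary, not original fluid code; the
# helper name is hypothetical). `transpose` follows the same permutation
# convention as numpy: output axis i takes its size (and data) from input
# axis perm[i].
def _transpose_numpy_check():
    arr = np.arange(24).reshape(2, 3, 4)
    permuted = np.transpose(arr, (1, 0, 2))
    return permuted.shape  # (3, 2, 4), matching the docstring example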


5585 5586 5587 5588 5589 5590 5591
def im2sequence(input,
                filter_size=1,
                stride=1,
                padding=0,
                input_image_size=None,
                out_stride=1,
                name=None):
5592
    r"""
5593 5594
    :api_attr: Static Graph

5595
    Extracts image patches from the input tensor to form a tensor of shape
L
Liufang Sang 已提交
5596 5597 5598
    {input.batch_size * output_height * output_width, filter_size_height *
    filter_size_width * input.channels}. This op use filter to scan images
    and convert these images to sequences. After expanding, the number of time step are
5599 5600
    output_height * output_width for an image, in which output_height and
    output_width are calculated by below equation:
5601 5602 5603

    .. math::

L
Liufang Sang 已提交
5604 5605 5606 5607
        output\_height  = 1 + \
            (padding\_up + padding\_down + input\_height  - filter\_size\_height  + stride\_height - 1) / stride\_height \\\\
        output\_width  = 1 + \
            (padding\_left + padding\_right + input\_width  - filter\_size\_width  + stride\_width - 1) / stride\_width
5608

L
Liufang Sang 已提交
5609
    And the dimension of each time step is filter_size_height * filter_size_width * input.channels.
5610

L
Liufang Sang 已提交
5611 5612
    Parameters:
        input (Variable): The input should be a 4-D Tensor in :math:`NCHW` format. The data type is float32.
W
wanghaoshuang 已提交
5613

L
Liufang Sang 已提交
5614 5615 5616
        filter_size(int32 | List[int32]): The filter size. If filter_size is a List,
            it must contain two integers, :math:`[filter\_size\_height, filter\_size\_width]` .
            Otherwise, the filter size will be a square :math:`[filter\_size, filter\_size]` . Default is 1.
5617

L
Liufang Sang 已提交
5618 5619
        stride(int32 | List[int32]): The stride size. If stride is a List, it must
            contain two integers, :math:`[stride\_height, stride\_width]` . Otherwise, the stride size will be a square :math:`[stride\_size, stride\_size]` . Default is 1.
5620

L
Liufang Sang 已提交
5621 5622 5623 5624 5625
        padding(int32 | List[int32]): The padding size. If padding is a List, it can
            contain four integers like :math:`[padding\_up, padding\_left, padding\_down, padding\_right]` to indicate
            paddings of four direction.  Or it can contain two integers :math:`[padding\_height, padding\_width]` which means
            padding_up = padding_down = padding_height and
            padding_left = padding_right = padding_width. Otherwise, a scalar padding means
5626
            padding_up = padding_down = padding_left = padding_right = padding.
L
Liufang Sang 已提交
5627
            Default is 0.
5628

L
Liufang Sang 已提交
5629 5630 5631 5632
        input_image_size(Variable, optional): the input contains image real size.It's dim
            is :math:`[batchsize, 2]` . It is just for batch inference when not None. Default is None.

        out_stride(int32 | List[int32]): The scaling of image through CNN. It is valid only when input_image_size is not None.
T
tianshuo78520a 已提交
5633
            If out_stride is List,  it must contain two integers,
L
Liufang Sang 已提交
5634 5635 5636 5637 5638
            :math:`[out\_stride\_height, out\_stride\_W]` . Otherwise,
            the out_stride_height = out_stride_width = out_stride. Default is 1.

        name (str, optional): The default value is None.  Normally there is no need for
                    user to set this property.  For more information, please refer to :ref:`api_guide_Name` .
5639 5640 5641

    Returns:
            The output is a 2-D LoDTensor with shape {input.batch\_size * output\_height * output\_width, \
L
Liufang Sang 已提交
5642 5643 5644
            filter\_size\_height * filter\_size\_width * input.channels}. The data type is float32.

    Return Type: Variable
5645 5646 5647 5648 5649 5650 5651 5652 5653 5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665 5666 5667 5668 5669 5670 5671

    Examples:

        .. code-block:: text

            Given:

            x = [[[[ 6.  2.  1.]
                   [ 8.  3.  5.]
                   [ 0.  2.  6.]]

                  [[ 2.  4.  4.]
                   [ 6.  3.  0.]
                   [ 6.  4.  7.]]]

                 [[[ 6.  7.  1.]
                   [ 5.  7.  9.]
                   [ 2.  4.  8.]]

                  [[ 1.  2.  1.]
                   [ 1.  3.  5.]
                   [ 9.  0.  8.]]]]

            x.dims = {2, 2, 3, 3}

            And:

W
wanghaoshuang 已提交
5672 5673 5674
            filter = [2, 2]
            stride = [1, 1]
            padding = [0, 0]
5675 5676 5677 5678 5679 5680 5681 5682 5683 5684 5685 5686

            Then:

            output.data = [[ 6.  2.  8.  3.  2.  4.  6.  3.]
                           [ 2.  1.  3.  5.  4.  4.  3.  0.]
                           [ 8.  3.  0.  2.  6.  3.  6.  4.]
                           [ 3.  5.  2.  6.  3.  0.  4.  7.]
                           [ 6.  7.  5.  7.  1.  2.  1.  3.]
                           [ 7.  1.  7.  9.  2.  1.  3.  5.]
                           [ 5.  7.  2.  4.  1.  3.  9.  0.]
                           [ 7.  9.  4.  8.  3.  5.  0.  8.]]

5687
            output.dims = {8, 8}
5688

5689
            output.lod = [[4, 4]]
5690

T
Tink_Y 已提交
5691
    Examples:
5692 5693 5694

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            data = fluid.data(name='data', shape=[None, 3, 32, 32],
                              dtype='float32')
            output = fluid.layers.im2sequence(
                input=data, stride=[1, 1], filter_size=[2, 2])
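
            # With filter [2, 2], stride [1, 1] and zero padding, each 32x32
            # input yields (32 - 2 + 1)^2 = 961 windows, so for batch size N the
            # output is a 2-D LoDTensor of shape [N * 961, 2 * 2 * 3] = [N * 961, 12].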

    """
    assert not in_dygraph_mode(), (
        "sequence layer is not supported in dygraph mode yet.")

    check_variable_and_dtype(input, 'input', ['float32'], 'im2sequence')

    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]
    if len(padding) == 2:
        padding.append(padding[0])
        padding.append(padding[1])
    inputs = {"X": input}
    attrs = {"kernels": filter_size, "strides": stride, "paddings": padding}
    if input_image_size:
        if isinstance(out_stride, int):
            out_stride = [out_stride, out_stride]
        inputs["Y"] = input_image_size
        attrs["out_stride"] = out_stride
    helper = LayerHelper('im2sequence', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return out


@templatedoc()
def row_conv(input, future_context_size, param_attr=None, act=None):
    """
    :api_attr: Static Graph

    ${comment}

    Args:
        input (${x_type}): ${x_comment}.
        future_context_size (int): Future context size. Please note that the
            shape of the convolution kernel is [future_context_size + 1, D].
        param_attr (ParamAttr): Attributes of parameters, including
            name, initializer etc.
        act (str): Non-linear activation to be applied to output variable.

    Returns:
        ${out_comment}.

    Examples:

      .. code-block:: python

        # for LoDTensor inputs
        import paddle
        paddle.enable_static()
        x = paddle.static.data(name='x', shape=[9, 16],
                               dtype='float32', lod_level=1)
        out = paddle.static.nn.row_conv(input=x, future_context_size=2)
        # for Tensor inputs
        x = paddle.static.data(name='x', shape=[9, 4, 16], dtype='float32')
        out = paddle.static.nn.row_conv(input=x, future_context_size=2)
    """
    helper = LayerHelper('row_conv', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
    dtype = helper.input_dtype()
    filter_shape = [future_context_size + 1, input.shape[-1]]
    filter_param = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='row_conv',
        inputs={'X': [input],
                'Filter': [filter_param]},
        outputs={'Out': [out]})
    return helper.append_activation(out)


@templatedoc()
def multiplex(inputs, index, name=None):
    """

    Based on the given index parameter, the OP selects a specific row from each input Tensor to construct the output Tensor.

    Suppose the input consists of :math:`m` Tensors, where :math:`I_{i}` denotes the i-th input Tensor and :math:`i` ranges over :math:`[0,m)` .

    Let :math:`O` denote the output, where :math:`O[i]` denotes the i-th row of the output. Then the output satisfies :math:`O[i] = I_{index[i]}[i]` .

    For Example:

            .. code-block:: text

                Given:

                inputs = [[[0,0,3,4], [0,1,3,4], [0,2,4,4], [0,3,3,4]],
                          [[1,0,3,4], [1,1,7,8], [1,2,4,2], [1,3,3,4]],
                          [[2,0,3,4], [2,1,7,8], [2,2,4,2], [2,3,3,4]],
                          [[3,0,3,4], [3,1,7,8], [3,2,4,2], [3,3,3,4]]]

                index = [[3],[0],[1],[2]]

                out = [[3,0,3,4],    # out[0] = inputs[index[0]][0] = inputs[3][0] = [3,0,3,4]
                       [0,1,3,4],    # out[1] = inputs[index[1]][1] = inputs[0][1] = [0,1,3,4]
                       [1,2,4,2],    # out[2] = inputs[index[2]][2] = inputs[1][2] = [1,2,4,2]
                       [2,3,3,4]]    # out[3] = inputs[index[3]][3] = inputs[2][3] = [2,3,3,4]


    Args:
        inputs (list): The input Tensor list. The list elements are N-D Tensors of data types float32, float64, int32, int64. All input Tensor shapes should be the same and the rank must be at least 2.
        index (Tensor): Used to select rows from the input Tensors to construct the output Tensor. It is a 2-D Tensor with data type int32 or int64 and shape [M, 1], where M is the number of input Tensors.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
    Returns:
        Tensor: Output of multiplex OP, with data type being float32, float64, int32, int64.

    Examples:

        .. code-block:: python

            import paddle
            import numpy as np
            img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
            img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
            inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
            index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
            res = paddle.multiplex(inputs, index)
            print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
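
            # Row selection follows out[i] = inputs[index[i]][i]:
            # index = [[1], [0]] picks row 0 of img2 and row 1 of img1,
            # giving [[5., 6.], [3., 4.]].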
    """
5832
    if in_dygraph_mode():
W
wanghuancoder 已提交
5833
        return _C_ops.multiplex(index, inputs)
5834 5835
    helper = LayerHelper('multiplex', **locals())

5836 5837 5838 5839 5840 5841 5842 5843 5844
    check_type(inputs, 'inputs', (list), 'multiplex')
    if len(inputs) < 2:
        raise ValueError(
            "inputs should be a list object with at least 2 elements.")
    for id, x in enumerate(inputs):
        check_variable_and_dtype(x, 'input[' + str(id) + ']',
                                 ['float32', 'float64', 'int32', 'int64'],
                                 'multiplex')
    check_variable_and_dtype(index, "index", ['int32', 'int64'], 'multiplex')
5845 5846

    out = helper.create_variable_for_type_inference(inputs[0].dtype)
5847
    helper.append_op(
5848 5849 5850 5851 5852
        type='multiplex',
        inputs={'X': inputs,
                'Ids': index},
        outputs={'Out': [out]})
    return out


def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
    """

    This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
    It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
    For each instance, it computes the smooth L1 loss element by element first
    and then sums all the losses. So the shape of the output Variable is
    [batch_size, 1].
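
    As a reference (a sketch of the commonly used Fast R-CNN form, not quoted
    from the op's kernel), the element-wise loss with :math:`d = x - y` is:

    .. math::

        loss(d) = \\begin{cases}
            0.5 (\\sigma d)^2, & |d| < 1 / \\sigma^2 \\\\
            |d| - 0.5 / \\sigma^2, & \\text{otherwise}
        \\end{cases}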
    Args:
        x (Variable): A tensor with rank at least 2. The input value of smooth
            L1 loss op with shape [batch_size, dim1, ..., dimN].
            A LoDTensor or Tensor with type float32.
        y (Variable): A tensor with rank at least 2. The target value of smooth
            L1 loss op with same shape as :attr:`x`.
            A LoDTensor or Tensor with type float32.
        inside_weight (Variable|None): A tensor with rank at least 2. This
            input is optional and should have the same shape as :attr:`x`. If
            provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
            by this tensor element by element.
            A Tensor with type float32.
        outside_weight (Variable|None): A tensor with rank at least 2. This
            input is optional and should have the same shape as :attr:`x`. If
            provided, the output smooth L1 loss will be multiplied by this tensor
            element by element.
            A Tensor with type float32.
        sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
           scalar with default value 1.0.

    Returns:
        Variable: The output smooth L1 loss with shape [batch_size, 1]. A Tensor with type float32.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()
            data = fluid.data(name="x", shape=[-1, 3], dtype="float32")
            label = fluid.data(name="y", shape=[-1, 3], dtype="float32")
            result = fluid.layers.smooth_l1(data, label)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            x = np.random.rand(3, 3).astype("float32")
            y = np.random.rand(3, 3).astype("float32")
            output = exe.run(feed={"x": x, "y": y},
                             fetch_list=[result])
            print(output)

            # [array([[0.08220536],
            #         [0.36652038],
            #         [0.20541131]], dtype=float32)]

    """
    check_variable_and_dtype(x, 'X', ['float32', 'float64'], 'smooth_l1_loss')
    check_variable_and_dtype(y, 'Y', ['float32', 'float64'], 'smooth_l1_loss')

    helper = LayerHelper('smooth_l1_loss', **locals())

    diff = helper.create_variable_for_type_inference(dtype=x.dtype)
    loss = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='smooth_l1_loss',
        inputs={
            'X': x,
            'Y': y,
            'InsideWeight': inside_weight,
            'OutsideWeight': outside_weight
        },
        outputs={'Diff': diff,
                 'Out': loss},
        attrs={'sigma': sigma if sigma is not None else 1.0})
    return loss


@deprecated(since='2.0.0', update_to='paddle.nn.functional.one_hot')
def one_hot(input, depth, allow_out_of_range=False):
    """

    **WARNING:** This OP requires that the last dimension of the Tensor's shape be equal to 1.
    It will be deprecated in a future release. It is recommended to use :ref:`api_fluid_one_hot` instead.

    The operator converts each id in the input to a one-hot vector with a
    :attr:`depth` length. The value in the vector dimension corresponding to the id
    is 1, and the value in the remaining dimension is 0.

    The shape of the output Tensor or LoDTensor is generated by adding a :attr:`depth` dimension
    behind the last dimension of the input shape.

    .. code-block:: text

        Example 1 (allow_out_of_range=False):

        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [3], [0]]
            depth = 4

        output:
            Out.shape = [4, 4]
            Out.data = [[0., 1., 0., 0.],
                        [0., 1., 0., 0.],
                        [0., 0., 0., 1.],
                        [1., 0., 0., 0.]]

        Example 2 (allow_out_of_range=True):

        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [5], [0]]
            depth = 4
            allow_out_of_range = True

        output:
            Out.shape = [4, 4]
            Out.data = [[0., 1., 0., 0.],
                        [0., 1., 0., 0.],
                        [0., 0., 0., 0.], # This id is 5, which goes beyond depth, so it is set to all zeros.
                        [1., 0., 0., 0.]]

        Example 3 (allow_out_of_range=False):

        input:
            X.shape = [4, 1]
            X.data = [[1], [1], [5], [0]]
            depth = 4
            allow_out_of_range = False

        output: Throws an exception :code:`Illegal value`
            X contains the id 5, which is greater than depth.
            allow_out_of_range=False means that the word id is not allowed to exceed depth,
            so an exception is thrown.

    Args:
        input(Variable): Tensor or LoDTensor with shape :math:`[N_1, N_2, ..., N_k, 1]` ,
            which contains at least one dimension and the last dimension must be 1.
            The data type is int32 or int64.
        depth(scalar): An integer defining the :attr:`depth` of the one-hot dimension. If input
            is a word id, depth is generally the dictionary size.
        allow_out_of_range(bool): A bool value indicating whether the input
            indices could be out of range :math:`[0, depth)` . When input indices are
            out of range, an exception :code:`Illegal value` is raised if :attr:`allow_out_of_range`
            is False, or a zero-filled representation is created if it is set True.
            Default: False.

    Returns:
        Variable: The one-hot representations of input. A Tensor or LoDTensor with type float32.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # Corresponds to the first example above, where label.shape is [4, 1] and one_hot_label.shape is [4, 4].
            label = fluid.data(name="label", shape=[4, 1], dtype="int64")
            one_hot_label = fluid.layers.one_hot(input=label, depth=4)
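
            # As of Paddle 2.x the recommended replacement is
            # paddle.nn.functional.one_hot; a sketch (dygraph mode, and the
            # trailing dimension of 1 is no longer required):
            #   label = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
            #   one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)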
    """
6016
    if in_dygraph_mode():
S
songyouwei 已提交
6017 6018 6019 6020
        if isinstance(depth, Variable):
            depth = depth.numpy()
            assert depth.shape == (
                1, ), "depth of type Variable should have shape [1]"
6021
            depth = depth.item(0)
W
wanghuancoder 已提交
6022 6023
        out = _C_ops.one_hot(input, 'depth', depth, 'allow_out_of_range',
                             allow_out_of_range)
6024 6025
        out.stop_gradient = True
        return out
6026

6027
    helper = LayerHelper("one_hot", **locals())
6028 6029
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'one_hot')
    check_type(depth, 'depth', (six.integer_types, Variable), 'one_hot')
X
Xin Pan 已提交
6030
    one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
6031

6032 6033
    if not isinstance(depth, Variable):
        # user attribute
6034
        inputs = {'X': input}
Y
Yi Liu 已提交
6035
        attrs = {'depth': depth, 'allow_out_of_range': allow_out_of_range}
6036
    else:
6037 6038 6039
        depth.stop_gradient = True
        inputs = {'X': input, 'depth_tensor': depth}
        attrs = {'allow_out_of_range': allow_out_of_range}
6040 6041
    helper.append_op(
        type="one_hot",
6042 6043
        inputs=inputs,
        attrs=attrs,
6044 6045
        outputs={'Out': one_hot_out})
    one_hot_out.stop_gradient = True
6046
    return one_hot_out


def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
    :api_attr: Static Graph

    Create an auto-increasing variable, which will be automatically increased
    by 1 in every iteration. By default, the first return value of this counter is 1,
    and the step size is 1.

    Args:
        counter_name(str, optional): The counter name. Default '@STEP_COUNTER@'.
        begin(int, optional): The first return value of this counter. Default 1.
        step(int, optional): The step size. Default 1.

    Returns:
        Variable: The auto-increased Variable with data type int64.

    Examples:
        .. code-block:: python

           import paddle.fluid as fluid
           import paddle
           paddle.enable_static()
           global_step = fluid.layers.autoincreased_step_counter(
               counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
    """
    helper = LayerHelper('global_step_counter')
    if counter_name is None:
        counter_name = '@STEP_COUNTER@'
    counter, is_new_var = helper.create_or_get_global_variable(
        name=counter_name,
        dtype='int64',
        shape=[1],
        persistable=True,
        belong_to_optimizer=True)
    if is_new_var:
        helper.set_variable_initializer(
            counter, initializer=Constant(
                value=begin - 1, force_cpu=True))
        helper.main_program.global_block()._prepend_op(
            type='increment',
            inputs={'X': [counter]},
            outputs={'Out': [counter]},
            attrs={'step': float(step)})
        counter.stop_gradient = True

    return counter


def reshape(x, shape, actual_shape=None, act=None, inplace=False, name=None):
    r"""
    :alias_main: paddle.reshape
	:alias: paddle.reshape,paddle.tensor.reshape,paddle.tensor.manipulation.reshape

    This operator changes the shape of ``x`` without changing its data.

    The target shape can be given by ``shape`` or ``actual_shape``.
    When ``shape`` and ``actual_shape`` are set at the same time,
    ``actual_shape`` has a higher priority than ``shape``,
    but in that case ``shape`` can only be an integer list or tuple, and ``shape`` still should be set correctly to
    guarantee shape inference at compile time.

    Some tricks exist when specifying the target shape.

    1. -1 means the value of this dimension is inferred from the total element
    number of x and the remaining dimensions. Thus one and only one dimension can
    be set to -1.

    2. 0 means the actual dimension value is going to be copied from the
    corresponding dimension of x. The index of 0s in shape cannot exceed
    the dimension of x.

    Here are some examples to explain it.

    1. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    is [6, 8], the reshape operator will transform x into a 2-D tensor with
    shape [6, 8], leaving x's data unchanged.

    2. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    specified is [2, 3, -1, 2], the reshape operator will transform x into a
    4-D tensor with shape [2, 3, 4, 2], leaving x's data unchanged. In this
    case, one dimension of the target shape is set to -1, and the value of this
    dimension is inferred from the total element number of x and the remaining
    dimensions.

    3. Given a 3-D tensor x with a shape [2, 4, 6], and the target shape
    is [-1, 0, 3, 2], the reshape operator will transform x into a 4-D tensor
    with shape [2, 4, 3, 2], leaving x's data unchanged. In this case,
    besides -1, 0 means the actual dimension value is going to be copied from
    the corresponding dimension of x.

    **Note**:
        The parameter ``actual_shape`` will be deprecated in the future; please use ``shape`` instead to represent the target shape.

    Args:
        x(Tensor): An N-D Tensor. The data type is ``float32``, ``float64``, ``int32`` or ``int64``.
        shape(list|tuple|Tensor): Define the target shape. At most one dimension of the target shape can be -1.
                        The data type is ``int32`` . If ``shape`` is a list or tuple, the elements of it should be integers or Tensors with shape [1].
                        If ``shape`` is a Tensor, it should be a 1-D Tensor .
        actual_shape(variable, optional): A 1-D ``Tensor`` or ``LoDTensor`` . The data type is ``int32`` . If provided, reshape
                                according to this given shape rather than ``shape`` specifying shape.
                                That is to say ``actual_shape`` has a higher priority
                                than ``shape(list|tuple)`` but not ``shape(Tensor)``. \
                                This argument ``actual_shape`` will be removed in a future version. \
                                Instructions for updating: ``actual_shape`` will be removed in future versions and replaced by ``shape``.
        act (str, optional): The non-linear activation to be applied to the reshaped input. Default None.
        inplace(bool, optional): If ``inplace`` is True, the input and output of ``layers.reshape``
                       are the same variable. Otherwise, the input and output of
                       ``layers.reshape`` are different variables. Default False. Note that if ``x``
                       is the input of more than one OP, ``inplace`` must be False.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.
                            For more information, please refer to :ref:`api_guide_Name` .
    Returns:
        Tensor: A reshaped Tensor with the same data type as ``x``. It is a new tensor variable if ``inplace`` is ``False``, otherwise it is ``x``. If ``act`` is None, return the reshaped tensor variable, otherwise return the activated tensor variable.
    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # example 1:
            # attr shape is a list which doesn't contain Tensors.
            data_1 = fluid.data(
              name='data_1', shape=[2, 4, 6], dtype='float32')
            reshaped_1 = fluid.layers.reshape(
              x=data_1, shape=[-1, 0, 3, 2])
            # the shape of reshaped_1 is [2,4,3,2].

            # example 2:
            # attr shape is a list which contains Tensors.
            data_2 = fluid.layers.fill_constant([2,25], "int32", 3)
            dim = fluid.layers.fill_constant([1], "int32", 5)
            reshaped_2 = fluid.layers.reshape(data_2, shape=[dim, 10])
            # the shape of reshaped_2 is [5,10].

            # example 3:
            data_3 = fluid.data(
              name="data_3", shape=[2,4,6], dtype='float32')
            reshaped_3 = fluid.layers.reshape(x=data_3, shape=[6,8])
            # the shape of reshaped_3 is [6,8].
    """
    if in_dygraph_mode():
        #TODO(zhiqiu): enable inplace in dygraph mode.
        if inplace:
            warnings.warn(
                "Inplace on reshape is not allowed and will be discarded in dygraph mode currently."
            )
        if isinstance(shape, (list, tuple)):
            shape = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in shape
            ]
            out, _ = _C_ops.reshape2(x, None, 'shape', shape)
        elif isinstance(shape, Variable):
            shape.stop_gradient = True
            out, _ = _C_ops.reshape2(x, shape)
        else:
            raise ValueError(
                "shape must be an instance of `list`, `tuple` or `Variable`,"
                " got '{}.'".format(type(shape)))

        return dygraph_utils._append_activation_in_dygraph(out, act)

    check_variable_and_dtype(x, 'x', [
        'float16', 'float32', 'float64', 'int32', 'int64', 'bool', 'uint16'
    ], 'reshape')
    check_type(shape, 'shape', (list, tuple, Variable), 'reshape')
    check_type(actual_shape, 'actual_shape', (Variable, type(None)), 'reshape')

    helper = LayerHelper("reshape2", **locals())

    def get_attr_shape(list_shape):
        unk_dim_idx = -1
        attrs_shape = []
        for dim_idx, dim_size in enumerate(list_shape):
            if isinstance(dim_size, Variable):
                attrs_shape.append(-1)
            else:
                attrs_shape.append(dim_size)
                if dim_size == -1:
                    assert unk_dim_idx == -1, (
                        "Only one dimension value of 'shape' in reshape can "
                        "be -1. But received shape[%d] is also -1." % dim_idx)
                    unk_dim_idx = dim_idx
                elif dim_size == 0:
                    assert dim_idx < len(x.shape), (
                        "The index of 0 in `shape` must be less than "
                        "the input tensor X's dimensions. "
                        "But received shape[%d] = 0, X's dimensions = %d." %
                        (dim_idx, len(x.shape)))
                else:
                    assert dim_size > 0, (
                        "Each dimension value of 'shape' in reshape must not "
                        "be negative except one unknown dimension. "
                        "But received shape[%d] = %s." %
                        (dim_idx, str(dim_size)))
        return attrs_shape

    inputs = {"X": x}
    attrs = {}
    if isinstance(shape, Variable):
        shape.stop_gradient = True
        inputs["Shape"] = shape
    elif isinstance(shape, (list, tuple)):
        assert len(shape) > 0, ("The size of 'shape' in reshape can't be zero, "
                                "but received %s." % len(shape))
        attrs["shape"] = get_attr_shape(shape)
        if utils._contain_var(shape):
            inputs['ShapeTensor'] = utils._convert_to_tensor_list(shape)
        elif isinstance(actual_shape, Variable):
            actual_shape.stop_gradient = True
            inputs["Shape"] = actual_shape

    out = x if inplace else helper.create_variable_for_type_inference(
        dtype=x.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="reshape2",
        inputs=inputs,
        attrs=attrs,
        outputs={"Out": out,
                 "XShape": x_shape})
    return helper.append_activation(out)


def squeeze(input, axes, name=None):
    """
    This OP will squeeze single-dimensional entries of the input tensor's shape. If axes is provided, the
    dims given in axes will be removed, and each dim selected by axes should be 1. If axes is not provided,
    all dims equal to 1 will be removed.

    .. code-block:: text

        Case1:

          Input:
            X.shape = (1, 3, 1, 5)
            axes = [0]
          Output:
            Out.shape = (3, 1, 5)

        Case2:

          Input:
            X.shape = (1, 3, 1, 5)
            axes = []
          Output:
            Out.shape = (3, 5)

        Case3:

          Input:
            X.shape = [1,3,1,5]
            axes = [-2]
          Output:
            Out.shape = [1,3,5]

    Args:
        input (Variable): The input Tensor. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (list): One integer or a list of integers, indicating the dimensions to be squeezed.
                     Axes range is :math:`[-rank(input), rank(input))`.
                     If axes is negative, :math:`axes=axes+rank(input)`.
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.

    Returns:
        Variable: Output squeezed Tensor. Data type is the same as the input Tensor.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            # set batch size=None
            x = fluid.data(name='x', shape=[None, 5, 1, 10])
            y = layers.squeeze(input=x, axes=[2]) # y.shape=[None, 5, 10]

    """
    if in_dygraph_mode():
        out, _ = _C_ops.squeeze2(input, 'axes', axes)
        return out

    helper = LayerHelper("squeeze", **locals())
    check_variable_and_dtype(
        input, 'input',
        ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
        'squeeze')
    check_type(axes, 'axis/axes', (list, tuple), 'squeeze')
    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type="squeeze2",
        inputs={"X": input},
        attrs={"axes": axes},
        outputs={"Out": out,
                 "XShape": x_shape})

    return out


def unsqueeze(input, axes, name=None):
    """
    Insert single-dimensional entries to the shape of a Tensor. Takes one
    required argument axes, a list of dimensions that will be inserted.
    Dimension indices in axes are as seen in the output tensor.

    For example:

    .. code-block:: text

      Given a tensor with shape [3, 4, 5],
      the unsqueezed tensor with axes=[0, 4] has shape [1, 3, 4, 5, 1].

    Args:
        input (Variable): The input Tensor to be unsqueezed. Supported data type: float32, float64, bool, int8, int32, int64.
        axes (int|list|tuple|Variable): Indicates the dimensions to be inserted. The data type is ``int32`` . If ``axes`` is a list or tuple, the elements of it should be integers or Tensors with shape [1]. If ``axes`` is a Variable, it should be a 1-D Tensor .
        name (str|None): Name for this layer.

    Returns:
        Variable: Unsqueezed Tensor, with the same data type as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[5, 10])
            y = fluid.layers.unsqueeze(input=x, axes=[1])
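
            # axes may also be a single int (equivalent to axes=[1] here):
            y2 = fluid.layers.unsqueeze(input=x, axes=1)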
    """
    if in_dygraph_mode():
        if isinstance(axes, int):
            axes = [axes]
        elif isinstance(axes, Variable):
            axes = axes.numpy().tolist()
        elif isinstance(axes, (list, tuple)):
            axes = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in axes
            ]
        out, _ = _C_ops.unsqueeze2(input, 'axes', axes)
        return out

    check_type(axes, 'axis/axes', (int, list, tuple, Variable), 'unsqueeze')
    check_variable_and_dtype(
        input, 'input',
        ['float16', 'float32', 'float64', 'bool', 'int8', 'int32', 'int64'],
        'unsqueeze')
    helper = LayerHelper("unsqueeze2", **locals())
    inputs = {"X": input}
    attrs = {}

    if isinstance(axes, int):
        axes = [axes]
    if isinstance(axes, Variable):
        axes.stop_gradient = True
        inputs["AxesTensor"] = axes
    elif isinstance(axes, (list, tuple)):
        if utils._contain_var(axes):
            inputs["AxesTensorList"] = utils._convert_to_tensor_list(axes)
        else:
            attrs["axes"] = axes

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type="unsqueeze2",
        inputs=inputs,
        attrs=attrs,
        outputs={"Out": out,
                 "XShape": x_shape})

    return out


def lod_reset(x, y=None, target_lod=None):
    """
    Set the LoD of :attr:`x` to a new one specified by :attr:`y` or
    :attr:`target_lod`. When :attr:`y` is provided, :attr:`y.lod` would be
    considered as the target LoD first, otherwise :attr:`y.data` would be
    considered as the target LoD. If :attr:`y` is not provided, the target LoD should
    be specified by :attr:`target_lod`. If the target LoD is specified by
    :attr:`y.data` or :attr:`target_lod`, only one level of LoD is supported.

    .. code-block:: text

        * Example 1:

            Given a 1-level LoDTensor x:
                x.lod =  [[ 2,           3,                   1 ]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]

            target_lod: [4, 2]

            then we get a 1-level LoDTensor:
                out.lod =  [[4,                          2]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]

        * Example 2:

            Given a 1-level LoDTensor x:
                x.lod =  [[2,            3,                   1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]

            y is a Tensor:
                y.data = [[2, 4]]
                y.dims = [1, 3]

            then we get a 1-level LoDTensor:
                out.lod =  [[2,            4]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]

        * Example 3:

            Given a 1-level LoDTensor x:
                x.lod =  [[2,            3,                   1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]

            y is a 2-level LoDTensor:
                y.lod =  [[2, 2], [2, 2, 1, 1]]
                y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]]
                y.dims = [6, 1]

            then we get a 2-level LoDTensor:
                out.lod =  [[2, 2], [2, 2, 1, 1]]
                out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                out.dims = [6, 1]

    Args:
        x (Variable): Input variable which could be a Tensor or LoDTensor.
                      The data type should be int32, int64, float32 or float64.
        y (Variable, optional): If provided, output's LoD would be derived from :attr:`y`.
                                If y's lod level>0, the data type can be any type.
                                If y's lod level=0, the data type should be int32.
        target_lod (list|tuple, optional): One level LoD which should be considered
                                      as target LoD when :attr:`y` is not provided.

    Returns:
        Variable: Output variable with LoD specified by this layer.

    Raises:
        ValueError: If :attr:`y` and :attr:`target_lod` are both None.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[10])
            y = fluid.layers.data(name='y', shape=[10, 20], lod_level=2)
            out = fluid.layers.lod_reset(x=x, y=y)
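            # out's LoD is derived from y here, so out has lod_level=2.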
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'lod_reset')
    helper = LayerHelper("lod_reset", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    if y is not None:
        check_type(y, 'y', (Variable), 'lod_reset')
        #TODO: check y.lod_level = 0 dtype
        helper.append_op(
            type="lod_reset", inputs={'X': x,
                                      'Y': y}, outputs={'Out': out})
    elif target_lod is not None:
        helper.append_op(
            type="lod_reset",
            inputs={'X': x},
            attrs={'target_lod': target_lod},
            outputs={'Out': out})
    else:
        raise ValueError("y and target_lod should not be both none.")
    return out


def lod_append(x, level):
    """
    Append level to LoD of :attr:`x`.

    .. code-block:: text

        * Example 1:

            Given a 1-level LoDTensor x:
                x.lod =  [[ 2,           3,                   1 ]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]

            level: [1, 1, 1, 1, 1, 1]

            then we get a 2-level LoDTensor:
                x.lod =  [[ 2, 3, 1 ], [1, 1, 1, 1, 1, 1]]
                x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]]
                x.dims = [6, 1]

    Args:
        x (Variable): Input variable which could be a tensor or LoDTensor.
                      The data type should be int32, int64, float32 or float64.
        level (list|tuple|Variable, optional): The LoD level to be appended into the LoD of x.
                                               If level is a Variable and its lod level>0, the data type can be any type.
                                               If level is a Variable and its lod level=0, the data type should be int32.
    Returns:
        Variable: Output variable with new LoD level.

    Raises:
        ValueError: If :attr:`x` is None or :attr:`level` is not Iterable.
    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.layers.data(name='x', shape=[6, 10], lod_level=1)
            out = fluid.layers.lod_append(x, [1,1,1,1,1,1])
    """
    from collections import Iterable
    if x is None:
        raise ValueError("Input(x) can't be None.")
    if (not isinstance(level, Iterable)) and (not isinstance(level, Variable)):
        raise ValueError("Input(level) must be list, tuple or Variable.")

    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'lod_append')

    helper = LayerHelper("lod_append", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    inputs = {'X': x}
    attrs = {'append': True}

    if isinstance(level, Variable):
        inputs['Y'] = level
        #TODO: check y.lod_level = 0 dtype
    else:
        attrs['target_lod'] = level
    helper.append_op(
        type="lod_reset", inputs=inputs, attrs=attrs, outputs={'Out': out})
    return out


def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None,
        data_format='NCHW'):
    r"""
    :alias_main: paddle.nn.functional.lrn
	:alias: paddle.nn.functional.lrn,paddle.nn.functional.norm.lrn
	:old_api: paddle.fluid.layers.lrn

    This operator implements the Local Response Normalization Layer.
    This layer performs a type of "lateral inhibition" by normalizing over local input regions.
    For more information, please refer to `ImageNet Classification with Deep Convolutional Neural Networks <https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_ .

    The formula is as follows:

    .. math::

        Output(i, x, y) = Input(i, x, y) / \left(k + \alpha \sum\limits^{\min(C-1, i + n/2)}_{j = \max(0, i - n/2)}(Input(j, x, y))^2\right)^{\beta}

    In the above equation:

    - :math:`n` : The number of channels to sum over.
    - :math:`k` : The offset (avoid being divided by 0).
    - :math:`\alpha` : The scaling parameter.
    - :math:`\beta` : The exponent parameter.
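
    As a reference, a minimal NumPy sketch of this formula (an illustrative
    re-implementation for clarity, not the kernel the OP runs), assuming
    NCHW input:

    .. code-block:: python

        import numpy as np

        def lrn_ref(x, n=5, k=1.0, alpha=1e-4, beta=0.75):
            # x: float32 array of shape [N, C, H, W]. Each channel i is
            # normalized by the squared sum over the window of n adjacent
            # channels centered at i, following the formula above.
            out = np.empty_like(x)
            C = x.shape[1]
            for i in range(C):
                lo, hi = max(0, i - n // 2), min(C - 1, i + n // 2)
                square_sum = (x[:, lo:hi + 1] ** 2).sum(axis=1)
                out[:, i] = x[:, i] / (k + alpha * square_sum) ** beta
            return out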


    Args:
        input (Variable): Input feature, 4D-Tensor with the shape of [N, C, H, W] or [N, H, W, C],
            where N is the batch size, C is the input channel, H is height, W is width. The data
            type is float32. The rank of this tensor must be 4, otherwise it will raise ValueError.
        n (int, optional): The number of channels to sum over. Default: 5
        k (float, optional): An offset, positive. Default: 1.0
        alpha (float, optional): The scaling parameter, positive. Default: 1e-4
        beta (float, optional): The exponent, positive. Default: 0.75
        name (str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name`
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
    Returns:
        Variable: A tensor variable storing the transformation result with the same shape and data type as input.

    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        data = fluid.data(
            name="data", shape=[None, 3, 112, 112], dtype="float32")
        lrn = fluid.layers.lrn(input=data)
        print(lrn.shape)  # [-1, 3, 112, 112]
        print(lrn.dtype)  # float32
    """
    helper = LayerHelper('lrn', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'lrn')
    dtype = helper.input_dtype()
    input_shape = input.shape
    dims = len(input_shape)

    if dims != 4:
        raise ValueError(
            "Input's dimension size of Op(lrn) must be 4, but received %d." %
            (dims))
    if data_format not in ['NCHW', 'NHWC']:
        raise ValueError(
            "Attr(data_format) of Op(lrn) got wrong value: received " +
            data_format + " but only NCHW or NHWC supported.")
    mid_out = helper.create_variable_for_type_inference(
        dtype=dtype, stop_gradient=True)
    lrn_out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="lrn",
        inputs={"X": input},
        outputs={
            "Out": lrn_out,
            "MidOut": mid_out,
        },
        attrs={
            "n": n,
            "k": k,
            "alpha": alpha,
            "beta": beta,
            "data_format": data_format
        })

    return lrn_out


def pad(x, paddings, pad_value=0., name=None):
    r"""
    :alias_main: paddle.nn.functional.pad
	:alias: paddle.nn.functional.pad,paddle.nn.functional.common.pad
	:old_api: paddle.fluid.layers.pad

    This op will pad a tensor with a constant value given by :attr:`pad_value`, and the
    padded shape is specified by :attr:`paddings`.

    Specifically, the number of values padded before the elements of :attr:`x`
    in dimension :attr:`i` is indicated by :attr:`paddings[2*i]`, and the number
    of values padded after the elements of :attr:`x` in dimension :attr:`i` is
    indicated by :attr:`paddings[2*i+1]`.

    See below for an example.

    .. code-block:: text

        Given:
            x = [[1, 2], [3, 4]]

            paddings = [0, 1, 1, 2]

            pad_value = 0

        Return:
            out = [[0, 1, 2, 0, 0]
                   [0, 3, 4, 0, 0]
                   [0, 0, 0, 0, 0]]

    Args:
        x (Variable): Tensor, data type is float32.
        paddings (list): A list of integers. Its elements specify the padded
                         width before and after each dimension in turn.
                         The length of :attr:`paddings` must be equal to
                         :math:`rank(x) \times 2`.
        pad_value (float): The constant value used to pad.
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        The padded tensor, with the same data type and rank as :attr:`x`

    Return Type:
        Variable

    Examples:
        .. code-block:: python
            # x is a rank 2 tensor variable
            import paddle.fluid as fluid
            x = fluid.data(name='data', shape=[300, 300], dtype='float32')
            out = fluid.layers.pad(x=x, paddings=[0, 1, 1, 2], pad_value=0.)
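            # paddings=[0, 1, 1, 2] pads dim 0 by (0, 1) and dim 1 by (1, 2),
            # so out has shape [301, 303].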
    """
    check_variable_and_dtype(x, 'x', [
        'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
        'complex128'
    ], "pad")
    helper = LayerHelper('pad', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='pad',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'paddings': paddings,
               'pad_value': float(pad_value)})
    return out


def pad_constant_like(x, y, pad_value=0., name=None):
    r"""
    Pad :attr:`y` with :attr:`pad_value`, the number of values padded to
    the edges of each axis is specified by the difference of the shape
    of :attr:`x` and :attr:`y` . ((0, shape_x_0 - shape_y_0), ... (0, shape_x_n - shape_y_n))
    specify padding widths for each axis. The input should be a k-D tensor (k > 0 and k < 7).

    See below for an example.

    .. code-block:: text

        Given:
            X = [[[[ 0,  1,  2],
                   [ 3,  4,  5]],
                  [[ 6,  7,  8],
                   [ 9, 10, 11]],
                  [[12, 13, 14],
                   [15, 16, 17]]],
                 [[[18, 19, 20],
                   [21, 22, 23]],
                  [[24, 25, 26],
                   [27, 28, 29]],
                  [[30, 31, 32],
                   [33, 34, 35]]]]
6780

C
chengduo 已提交
6781 6782 6783 6784 6785
            X.shape = (2, 3, 2, 3)

            Y = [[[[35, 36, 37]],
                  [[38, 39, 40]],
                  [[41, 42, 43]]]]
6786

C
chengduo 已提交
6787
            Y.shape = (1, 3, 1, 3)
6788 6789 6790

        And
            pad_value = 0.
C
chengduo 已提交
6791

T
        Return:
            Out = [[[[35, 36, 37],
                     [ 0,  0,  0]],
                    [[38, 39, 40],
                     [ 0,  0,  0]],
                    [[41, 42, 43],
                     [ 0,  0,  0]]],
                   [[[ 0,  0,  0],
                     [ 0,  0,  0]],
                    [[ 0,  0,  0],
                     [ 0,  0,  0]],
                    [[ 0,  0,  0],
                     [ 0,  0,  0]]]]
            Out.shape = [2, 3, 2, 3]

C
chengduo 已提交
6808 6809

    Args:
T
tianshuo78520a 已提交
6810
        x (Variable): Tensor, its shape specifies the shape of output.
6811
        y (Variable): Tensor, its rank is the same with :attr:`x`, and for each dimension :math:`i` ,
S
SunGaofeng 已提交
6812
                      :math:`y\_shape[i] <= x\_shape[i]` . The data type can be float32 or float64.
C
chengduo 已提交
6813
        pad_value (float): The constant value used to pad.
6814 6815
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
S
SunGaofeng 已提交
6816
                             For more information, please refer to :ref:`api_guide_Name`
C
chengduo 已提交
6817 6818

    Returns:
S

    Return Type:
        Variable
C
chengduo 已提交
6823 6824 6825 6826 6827 6828

    Examples:
        .. code-block:: python

            # x is a rank 4 tensor variable, x.shape = (2, 3, 2, 3)
            # y is a rank 4 tensor variable, y.shape = (1, 3, 1, 3)
S
S
            y = fluid.data(name='y', shape=[1,3,1,3], dtype='float32')
C
chengduo 已提交
6832 6833 6834
            out = fluid.layers.pad_constant_like(x=x, y=y, pad_value=0.)
            # out is a rank 4 tensor variable, and out.shape = [2, 3 ,2 , 3]
    """
6835 6836 6837 6838
    check_type(x, 'x', (Variable), 'pad_constant_like')
    check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
                             "pad_constant_like")

6839 6840
    helper = LayerHelper('pad_constant_like', **locals())
    dtype = helper.input_dtype(input_param_name='y')
X
C
chengduo 已提交
6842 6843 6844 6845 6846 6847 6848 6849 6850
    helper.append_op(
        type='pad_constant_like',
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'pad_value': float(pad_value)})
    return out


def label_smooth(label,
                 prior_dist=None,
                 epsilon=0.1,
                 dtype="float32",
                 name=None):
    r"""
    :alias_main: paddle.nn.functional.label_smooth
	:alias: paddle.nn.functional.label_smooth,paddle.nn.functional.common.label_smooth
	:old_api: paddle.fluid.layers.label_smooth

    Label smoothing is a mechanism to regularize the classifier layer and is called
    label-smoothing regularization (LSR).
    Label smoothing is proposed to encourage the model to be less confident,
    since optimizing the log-likelihood of the correct label directly may
    cause overfitting and reduce the ability of the model to adapt. Label
    smoothing replaces the ground-truth label :math:`y` with the weighted sum
    of itself and some fixed distribution :math:`\mu`. For class :math:`k`,
    i.e.

    .. math::

        \tilde{y}_k = (1 - \epsilon) * y_k + \epsilon * \mu_k,

    where :math:`1 - \epsilon` and :math:`\epsilon` are the weights
    respectively, and :math:`\tilde{y}_k` is the smoothed label. Usually a
    uniform distribution is used for :math:`\mu`.

    See more details about label smoothing in https://arxiv.org/abs/1512.00567.
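
    For example, with :math:`\epsilon = 0.1`, a uniform :math:`\mu` and 4 classes,
    the one-hot label :math:`[0, 1, 0, 0]` becomes
    :math:`[0.025, 0.925, 0.025, 0.025]`: each entry mixes :math:`0.9` of itself
    with :math:`0.1 \cdot 1/4 = 0.025`.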

    Parameters:
        label(Variable): The input variable containing the label data. The
                        label data should use one-hot representation. It's
                        a multidimensional tensor with a shape of
                        :math:`[N_1, ..., Depth]`, where Depth is the class number. The dtype can be "float32" or "float64".
        prior_dist(Variable, optional): The prior distribution to be used to smooth
                        labels. If not provided, a uniform distribution
                        is used. It's a multidimensional tensor with a shape of
                        :math:`[1, class\_num]` . The default value is None.
        epsilon(float, optional): The weight used to mix up the original ground-truth
                        distribution and the fixed distribution. The default value is
                        0.1.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type can be set
                        as 'float32', 'float64'. The default value is 'float32'.
6895 6896
        name(str, optional): The default value is None. Normally there is no need for user
                        to set this property. For more information, please refer to
D
DuYao 已提交
6897
                        :ref:`api_guide_Name`.
6898 6899 6900 6901 6902 6903

    Returns:
        Variable: The tensor variable containing the smoothed labels.

    Examples:
        .. code-block:: python
6904

6905
            import paddle.fluid as fluid
6906
            import paddle.fluid.layers as layers
6907

6908
            label = layers.data(name="label", shape=[1], dtype="int32")
6909 6910 6911 6912 6913 6914
            one_hot_label = layers.one_hot(input=label, depth=10)
            smooth_label = layers.label_smooth(
                label=one_hot_label, epsilon=0.1, dtype="float32")
    """
    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")
6915 6916

    if in_dygraph_mode():
W
wanghuancoder 已提交
6917
        return _C_ops.label_smooth(label, prior_dist, 'epsilon', float(epsilon))
6918

6919 6920 6921
    check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                             'label_smooth')

6922 6923
    helper = LayerHelper("label_smooth", **locals())
    label.stop_gradient = True
X
Xin Pan 已提交
6924
    smooth_label = helper.create_variable_for_type_inference(dtype)
6925 6926 6927 6928 6929 6930 6931
    helper.append_op(
        type="label_smooth",
        inputs={"X": label,
                "PriorDist": prior_dist} if prior_dist else {"X": label},
        outputs={"Out": smooth_label},
        attrs={"epsilon": float(epsilon)})
    return smooth_label
6932 6933


W
wopeizl 已提交
6934
@templatedoc()
F
FDInSky 已提交
6935 6936 6937 6938 6939
def roi_pool(input,
             rois,
             pooled_height=1,
             pooled_width=1,
             spatial_scale=1.0,
6940 6941
             rois_num=None,
             name=None):
W
wopeizl 已提交
6942
    """
6943

6944
    This operator implements the roi_pooling layer.
6945
    Region of interest pooling (also known as RoI pooling) is to perform max pooling on inputs of nonuniform sizes to obtain fixed-size feature maps (e.g. 7*7).
6946

6947
    The operator has three steps:
6948

6949 6950 6951
        1. Dividing each region proposal into equal-sized sections with the pooled_width and pooled_height;
        2. Finding the largest value in each section;
        3. Copying these max values to the output buffer.
6952

6953
    For more information, please refer to https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
6954

W
wopeizl 已提交
6955
    Args:
6956 6957 6958 6959 6960
        input (Variable): Input feature, 4D-Tensor with the shape of [N,C,H,W], where N is the batch size, C is the input channel, H is Height, W is weight. The data type is float32 or float64.
        rois (Variable): ROIs (Regions of Interest) to pool over. 2D-LoDTensor with the shape of [num_rois,4], the lod level is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is the top left coordinates, and (x2, y2) is the bottom right coordinates.
        pooled_height (int, optional): The pooled output height, data type is int32. Default: 1
        pooled_width (int, optional): The pooled output height, data type is int32. Default: 1
        spatial_scale (float, optional): Multiplicative spatial scale factor to translate ROI coords from their input scale to the scale used when pooling. Default: 1.0
6961 6962 6963 6964 6965
        rois_num (Tensor): The number of RoIs in each image. Default: None
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

6966

W
wopeizl 已提交
6967
    Returns:
6968
        Variable: The pooled feature, 4D-Tensor with the shape of [num_rois, C, pooled_height, pooled_width].
6969 6970


W
wopeizl 已提交
6971
    Examples:
6972

6973
    ..  code-block:: python
6974

6975 6976
        import paddle.fluid as fluid
        import numpy as np
6977 6978
        import paddle
        paddle.enable_static()
6979

6980
        DATATYPE='float32'
6981

6982 6983
        place = fluid.CPUPlace()
        #place = fluid.CUDAPlace(0)
6984

6985 6986
        input_data = np.array([i for i in range(1,17)]).reshape(1,1,4,4).astype(DATATYPE)
        roi_data =fluid.create_lod_tensor(np.array([[1., 1., 2., 2.], [1.5, 1.5, 3., 3.]]).astype(DATATYPE),[[2]], place)
6987
        rois_num_data = np.array([2]).astype('int32')
F
FDInSky 已提交
6988

6989 6990
        x = fluid.data(name='input', shape=[None,1,4,4], dtype=DATATYPE)
        rois = fluid.data(name='roi', shape=[None,4], dtype=DATATYPE)
6991
        rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
F
FDInSky 已提交
6992

6993
        pool_out = fluid.layers.roi_pool(
6994 6995
                input=x,
                rois=rois,
6996 6997
                pooled_height=1,
                pooled_width=1,
F
FDInSky 已提交
6998
                spatial_scale=1.0,
6999
                rois_num=rois_num)
7000

7001
        exe = fluid.Executor(place)
7002
        out, = exe.run(feed={'input':input_data ,'roi':roi_data, 'rois_num': rois_num_data}, fetch_list=[pool_out.name])
7003 7004
        print(out)   #array([[[[11.]]], [[[16.]]]], dtype=float32)
        print(np.array(out).shape)  # (2, 1, 1, 1)
W
wopeizl 已提交
7005
    """
7006 7007
    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
W
wanghuancoder 已提交
7008
        pool_out, argmaxes = _C_ops.roi_pool(
7009 7010 7011 7012
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale)
        return pool_out, argmaxes

7013 7014
    check_variable_and_dtype(input, 'input', ['float32'], 'roi_pool')
    check_variable_and_dtype(rois, 'rois', ['float32'], 'roi_pool')
W
wopeizl 已提交
7015 7016 7017 7018
    helper = LayerHelper('roi_pool', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_variable_for_type_inference(dtype)
    argmaxes = helper.create_variable_for_type_inference(dtype='int32')
7019 7020 7021 7022 7023 7024 7025

    inputs = {
        "X": input,
        "ROIs": rois,
    }
    if rois_num is not None:
        inputs['RoisNum'] = rois_num
W
wopeizl 已提交
7026 7027
    helper.append_op(
        type="roi_pool",
7028
        inputs=inputs,
W
wopeizl 已提交
7029 7030 7031 7032 7033 7034 7035 7036
        outputs={"Out": pool_out,
                 "Argmax": argmaxes},
        attrs={
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale
        })
    return pool_out
W
whs 已提交
7037 7038


J
jerrywgz 已提交
7039 7040 7041 7042 7043 7044
@templatedoc()
def roi_align(input,
              rois,
              pooled_height=1,
              pooled_width=1,
              spatial_scale=1.0,
J
jerrywgz 已提交
7045
              sampling_ratio=-1,
7046 7047
              rois_num=None,
              name=None):
J
jerrywgz 已提交
7048
    """
7049

J
jerrywgz 已提交
7050 7051 7052 7053
    ${comment}

    Args:
        input (Variable): ${x_comment}
7054
        rois (Variable): ROIs (Regions of Interest) to pool over.It should be
7055 7056
            a 2-D LoDTensor of shape (num_rois, 4), the lod level is 1. The
            data type is float32 or float64. Given as [[x1, y1, x2, y2], ...],
W
wangguanzhong 已提交
7057
            (x1, y1) is the top left coordinates, and (x2, y2) is the bottom
F
FDInSky 已提交
7058
            right coordinates.
W
wangguanzhong 已提交
7059 7060 7061 7062
        pooled_height (int32, optional): ${pooled_height_comment} Default: 1
        pooled_width (int32, optional): ${pooled_width_comment} Default: 1
        spatial_scale (float32, optional): ${spatial_scale_comment} Default: 1.0
        sampling_ratio(int32, optional): ${sampling_ratio_comment} Default: -1
7063
        rois_num (Tensor): The number of RoIs in each image. Default: None
7064 7065 7066
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
J
jerrywgz 已提交
7067 7068

    Returns:
W
wangguanzhong 已提交
7069 7070 7071 7072 7073
        Variable:

        Output: ${out_comment}.


J
jerrywgz 已提交
7074 7075 7076
    Examples:
        .. code-block:: python

7077
            import paddle.fluid as fluid
7078 7079 7080
            import paddle
            paddle.enable_static()

7081 7082 7083 7084
            x = fluid.data(
                name='data', shape=[None, 256, 32, 32], dtype='float32')
            rois = fluid.data(
                name='rois', shape=[None, 4], dtype='float32')
7085
            rois_num = fluid.data(name='rois_num', shape=[None], dtype='int32')
7086 7087 7088
            align_out = fluid.layers.roi_align(input=x,
                                               rois=rois,
                                               pooled_height=7,
J
jerrywgz 已提交
7089 7090
                                               pooled_width=7,
                                               spatial_scale=0.5,
F
FDInSky 已提交
7091
                                               sampling_ratio=-1,
7092
                                               rois_num=rois_num)
J
jerrywgz 已提交
7093
    """
7094 7095
    if in_dygraph_mode():
        assert rois_num is not None, "rois_num should not be None in dygraph mode."
W
wanghuancoder 已提交
7096
        align_out = _C_ops.roi_align(
7097 7098 7099 7100 7101
            input, rois, rois_num, "pooled_height", pooled_height,
            "pooled_width", pooled_width, "spatial_scale", spatial_scale,
            "sampling_ratio", sampling_ratio)
        return align_out

7102 7103 7104
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'roi_align')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'], 'roi_align')
J
jerrywgz 已提交
7105 7106
    helper = LayerHelper('roi_align', **locals())
    dtype = helper.input_dtype()
X
Xin Pan 已提交
7107
    align_out = helper.create_variable_for_type_inference(dtype)
7108 7109 7110 7111 7112 7113
    inputs = {
        "X": input,
        "ROIs": rois,
    }
    if rois_num is not None:
        inputs['RoisNum'] = rois_num
J
jerrywgz 已提交
7114 7115
    helper.append_op(
        type="roi_align",
7116
        inputs=inputs,
J
jerrywgz 已提交
7117 7118 7119 7120 7121 7122 7123 7124 7125 7126
        outputs={"Out": align_out},
        attrs={
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale,
            "sampling_ratio": sampling_ratio
        })
    return align_out


S
SunGaofeng 已提交
7127
def dice_loss(input, label, epsilon=0.00001, name=None):
7128
    r"""
7129

S
SunGaofeng 已提交
7130 7131 7132 7133
    Dice loss for comparing the similarity between the input predictions and the label.
    This implementation is for binary classification, where the input is sigmoid
    predictions of each pixel, usually used for segmentation task. The dice loss can
    be defined as the following equation:
W
whs 已提交
7134 7135 7136

    .. math::

7137 7138 7139
        dice\_loss &= 1 - \frac{2 * intersection\_area}{total\_area} \\
                  &= \frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\
                  &= \frac{(union\_area - intersection\_area)}{total\_area}
W
whs 已提交
7140 7141


S
SunGaofeng 已提交
7142
    Parameters:
S
shangliang Xu 已提交
7143 7144 7145 7146 7147
        input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_k, D]`, where :math:`N_1` is
                          the batch_size, :math:`D` is the number of categories. It is usually the output
                          predictions of sigmoid activation. The data type can be float32 or float64.
        label (Tensor): Tensor, the groud truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`.
                          where :math:`N_1` is the batch_size. The data type can be int32 or int64.
W
whs 已提交
7148 7149 7150
        epsilon (float): The epsilon will be added to the numerator and denominator.
                         If both input and label are empty, it makes sure dice is 1.
                         Default: 0.00001
7151 7152
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
S
SunGaofeng 已提交
7153
                             For more information, please refer to :ref:`api_guide_Name`
W
whs 已提交
7154 7155

    Returns:
7156
        Tensor, which shape is [1], data type is the same as `input` .
W
whs 已提交
7157

S
SunGaofeng 已提交
7158
    Example:
7159 7160
        .. code-block:: python

7161 7162 7163 7164 7165 7166 7167
            import paddle
            import paddle.nn.functional as F

            x = paddle.randn((3,224,224,2))
            label = paddle.randint(high=2, shape=(3,224,224,1))
            predictions = F.softmax(x)
            loss = F.dice_loss(input=predictions, label=label)
W
whs 已提交
7168
    """
S
shangliang Xu 已提交
7169 7170 7171 7172 7173 7174 7175 7176 7177 7178 7179 7180 7181 7182 7183
    assert input.dtype in (paddle.float32, paddle.float64)
    assert label.dtype in (paddle.int32, paddle.int64)
    assert len(input.shape) >= 2, \
        "The rank of input should be greater than or equal to 2."
    assert len(input.shape) == len(label.shape), (
        "The rank of input and label should be equal, "
        "but received input: %d, label: %d." %
        (len(input.shape), len(label.shape)))
    assert label.shape[-1] == 1, ("The last dimension of label should be 1, "
                                  "but received %d." % label.shape[-1])
    assert input.shape[:-1] == label.shape[:-1], (
        "All dimensions should be equal except the last one.")
    assert input.numel() > 0 and label.numel() > 0, \
        "Any dimension of input and label cannot be equal to 0."

S
shangliang Xu 已提交
7184 7185
    label = squeeze(label, [-1])
    label = paddle.nn.functional.one_hot(label, input.shape[-1])
7186
    reduce_dim = list(range(1, len(input.shape)))
W
whs 已提交
7187 7188 7189 7190 7191 7192
    inse = reduce_sum(input * label, dim=reduce_dim)
    dice_denominator = reduce_sum(
        input, dim=reduce_dim) + reduce_sum(
            label, dim=reduce_dim)
    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
    return reduce_mean(dice_score)
7193 7194


7195 7196 7197 7198
def image_resize(input,
                 out_shape=None,
                 scale=None,
                 name=None,
7199
                 resample='BILINEAR',
7200 7201
                 actual_shape=None,
                 align_corners=True,
7202 7203
                 align_mode=1,
                 data_format='NCHW'):
7204
    """
7205

R
ruri 已提交
7206
    This op resizes a batch of images.
F
stash  
fengjiayi 已提交
7207

7208 7209
    The input must be a 3-D Tensor of the shape (num_batches, channels, in_w)
    or a 4-D Tensor of the shape (num_batches, channels, in_h, in_w)
7210 7211
    or (num_batches, in_h, in_w, channels), or a 5-D Tensor of the shape
    (num_batches, channels, in_d, in_h, in_w) or (num_batches, in_d, in_h, in_w, channels),
T
tianshuo78520a 已提交
7212
    and the resizing only applies on the three dimensions(depth, height and width).
7213

7214
    **Warning:** the parameter :attr:`actual_shape` will be deprecated in the
7215 7216
    future and only use :attr:`out_shape` instead.

7217
    Supporting resample methods:
7218
        'LINEAR' : Linear interpolation 
Q
update  
qiaolongfei 已提交
7219

7220
        'BILINEAR' : Bilinear interpolation
T
Tink_Y 已提交
7221

K
Kaipeng Deng 已提交
7222 7223
        'TRILINEAR' : Trilinear interpolation

7224
        'NEAREST' : Nearest neighbor interpolation
7225 7226
        
        'BICUBIC' : Bicubic interpolation
7227 7228 7229 7230
    
    Linear interpolation is the method of using a line connecting two known quantities 
    to determine the value of an unknown quantity between the two known quantities.
    
7231
    Nearest neighbor interpolation is to perform nearest neighbor interpolation
7232
    in both the 3rd dimension(in height direction) and the 4th dimension(in width
7233
    direction) on input tensor.
7234 7235 7236 7237 7238

    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
7239 7240
    again in the other direction.

7241 7242 7243
    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
K
Kaipeng Deng 已提交
7244
    The linear interpolation is performed on three directions.
7245 7246 7247 7248 7249
    
    Bicubic interpolation is an extension of cubic interpolation for interpolating
    data points on a two-dimensional regular grid. The interpolated surface is
    smoother than corresponding surfaces obtained by bilinear interpolation or
    nearest-neighbor interpolation.
K
Kaipeng Deng 已提交
7250

7251
    Align_corners and align_mode are optional parameters,the calculation method
7252 7253 7254 7255
    of interpolation can be selected by them.

    Example:

T
Tink_Y 已提交
7256
    .. code-block:: text
7257

T
Tink_Y 已提交
7258
        For scale:
7259

T
Tink_Y 已提交
7260
            if align_corners = True && out_size > 1 :
7261

T
Tink_Y 已提交
7262
              scale_factor = (in_size-1.0)/(out_size-1.0)
7263

T
Tink_Y 已提交
7264
            else:
7265

T
Tink_Y 已提交
7266
              scale_factor = float(in_size/out_size)
7267 7268


T
Tink_Y 已提交
7269
        Nearest neighbor interpolation:
7270

T
Tink_Y 已提交
7271 7272
          if:
              align_corners = False
7273

T
Tink_Y 已提交
7274 7275
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
7276

T
Tink_Y 已提交
7277 7278
              H_out = floor (H_{in} * scale_{factor})
              W_out = floor (W_{in} * scale_{factor})
7279

T
Tink_Y 已提交
7280 7281
          else:
              align_corners = True
7282

T
Tink_Y 已提交
7283 7284
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
7285

T
Tink_Y 已提交
7286 7287
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})
7288

7289 7290 7291 7292 7293 7294 7295 7296 7297 7298 7299 7300 7301 7302 7303 7304 7305
        linear interpolation:

          if:
              align_corners = False , align_mode = 0

              input : (N,C,W_in)
              output: (N,C,W_out) where:

              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,W_in)
              output: (N,C,H_out,W_out) where:

              W_out = W_{in} * scale_{factor}

T
Tink_Y 已提交
7306 7307 7308 7309
        Bilinear interpolation:

          if:
              align_corners = False , align_mode = 0
7310

T
Tink_Y 已提交
7311 7312
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
7313

T
Tink_Y 已提交
7314 7315
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
7316

T
Tink_Y 已提交
7317
          else:
7318

T
Tink_Y 已提交
7319 7320
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
7321

T
Tink_Y 已提交
7322 7323
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
7324

K
Kaipeng Deng 已提交
7325 7326 7327 7328
        Trilinear interpolation:

          if:
              align_corners = False , align_mode = 0
7329

K
Kaipeng Deng 已提交
7330 7331
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
7332

K
Kaipeng Deng 已提交
7333 7334 7335 7336 7337 7338
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5


          else:
7339

K
Kaipeng Deng 已提交
7340 7341 7342 7343
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:

              D_out = D_{in} * scale_{factor}
7344 7345 7346 7347 7348 7349 7350 7351 7352 7353 7354 7355 7356
       
        Trilinear interpolation:
          if:
              align_corners = False , align_mode = 0
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
          else:
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
              D_out = D_{in} * scale_{factor}
K
Kaipeng Deng 已提交
7357 7358
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
7359
        
7360

7361 7362 7363
    For details of linear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Linear_interpolation.
    
7364
    For details of nearest neighbor interpolation, please refer to Wikipedia:
7365
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation.
7366
    
7367
    For details of bilinear interpolation, please refer to Wikipedia:
7368
    https://en.wikipedia.org/wiki/Bilinear_interpolation.
7369
    
7370
    For details of trilinear interpolation, please refer to Wikipedia:
K
Kaipeng Deng 已提交
7371
    https://en.wikipedia.org/wiki/Trilinear_interpolation.
7372 7373 7374
    
    For details of bicubic interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Bicubic_interpolation
7375

R
ruri 已提交
7376
    Parameters:
7377
        input (Variable): 3-D, 4-D or 5-D Tensor, its data type is float32, float64, or uint8,
7378
                          its data format is specified by :attr:`data_format`.
7379 7380 7381 7382
        out_shape (list|tuple|Variable|None): Output shape of image resize
             layer, the shape is (out_w, ) when input is a 3-D Tensor, the shape is (out_h, out_w) 
             when input is a 4-D Tensor and is (out_d, out_h, out_w) when input is a 5-D Tensor. 
             Default: None. If a list, each element can be an integer or a Tensor Variable of shape: [1].
7383
             If a Tensor Variable, its dimensions size should be a 1.
7384 7385 7386
        scale(float|Variable|None): The multiplier for the input height or width. At
             least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
D
dengkaipeng 已提交
7387
             Default: None.
7388 7389
        name(str|None): A name for this layer(optional). If set None, the layer
                        will be named automatically.
7390
        resample(str): The resample method. It supports 'LINEAR', 'BICUBIC', 'BILINEAR', 'TRILINEAR'
K
Kaipeng Deng 已提交
7391
                       and 'NEAREST' currently. Default: 'BILINEAR'
7392 7393 7394
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
7395
                                :attr:`out_shape` and :attr:`scale` specifying
7396 7397
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
7398 7399 7400 7401 7402
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
T
tianshuo78520a 已提交
7403
                                errors would be occurred in graph constructing stage.
7404
                                Default: None
7405 7406
        align_corners(bool) :  An optional bool, If True, the centers of the 4 corner pixels of the
                               input and output tensors are aligned, preserving the values at the
7407 7408
                               corner pixels.
                               Default: True
7409 7410 7411
        align_mode(int)  :  An optional for linear/bilinear/trilinear interpolation. Refer to the fomula in the 
                            the example code above, it can be \'0\' for src_idx = scale*(dst_indx+0.5)-0.5 , 
                            can be \'1\' for src_idx = scale*dst_index.
7412
        data_format (str, optional): Specify the data format of the input, and the data format of the output
7413
            will be consistent with that of the input. An optional string from:`NCW`, `NWC`, `"NCHW"`, `"NHWC"`, `"NCDHW"`,
7414
            `"NDHWC"`. The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
7415
            `[batch_size, input_channels, input_height, input_width]`. When it is `"NCHW"`, the data is stored
7416
            in the order of: `[batch_size, input_channels, input_depth, input_height, input_width]`.
7417 7418

    Returns:
7419
        A 3-D Tensor of the shape (num_batches, channels, out_w) or (num_batches, out_w, channels),
7420 7421
        A 4-D Tensor of the shape (num_batches, channels, out_h, out_w) or (num_batches, out_h, out_w, channels),
        or 5-D Tensor of the shape (num_batches, channels, out_d, out_h, out_w) or (num_batches, out_d, out_h, out_w, channels).
F
stash  
fengjiayi 已提交
7422

7423 7424 7425
    Raises:
        TypeError: out_shape should be a list or tuple or Variable.
        TypeError: actual_shape should either be Variable or None.
7426 7427
        ValueError: The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR',
                    'TRILINEAR', 'BICUBIC' or 'NEAREST' currently.
7428
        ValueError: 'LINEAR' only support 3-D tensor.
7429
        ValueError: 'BICUBIC', 'BILINEAR' and 'NEAREST' only support 4-D tensor.
K
Kaipeng Deng 已提交
7430
        ValueError: 'TRILINEAR' only support 5-D tensor.
7431
        ValueError: One of out_shape and scale must not be None.
7432
        ValueError: out_shape length should be 1 for input 3-D tensor.
K
Kaipeng Deng 已提交
7433 7434
        ValueError: out_shape length should be 2 for input 4-D tensor.
        ValueError: out_shape length should be 3 for input 5-D tensor.
D
dengkaipeng 已提交
7435
        ValueError: scale should be greater than zero.
T
tianshuo78520a 已提交
7436
        TypeError: align_corners should be a bool value
7437
        ValueError: align_mode can only be '0' or '1'
7438
        ValueError: data_format can only be 'NCW', 'NWC', 'NCHW', 'NHWC', 'NCDHW' or 'NDHWC'.
7439

7440 7441
    Examples:
        .. code-block:: python
7442

R
ruri 已提交
7443
	    #declarative mode
7444
	    import paddle
R
ruri 已提交
7445 7446
	    import paddle.fluid as fluid
	    import numpy as np
7447
	    paddle.enable_static()
R
ruri 已提交
7448 7449 7450 7451 7452 7453 7454 7455 7456 7457 7458 7459 7460 7461 7462 7463 7464 7465 7466 7467 7468 7469 7470 7471 7472 7473
	    input = fluid.data(name="input", shape=[None,3,6,10])

	    #1
	    output = fluid.layers.image_resize(input=input,out_shape=[12,12])

	    #2
	    #x = np.array([2]).astype("int32")
	    #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
	    #fluid.layers.assign(input=x, output=dim1)
	    #output = fluid.layers.image_resize(input=input,out_shape=[12,dim1])

	    #3
	    #x = np.array([3,12]).astype("int32")
	    #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
	    #fluid.layers.assign(input=x, output=shape_tensor)
	    #output = fluid.layers.image_resize(input=input,out_shape=shape_tensor)

	    #4
	    #x = np.array([0.5]).astype("float32")
	    #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
	    #fluid.layers.assign(x,scale_tensor)
	    #output = fluid.layers.image_resize(input=input,scale=scale_tensor)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
7474

R
ruri 已提交
7475
	    input_data = np.random.rand(2,3,6,10).astype("float32")
7476

R
ruri 已提交
7477 7478 7479 7480
	    output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)
7481

R
ruri 已提交
7482
	    print(output_data[0].shape)
7483

R
ruri 已提交
7484 7485 7486 7487 7488 7489 7490 7491
	    #1
	    # (2, 3, 12, 12)
	    #2
	    # (2, 3, 12, 2)
	    #3
	    # (2, 3, 3, 12)
	    #4
	    # (2, 3, 3, 5)
7492

R
ruri 已提交
7493 7494
	    #imperative mode
	    import paddle.fluid.dygraph as dg
7495

R
ruri 已提交
7496 7497 7498 7499
	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		output = fluid.layers.image_resize(input=input, out_shape=[12,12])
    		print(output.shape)
7500

R
ruri 已提交
7501
		# [2L, 3L, 12L, 12L]
7502

7503
    """
7504
    resample_methods = {
7505
        'LINEAR': 'linear',
7506
        'BILINEAR': 'bilinear',
K
Kaipeng Deng 已提交
7507
        'TRILINEAR': 'trilinear',
7508
        'NEAREST': 'nearest',
7509
        'LINEAR': 'linear',
7510
    }
7511
    resample = resample.upper()
7512 7513
    if resample not in resample_methods:
        raise ValueError(
7514
            "The 'resample' of image_resize can only be 'LINEAR', 'BILINEAR', 'TRILINEAR' "
K
Kaipeng Deng 已提交
7515
            "or 'NEAREST' currently.")
7516
    resample_type = resample_methods[resample]
7517

7518 7519 7520
    if resample == 'LINEAR' and len(input.shape) != 3:
        raise ValueError("'LINER only support 3-D tensor.")
    elif resample in ['BILINEAR', 'NEAREST'] and len(input.shape) != 4:
K
Kaipeng Deng 已提交
7521
        raise ValueError("'BILINEAR' and 'NEAREST' only support 4-D tensor.")
7522
    elif resample == 'TRILINEAR' and len(input.shape) != 5:
K
Kaipeng Deng 已提交
7523 7524
        raise ValueError("'TRILINEAR'only support 5-D tensor.")

7525 7526 7527 7528 7529
    if not isinstance(align_corners, bool):
        raise TypeError("Attr align_corners should be a bool value")
    if align_mode != 0 and align_mode != 1:
        raise ValueError("align_mode can only be 0 or 1")

7530
    if out_shape is None and scale is None:
7531
        raise ValueError("One of out_shape and scale must not be None.")
7532
    helper = LayerHelper('{}_interp'.format(resample_type), **locals())
7533
    dtype = helper.input_dtype()
7534

7535
    if len(input.shape) == 3 and data_format not in ['NCW', 'NWC']:
7536 7537
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
7538
            " received but only `NCW` or `NWC` supported for 3-D input.")
7539
    elif len(input.shape) == 4 and data_format not in ['NCHW', 'NHWC']:
7540 7541 7542 7543 7544 7545 7546 7547
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCHW` or `NHWC` supported for 4-D input.")
    elif len(input.shape) == 5 and data_format not in ['NCDHW', 'NDHWC']:
        raise ValueError(
            "Got wrong value for param `data_format`: " + data_format +
            " received but only `NCDHW` or `NDHWC` supported for 5-D input.")

7548 7549 7550
    def _is_list_or_turple_(data):
        return (isinstance(data, list) or isinstance(data, tuple))

7551
    if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
7552
        data_layout = 'NCHW'
7553
    if data_format == 'NHWC' or data_format == 'NDHWC' or data_format == 'NWC':
7554 7555
        data_layout = 'NHWC'

7556
    inputs = {"X": input}
D
dengkaipeng 已提交
7557
    attrs = {
7558 7559 7560
        "out_d": -1,
        "out_h": -1,
        "out_w": -1,
D
dengkaipeng 已提交
7561 7562
        "interp_method": resample_type,
        "align_corners": align_corners,
7563 7564
        "align_mode": align_mode,
        "data_layout": data_layout
D
dengkaipeng 已提交
7565 7566
    }

7567
    if out_shape is not None:
7568
        if isinstance(out_shape, Variable):
7569
            out_shape.stop_gradient = True
7570
            inputs['OutSize'] = out_shape
7571 7572
        else:
            if not (_is_list_or_turple_(out_shape)):
D
dengkaipeng 已提交
7573 7574
                raise TypeError(
                    "out_shape should be a list or tuple or Variable.")
7575 7576 7577 7578 7579 7580 7581 7582 7583 7584 7585 7586 7587 7588 7589 7590 7591 7592 7593 7594 7595 7596 7597 7598 7599 7600 7601 7602
            # Validate the shape
            contain_var = False
            for dim_idx, dim_size in enumerate(out_shape):
                if isinstance(dim_size, Variable):
                    contain_var = True
                    continue
                assert dim_size > 0, (
                    "Each dimension size given in out_shape must be greater than 0."
                )

            if contain_var:
                new_size_tensor = []
                size_list = []
                for dim in out_shape:
                    if isinstance(dim, Variable):
                        dim.stop_gradient = True
                        new_size_tensor.append(dim)
                        size_list.append(-1)
                    else:
                        assert (isinstance(dim, int))
                        temp_out = helper.create_variable_for_type_inference(
                            'int32')
                        fill_constant(
                            [1], 'int32', dim, force_cpu=True, out=temp_out)
                        new_size_tensor.append(temp_out)
                        size_list.append(dim)
                inputs['SizeTensor'] = new_size_tensor

7603 7604 7605 7606 7607 7608 7609 7610 7611 7612
            if len(input.shape) == 3:
                if len(out_shape) != 1:
                    raise ValueError("out_shape length should be 1 for "
                                     "input 3-D tensor.")
                if contain_var:
                    attrs['out_w'] = size_list[0]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_w'] = out_shape[0]
            elif len(input.shape) == 4:
K
Kaipeng Deng 已提交
7613 7614 7615
                if len(out_shape) != 2:
                    raise ValueError("out_shape length should be 2 for "
                                     "input 4-D tensor.")
7616 7617 7618 7619 7620 7621 7622
                if contain_var:
                    attrs['out_h'] = size_list[0]
                    attrs['out_w'] = size_list[1]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_h'] = out_shape[0]
                    attrs['out_w'] = out_shape[1]
K
Kaipeng Deng 已提交
7623 7624 7625 7626
            if len(input.shape) == 5:
                if len(out_shape) != 3:
                    raise ValueError("out_shape length should be 3 for "
                                     "input 5-D tensor.")
7627 7628 7629 7630 7631 7632 7633 7634 7635
                if contain_var:
                    attrs['out_d'] = size_list[0]
                    attrs['out_h'] = size_list[1]
                    attrs['out_w'] = size_list[2]
                else:
                    out_shape = list(map(int, out_shape))
                    attrs['out_d'] = out_shape[0]
                    attrs['out_h'] = out_shape[1]
                    attrs['out_w'] = out_shape[2]
7636

7637
    else:
7638 7639 7640
        if isinstance(scale, Variable):
            scale.stop_gradient = True
            inputs["Scale"] = scale
7641
        elif isinstance(scale, float) or isinstance(scale, int):
7642
            if scale <= 0:
7643
                raise ValueError("Attr(scale) should be greater than zero.")
7644
            attrs['scale'] = float(scale)
7645 7646 7647
        else:
            raise TypeError(
                "Attr(scale)'s type should be float, int or Variable.")
7648

7649
    if isinstance(actual_shape, Variable):
7650 7651 7652 7653 7654
        warnings.warn(
            "actual_shape will be deprecated, it is recommended to use "
            "out_shape instead of actual_shape to specify output shape dynamically."
        )
        actual_shape.stop_gradient = True
7655 7656 7657
        inputs["OutSize"] = actual_shape
    elif actual_shape is not None:
        raise TypeError("actual_shape should either be Variable or None.")
X
Xin Pan 已提交
7658
    out = helper.create_variable_for_type_inference(dtype)
7659
    helper.append_op(
7660
        type='{}_interp'.format(resample_type),
7661
        inputs=inputs,
7662
        outputs={"Out": out},
D
dengkaipeng 已提交
7663
        attrs=attrs)
7664
    return out
F
stash  
fengjiayi 已提交
7665 7666


7667 7668 7669 7670 7671 7672 7673 7674
@templatedoc(op_type="linear_interp")
def resize_linear(input,
                  out_shape=None,
                  scale=None,
                  name=None,
                  actual_shape=None,
                  align_corners=True,
                  align_mode=1,
7675
                  data_format='NCW'):
7676 7677 7678 7679 7680 7681 7682 7683 7684 7685 7686 7687 7688 7689 7690 7691 7692 7693 7694 7695 7696 7697 7698 7699 7700 7701 7702 7703 7704 7705 7706 7707 7708 7709 7710 7711 7712 7713 7714 7715 7716 7717
    """
    This op resizes the input by performing linear interpolation based on given
    output shape which specified by actual_shape, out_shape and scale
    in priority order.

    **Warning:** the parameter :attr:`actual_shape` will be deprecated in 
    the future and only use :attr:`out_shape` instead.

    Align_corners and align_mode are optional parameters,the calculation 
    method of interpolation can be selected by them.

    Example:

    .. code-block:: text

        For scale:
          
            if align_corners = True && out_size > 1 :

              scale_factor = (in_size-1.0)/(out_size-1.0)
            
            else:
              
              scale_factor = float(in_size/out_size)

        Linear interpolation:

          if:
              align_corners = False , align_mode = 0
              
              input : (N,C,W_in)
              output: (N,C,W_out) where:
              
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,W_in)
              output: (N,C,W_out) where:
              W_out = W_{in} * scale_{factor}

    Parameters:
7718
        input(Variable): 3-D Tensor(NCW), its data type is float32, float64, or uint8,
7719 7720 7721 7722 7723 7724 7725 7726 7727 7728 7729 7730 7731 7732 7733 7734 7735 7736 7737 7738 7739 7740 7741 7742 7743
                          its data format is specified by :attr:`data_format`.
        out_shape(list|tuple|Variable|None): Output shape of resize linear
            layer, the shape is (out_w,). Default: None. If a list, each 
            element can be an integer or a Tensor Variable with shape: [1]. If a 
            Tensor Variable, its dimension size should be 1.
        scale(float|Variable|None): The multiplier for the input height or width. At
             least one of :attr:`out_shape` or :attr:`scale` must be set. 
             And :attr:`out_shape` has a higher priority than :attr:`scale`. 
             Default: None.
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
                                :attr:`out_shape` and :attr:`scale` specifying
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
                                :attr:`out_shape` if you want to specify output 
                                shape dynamically, because :attr:`actual_shape` 
                                will be deprecated. When using actual_shape to 
                                specify output shape, one of :attr:`out_shape` 
                                and :attr:`scale` should also be set, otherwise 
                                errors would be occurred in graph constructing stage.
                                Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
        data_format (str, optional): Specify the data format of the input, and the data format of the output 
7744 7745 7746 7747 7748
            will be consistent with that of the input. An optional string from: `"NCW"`, `"NWC"`.
            The default is `"NCW"`. When it is `"NCW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_width]`.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  
            For more information, please refer to :ref:`api_guide_Name`
7749 7750

    Returns:
7751
	Variable: 3-D tensor(NCW or NWC).
7752 7753 7754 7755 7756 7757 7758 7759 7760 7761 7762 7763 7764 7765 7766 7767 7768 7769 7770 7771 7772 7773 7774 7775 7776 7777 7778 7779 7780 7781 7782 7783 7784 7785 7786 7787 7788 7789 7790 7791 7792 7793
    
    Examples:
        .. code-block:: python
	
	    #declarative mode
	    import paddle.fluid as fluid
	    import numpy as np
	    input = fluid.data(name="input", shape=[None,3,100])

	    output = fluid.layers.resize_linear(input=input,out_shape=[50,])

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
 
	    input_data = np.random.rand(1,3,100).astype("float32")

	    output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)
 
	    print(output_data[0].shape)

	    # (1, 3, 50)

	    #imperative mode
	    import paddle.fluid.dygraph as dg

	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		output = fluid.layers.resize_linear(input=input, out_shape=[50,])
    		print(output.shape)

		# [1L, 3L, 50L]

    """

    return image_resize(input, out_shape, scale, name, 'LINEAR', actual_shape,
                        align_corners, align_mode, data_format)


7794
@templatedoc(op_type="bilinear_interp")
7795 7796 7797 7798
def resize_bilinear(input,
                    out_shape=None,
                    scale=None,
                    name=None,
7799 7800
                    actual_shape=None,
                    align_corners=True,
7801 7802
                    align_mode=1,
                    data_format='NCHW'):
7803
    """
7804

R
ruri 已提交
7805
    This op resizes the input by performing bilinear interpolation based on given
7806
    output shape which specified by actual_shape, out_shape and scale
7807 7808
    in priority order.

7809
    **Warning:** the parameter :attr:`actual_shape` will be deprecated in
7810 7811
    the future and only use :attr:`out_shape` instead.

7812 7813 7814 7815
    Bilinear interpolation is an extension of linear interpolation for
    interpolating functions of two variables (e.g. H-direction and
    W-direction in this op) on a rectilinear 2D grid. The key idea is
    to perform linear interpolation first in one direction, and then
7816 7817
    again in the other direction.

7818
    For details of bilinear interpolation, please refer to Wikipedia:
7819
    https://en.wikipedia.org/wiki/Bilinear_interpolation
Y
yuyang18 已提交
7820

7821
    Align_corners and align_mode are optional parameters,the calculation
7822 7823 7824 7825
    method of interpolation can be selected by them.

    Example:

T
Tink_Y 已提交
7826
    .. code-block:: text
7827

T
Tink_Y 已提交
7828
        For scale:
7829

T
Tink_Y 已提交
7830
            if align_corners = True && out_size > 1 :
7831

T
Tink_Y 已提交
7832
              scale_factor = (in_size-1.0)/(out_size-1.0)
7833

T
Tink_Y 已提交
7834
            else:
7835

7836
              scale_factor = float(in_size/out_size)
7837

T
Tink_Y 已提交
7838 7839 7840 7841
        Bilinear interpolation:

          if:
              align_corners = False , align_mode = 0
7842

T
Tink_Y 已提交
7843 7844
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
7845

T
Tink_Y 已提交
7846 7847
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5
7848

T
Tink_Y 已提交
7849
          else:
T
tink2123 已提交
7850

T
Tink_Y 已提交
7851 7852 7853 7854
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}
7855

R
ruri 已提交
7856 7857
    Parameters:
        input(Variable): 4-D Tensor(NCHW), its data type is float32, float64, or uint8,
7858
                          its data format is specified by :attr:`data_format`.
D
dengkaipeng 已提交
7859
        out_shape(list|tuple|Variable|None): Output shape of resize bilinear
7860 7861
            layer, the shape is (out_h, out_w).Default: None. If a list, each
            element can be an integer or a Tensor Variable with shape: [1]. If a
7862
            Tensor Variable, its dimension size should be 1.
7863
        scale(float|Variable|None): The multiplier for the input height or width. At
7864 7865
             least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
D
dengkaipeng 已提交
7866
             Default: None.
7867 7868 7869
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
7870
                                :attr:`out_shape` and :attr:`scale` specifying
7871 7872
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
7873 7874 7875 7876 7877
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
T
tianshuo78520a 已提交
7878
                                errors would be occurred in graph constructing stage.
7879
                                Default: None
7880 7881
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
7882
        data_format (str, optional): Specify the data format of the input, and the data format of the output
7883 7884 7885
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
R
ruri 已提交
7886
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
Y
yuyang18 已提交
7887 7888

    Returns:
R
ruri 已提交
7889
	Variable: 4-D tensor(NCHW or NHWC).
7890

7891 7892
    Examples:
        .. code-block:: python
7893

R
ruri 已提交
7894 7895
	    #declarative mode
	    import paddle.fluid as fluid
7896
	    import numpy as np
7897 7898
	    import paddle
	    paddle.enable_static()
R
ruri 已提交
7899 7900 7901 7902 7903 7904 7905 7906 7907 7908 7909 7910 7911 7912 7913 7914 7915 7916 7917 7918 7919 7920 7921 7922 7923 7924
	    input = fluid.data(name="input", shape=[None,3,6,10])

	    #1
	    output = fluid.layers.resize_bilinear(input=input,out_shape=[12,12])

	    #2
	    #x = np.array([2]).astype("int32")
	    #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
	    #fluid.layers.assign(input=x, output=dim1)
	    #output = fluid.layers.resize_bilinear(input=input,out_shape=[12,dim1])

	    #3
	    #x = np.array([3,12]).astype("int32")
	    #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
	    #fluid.layers.assign(input=x, output=shape_tensor)
	    #output = fluid.layers.resize_bilinear(input=input,out_shape=shape_tensor)

	    #4
	    #x = np.array([0.5]).astype("float32")
	    #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
	    #fluid.layers.assign(x,scale_tensor)
	    #output = fluid.layers.resize_bilinear(input=input,scale=scale_tensor)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
7925

R
ruri 已提交
7926
	    input_data = np.random.rand(2,3,6,10).astype("float32")
7927

R
ruri 已提交
7928 7929 7930 7931
	    output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)
7932

R
ruri 已提交
7933
	    print(output_data[0].shape)
7934

R
ruri 已提交
7935 7936 7937 7938 7939 7940 7941 7942
	    #1
	    # (2, 3, 12, 12)
	    #2
	    # (2, 3, 12, 2)
	    #3
	    # (2, 3, 3, 12)
	    #4
	    # (2, 3, 3, 5)
7943

R
ruri 已提交
7944 7945
	    #imperative mode
	    import paddle.fluid.dygraph as dg
7946

R
ruri 已提交
7947 7948 7949 7950
	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		output = fluid.layers.resize_bilinear(input=input, out_shape=[12,12])
    		print(output.shape)
7951

R
ruri 已提交
7952
		# [2L, 3L, 12L, 12L]
7953

7954 7955
    """

7956
    return image_resize(input, out_shape, scale, name, 'BILINEAR', actual_shape,
7957
                        align_corners, align_mode, data_format)
7958 7959


K
Kaipeng Deng 已提交
7960 7961 7962 7963 7964 7965 7966
@templatedoc(op_type="trilinear_interp")
def resize_trilinear(input,
                     out_shape=None,
                     scale=None,
                     name=None,
                     actual_shape=None,
                     align_corners=True,
7967 7968
                     align_mode=1,
                     data_format='NCDHW'):
K
Kaipeng Deng 已提交
7969
    """
7970

R
ruri 已提交
7971
    This op resizes the input by performing trilinear interpolation based on given
K
Kaipeng Deng 已提交
7972 7973 7974
    output shape which specified by actual_shape, out_shape and scale
    in priority order.

7975
    **Warning:** the parameter :attr:`actual_shape` will be deprecated
7976 7977
    in the future and only use :attr:`out_shape` instead.

7978 7979 7980
    Trilinear interpolation is an extension of linear interpolation for
    interpolating functions of three variables (e.g. D-direction,
    H-direction and W-direction in this op) on a rectilinear 3D grid.
K
Kaipeng Deng 已提交
7981 7982 7983 7984 7985
    The linear interpolation is performed on three directions.

    For details of trilinear interpolation, please refer to Wikipedia:
    https://en.wikipedia.org/wiki/Trilinear_interpolation

7986
    Align_corners and align_mode are optional parameters,the calculation
K
Kaipeng Deng 已提交
7987 7988 7989 7990 7991 7992 7993
    method of interpolation can be selected by them.

    Example:

    .. code-block:: text

        For scale:
7994

K
Kaipeng Deng 已提交
7995 7996 7997
            if align_corners = True && out_size > 1 :

              scale_factor = (in_size-1.0)/(out_size-1.0)
7998

K
Kaipeng Deng 已提交
7999
            else:
8000 8001

              scale_factor = float(in_size/out_size)
K
Kaipeng Deng 已提交
8002 8003 8004 8005

        Bilinear interpolation:

          if:
8006

K
Kaipeng Deng 已提交
8007
              align_corners = False , align_mode = 0
8008

K
Kaipeng Deng 已提交
8009 8010
              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:
8011

K
Kaipeng Deng 已提交
8012 8013 8014 8015 8016 8017 8018 8019 8020 8021 8022 8023 8024
              D_out = (D_{in}+0.5) * scale_{factor} - 0.5
              H_out = (H_{in}+0.5) * scale_{factor} - 0.5
              W_out = (W_{in}+0.5) * scale_{factor} - 0.5

          else:

              input : (N,C,D_in,H_in,W_in)
              output: (N,C,D_out,H_out,W_out) where:

              D_out = D_{in} * scale_{factor}
              H_out = H_{in} * scale_{factor}
              W_out = W_{in} * scale_{factor}

R
ruri 已提交
8025
    Parameters:
8026 8027
        input(${x_type}): 5-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
R
ruri 已提交
8028
        out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_d, out_h, out_w). Default: None. Every element should be an integer or a Tensor Variable with shape: [1] if it is a list. If it is a Tensor Variable, its dimension size should be 1.
8029
        scale(float|Variable|None): The multiplier for the input depth, height or width.
8030 8031
             At least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
K
Kaipeng Deng 已提交
8032
             Default: None.
R
ruri 已提交
8033
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
K
Kaipeng Deng 已提交
8034 8035 8036 8037 8038 8039
        actual_shape(Variable): An optional input to specify output shape
                                dynamically. If provided, image resize
                                according to this given shape rather than
                                :attr:`out_shape` and :attr:`scale` specifying
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
8040 8041 8042 8043 8044
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
T
tianshuo78520a 已提交
8045
                                errors would be occurred in graph constructing stage.
K
Kaipeng Deng 已提交
8046 8047 8048
                                Default: None
        align_corners(bool): ${align_corners_comment}
        align_mode(bool): ${align_mode_comment}
8049
        data_format (str, optional): Specify the data format of the input, and the data format of the output
8050 8051 8052
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NCDHW"`. When it is `"NCDHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_depth, input_height, input_width]`.
K
Kaipeng Deng 已提交
8053 8054

    Returns:
8055
        Variable: A 5-D Tensor(NCDHW or NDHWC)
K
Kaipeng Deng 已提交
8056 8057 8058

    Examples:
        .. code-block:: python
8059

R
ruri 已提交
8060 8061
	    #declarative mode
	    import paddle.fluid as fluid
8062
	    import paddle
R
ruri 已提交
8063
	    import numpy as np
8064
	    paddle.enable_static()
R
ruri 已提交
8065 8066 8067 8068 8069 8070 8071 8072 8073 8074 8075 8076 8077 8078 8079 8080 8081 8082 8083 8084 8085 8086 8087 8088 8089 8090
	    input = fluid.data(name="input", shape=[None,3,6,8,10])

	    #1
	    output = fluid.layers.resize_trilinear(input=input,out_shape=[12,12,12])

	    #2
	    #x = np.array([2]).astype("int32")
	    #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
	    #fluid.layers.assign(input=x, output=dim1)
	    #output = fluid.layers.resize_trilinear(input=input,out_shape=[12,dim1,4])

	    #3
	    #x = np.array([3,12,12]).astype("int32")
	    #shape_tensor = fluid.data(name="shape_tensor", shape=[3], dtype="int32")
	    #fluid.layers.assign(input=x, output=shape_tensor)
	    #output = fluid.layers.resize_trilinear(input=input,out_shape=shape_tensor)

	    #4
	    #x = np.array([0.5]).astype("float32")
	    #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
	    #fluid.layers.assign(x,scale_tensor)
	    #output = fluid.layers.resize_trilinear(input=input,scale=scale_tensor)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
8091

R
ruri 已提交
8092
	    input_data = np.random.rand(2,3,6,8,10).astype("float32")
K
Kaipeng Deng 已提交
8093

R
ruri 已提交
8094 8095 8096 8097
	    output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)
8098

R
ruri 已提交
8099 8100 8101 8102 8103 8104 8105 8106 8107 8108 8109 8110 8111
	    print(output_data[0].shape)

	    #1
	    # (2, 3, 12, 12, 12)
	    #2
	    # (2, 3, 12, 2, 4)
	    #3
	    # (2, 3, 3, 12, 12)
	    #4
	    # (2, 3, 3, 4, 5)

	    #imperative mode
	    import paddle.fluid.dygraph as dg
8112

R
ruri 已提交
8113 8114 8115 8116
	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		output = fluid.layers.resize_trilinear(input=input, out_shape=[12,12,12])
    		print(output.shape)
8117

R
ruri 已提交
8118
		# [2L, 3L, 12L, 12L, 12L]
8119 8120 8121



K
Kaipeng Deng 已提交
8122 8123 8124
    """

    return image_resize(input, out_shape, scale, name, 'TRILINEAR',
8125
                        actual_shape, align_corners, align_mode, data_format)
K
Kaipeng Deng 已提交
8126 8127


8128
@templatedoc(op_type="nearest_interp")
8129 8130 8131 8132
def resize_nearest(input,
                   out_shape=None,
                   scale=None,
                   name=None,
8133
                   actual_shape=None,
8134 8135
                   align_corners=True,
                   data_format='NCHW'):
8136
    """
8137

R
ruri 已提交
8138
    This op resizes the input by performing nearest neighbor interpolation in both the
8139
    height direction and the width direction based on given output shape
8140
    which is specified by actual_shape, out_shape and scale in priority order.
8141

8142
    **Warning:** the parameter :attr:`actual_shape` will be deprecated in the
8143 8144
    future and only use :attr:`out_shape` instead.

8145 8146
    Example:

T
Tink_Y 已提交
8147 8148 8149
    .. code-block:: text

        For scale:
8150

T
Tink_Y 已提交
8151 8152
            if align_corners = True && out_size > 1 :
              scale_factor = (in_size-1.0)/(out_size-1.0)
8153

T
Tink_Y 已提交
8154
            else:
8155

T
Tink_Y 已提交
8156
              scale_factor = float(in_size/out_size)
8157

T
Tink_Y 已提交
8158
        Nearest neighbor interpolation:
8159

T
Tink_Y 已提交
8160 8161
          if:
              align_corners = False
8162

T
Tink_Y 已提交
8163 8164
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
8165

T
Tink_Y 已提交
8166 8167
              H_out = floor(H_{in} * scale_{factor})
              W_out = floor(W_{in} * scale_{factor})
8168

T
Tink_Y 已提交
8169 8170
          else:
              align_corners = True
8171

T
Tink_Y 已提交
8172 8173
              input : (N,C,H_in,W_in)
              output: (N,C,H_out,W_out) where:
8174

T
Tink_Y 已提交
8175 8176
              H_out = round(H_{in} * scale_{factor})
              W_out = round(W_{in} * scale_{factor})
8177 8178


8179
    For details of nearest neighbor interpolation, please refer to Wikipedia:
8180
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation
Y
yuyang18 已提交
8181

R
ruri 已提交
8182
    Parameters:
8183 8184
        input(${x_type}): 4-D Tensor, its data type is float32, float64, or uint8,
                          its data format is specified by :attr:`data_format`.
R
ruri 已提交
8185
        out_shape(list|tuple|Variable|None): The output shape of resized tensor, the shape is (out_h, out_w). Default: None. Every element should be an integer or a tensor Variable with shape: [1] if it is a list. If it is a tensor Variable, its dimension size should be 1.
8186
        scale(float|Variable|None): The multiplier for the input height or width. At
8187 8188 8189
             least one of :attr:`out_shape` or :attr:`scale` must be set.
             And :attr:`out_shape` has a higher priority than :attr:`scale`.
             Default: None.
R
ruri 已提交
8190 8191
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`
	actual_shape(Variable): An optional input to specify output shape
8192 8193
                                dynamically. If provided, image resize
                                according to this given shape rather than
8194
                                :attr:`out_shape` and :attr:`scale` specifying
8195 8196
                                shape. That is to say actual_shape has the
                                highest priority. It is recommended to use
8197 8198 8199 8200 8201
                                :attr:`out_shape` if you want to specify output
                                shape dynamically, because :attr:`actual_shape`
                                will be deprecated. When using actual_shape to
                                specify output shape, one of :attr:`out_shape`
                                and :attr:`scale` should also be set, otherwise
T
tianshuo78520a 已提交
8202
                                errors would be occurred in graph constructing stage.
8203
                                Default: None
8204
        align_corners(bool): ${align_corners_comment}
8205
        data_format (str, optional): Specify the data format of the input, and the data format of the output
8206 8207 8208
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`.
Y
yuyang18 已提交
8209 8210

    Returns:
R
ruri 已提交
8211
	Variable: 4-D tensor(NCHW or NHWC).
8212 8213 8214

    Examples:
        .. code-block:: python
8215

R
ruri 已提交
8216 8217 8218
	    #declarative mode
	    import paddle.fluid as fluid
	    import numpy as np
8219 8220 8221
	    import paddle
	    paddle.enable_static()

R
ruri 已提交
8222 8223 8224 8225 8226 8227 8228 8229 8230 8231 8232 8233 8234 8235 8236 8237 8238 8239 8240 8241 8242 8243 8244 8245 8246 8247
	    input = fluid.data(name="input", shape=[None,3,6,10])

	    #1
	    output = fluid.layers.resize_nearest(input=input,out_shape=[12,12])

	    #2
	    #x = np.array([2]).astype("int32")
	    #dim1 = fluid.data(name="dim1", shape=[1], dtype="int32")
	    #fluid.layers.assign(input=x, output=dim1)
	    #output = fluid.layers.resize_nearest(input=input,out_shape=[12,dim1])

	    #3
	    #x = np.array([3,12]).astype("int32")
	    #shape_tensor = fluid.data(name="shape_tensor", shape=[2], dtype="int32")
	    #fluid.layers.assign(input=x, output=shape_tensor)
	    #output = fluid.layers.resize_nearest(input=input,out_shape=shape_tensor)

	    #4
	    #x = np.array([0.5]).astype("float32")
	    #scale_tensor = fluid.data(name="scale", shape=[1], dtype="float32")
	    #fluid.layers.assign(x,scale_tensor)
	    #output = fluid.layers.resize_nearest(input=input,scale=scale_tensor)

	    place = fluid.CPUPlace()
	    exe = fluid.Executor(place)
	    exe.run(fluid.default_startup_program())
8248

R
ruri 已提交
8249
	    input_data = np.random.rand(2,3,6,10).astype("float32")
8250

R
ruri 已提交
8251 8252 8253 8254
	    output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)
8255

R
ruri 已提交
8256 8257 8258 8259 8260 8261 8262 8263 8264 8265
	    print(output_data[0].shape)

	    #1
	    # (2, 3, 12, 12)
	    #2
	    # (2, 3, 12, 2)
	    #3
	    # (2, 3, 3, 12)
	    #4
	    # (2, 3, 3, 5)
8266

R
ruri 已提交
8267 8268
	    #imperative mode
	    import paddle.fluid.dygraph as dg
8269

R
ruri 已提交
8270 8271 8272 8273 8274 8275
	    with dg.guard(place) as g:
    		input = dg.to_variable(input_data)
    		output = fluid.layers.resize_nearest(input=input, out_shape=[12,12])
    		print(output.shape)

		# [2L, 3L, 12L, 12L]
8276 8277 8278



8279 8280
    """

8281 8282 8283 8284 8285 8286 8287 8288 8289 8290
    return image_resize(
        input,
        out_shape,
        scale,
        name,
        'NEAREST',
        actual_shape,
        align_corners,
        align_mode=1,
        data_format=data_format)
8291 8292 8293 8294


def image_resize_short(input, out_short_len, resample='BILINEAR'):
    """
R
ruri 已提交
8295
    This op resizes a batch of images. The short edge of input images will be
8296 8297
    resized to the given 'out_short_len'. The long edge of input images
    will be resized proportionately to make images' length-width ratio
8298 8299
    constant.

R
ruri 已提交
8300 8301
    Parameters:
        input (Variable): 4-D tensor(NCHW), The input tensor of image resize layer.
8302
        out_short_len(int): The length of output images' short edge.
8303
        resample (str): resample method, default: BILINEAR.
F
fengjiayi 已提交
8304

8305
    Returns:
R
ruri 已提交
8306
        Variable: 4-D tensor(NCHW).
R
ruri 已提交
8307 8308 8309 8310

    Examples:
        .. code-block:: python

8311
            import paddle.fluid as fluid
R
ruri 已提交
8312
            input = fluid.data(name="input", shape=[None,3,6,9], dtype="float32")
R
ruri 已提交
8313
            out = fluid.layers.image_resize_short(input, out_short_len=3)
8314 8315 8316 8317 8318 8319 8320 8321 8322 8323
    """
    in_shape = input.shape
    if len(in_shape) != 4:
        raise ValueError(
            "The rank of input must be 4 (num_batches, channels, in_h, in_w).")
    hw = in_shape[2:4]
    short_idx = hw.index(min(hw))
    long_idx = 1 - short_idx
    out_shape = list(hw)
    out_shape[short_idx] = out_short_len
F
fengjiayi 已提交
8324 8325 8326
    out_shape[long_idx] = int(
        float(out_shape[long_idx]) * (float(out_short_len) / float(hw[
            short_idx])) + 0.5)
8327 8328 8329
    return image_resize(input=input, out_shape=out_shape, resample=resample)


8330
@deprecated(since="2.0.0", update_to="paddle.gather")
8331
def gather(input, index, overwrite=True):
W
whs 已提交
8332
    """
Q
qiaolongfei 已提交
8333

8334
    Output is obtained by gathering entries of the outer-most dimension
W
whs 已提交
8335 8336 8337 8338
    of X indexed by `index` and concatenate them together.

    .. math::

8339
        Out = X[Index]
W
whs 已提交
8340 8341 8342 8343 8344 8345 8346


    .. code-block:: text


                Given:

8347 8348
                X = [[1, 2],
                     [3, 4],
W
whs 已提交
8349 8350 8351 8352 8353 8354 8355 8356 8357 8358
                     [5, 6]]

                Index = [1, 2]

                Then:

                Out = [[3, 4],
                       [5, 6]]

    Args:
8359
        input (Tensor): The source input tensor with rank>=1. Supported data type is
8360
            int32, int64, float32, float64 and uint8 (only for CPU),
Y
Yibing Liu 已提交
8361
            float16 (only for GPU).
8362
        index (Tensor): The index input tensor with rank=1. Data type is int32 or int64.
Y
Yibing Liu 已提交
8363
        overwrite (bool, optional): The mode that updating the grad when has same index.
8364
            If True, use the overwrite mode to update the grad of the same index,
8365
	    if False, use the accumulate mode to update the grad of the same index.
8366
	    Default value is True.
8367

W
whs 已提交
8368
    Returns:
8369 8370
        output (Tensor): The output is a tensor with the same rank as input.
    
W
whs 已提交
8371
    Examples:
W
whs 已提交
8372

W
whs 已提交
8373 8374
        .. code-block:: python

8375
            import paddle
8376
            import paddle.fluid as fluid
8377 8378
            paddle.enable_static()

Y
Yibing Liu 已提交
8379 8380
            x = fluid.data(name='x', shape=[-1, 5], dtype='float32')
            index = fluid.data(name='index', shape=[-1, 1], dtype='int32')
W
whs 已提交
8381 8382
            output = fluid.layers.gather(x, index)
    """
8383
    if in_dygraph_mode():
W
wanghuancoder 已提交
8384
        return _C_ops.gather(input, index, None, 'overwrite', overwrite)
8385 8386 8387 8388 8389

    check_variable_and_dtype(
        input, 'x',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'uint8'], 'gather')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather')
W
whs 已提交
8390 8391
    helper = LayerHelper('gather', **locals())
    dtype = helper.input_dtype()
X
Xin Pan 已提交
8392
    out = helper.create_variable_for_type_inference(dtype)
W
whs 已提交
8393 8394 8395 8396
    helper.append_op(
        type="gather",
        inputs={"X": input,
                "Index": index},
8397 8398
        outputs={"Out": out},
        attrs={'overwrite': overwrite})
W
whs 已提交
8399 8400 8401
    return out


8402
@deprecated(since="2.0.0", update_to="paddle.gather_nd")
8403 8404 8405 8406
def gather_nd(input, index, name=None):
    """
    **Gather Nd Layer**

8407 8408 8409 8410
    This function is actually a high-dimensional extension of :code:`gather`
    and supports for simultaneous indexing by multiple axes. :attr:`index` is a
    K-dimensional integer tensor, which is regarded as a (K-1)-dimensional
    tensor of :attr:`index` into :attr:`input`, where each element defines
8411 8412 8413 8414 8415 8416 8417 8418 8419 8420 8421 8422 8423 8424 8425 8426 8427 8428 8429 8430 8431 8432
    a slice of params:

    .. math::

        output[(i_0, ..., i_{K-2})] = input[index[(i_0, ..., i_{K-2})]]

    Obviously, :code:`index.shape[-1] <= input.rank` . And, the output tensor has
    shape :code:`index.shape[:-1] + input.shape[index.shape[-1]:]` .

    .. code-block:: text

            Given:
                input = [[[ 0,  1,  2,  3],
                          [ 4,  5,  6,  7],
                          [ 8,  9, 10, 11]],
                         [[12, 13, 14, 15],
                          [16, 17, 18, 19],
                          [20, 21, 22, 23]]]
                input.shape = (2, 3, 4)

            * Case 1:
                index = [[1]]
8433 8434 8435

                gather_nd(input, index)
                         = [input[1, :, :]]
8436 8437 8438 8439 8440 8441 8442 8443 8444 8445 8446 8447 8448 8449 8450 8451 8452 8453 8454
                         = [[12, 13, 14, 15],
                            [16, 17, 18, 19],
                            [20, 21, 22, 23]]

            * Case 2:
                index = [[0,2]]

                gather_nd(input, index)
                         = [input[0, 2, :]]
                         = [8, 9, 10, 11]

            * Case 3:
                index = [[1, 2, 3]]

                gather_nd(input, index)
                         = [input[1, 2, 3]]
                         = [23]

    Args:
8455
        input (Tensor): The input Tensor which it's data type should be bool, float32, float64, int32, int64.
8456 8457 8458 8459
        index (Tensor): The index input with rank > 1, index.shape[-1] <= input.rank.
                        Its dtype should be int32, int64.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.
                        For more information, please refer to :ref:`api_guide_Name` .
8460 8461

    Returns:
8462
        output (Tensor): A tensor with the shape index.shape[:-1] + input.shape[index.shape[-1]:]
8463 8464 8465 8466 8467

    Examples:

        .. code-block:: python

8468
            import paddle
8469
            import paddle.fluid as fluid
8470 8471
            paddle.enable_static()

8472 8473
            x = fluid.data(name='x', shape=[3, 4, 5], dtype='float32')
            index = fluid.data(name='index', shape=[2, 2], dtype='int32')
8474 8475 8476
            output = fluid.layers.gather_nd(x, index)

    """
8477
    if in_dygraph_mode():
W
wanghuancoder 已提交
8478
        return _C_ops.gather_nd(input, index)
8479 8480 8481 8482
    check_variable_and_dtype(input, 'input',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'gather_np')
    check_variable_and_dtype(index, 'index', ['int32', 'int64'], 'gather_np')
8483 8484
    helper = LayerHelper('gather_nd', **locals())
    dtype = helper.input_dtype()
8485
    output = helper.create_variable_for_type_inference(dtype)
8486 8487 8488 8489 8490 8491 8492 8493
    helper.append_op(
        type="gather_nd",
        inputs={"X": input,
                "Index": index},
        outputs={"Out": output})
    return output


S
ShenLiang 已提交
8494
@deprecated(since="2.0.0", update_to="paddle.scatter")
8495
def scatter(input, index, updates, name=None, overwrite=True):
8496
    """
8497 8498 8499 8500
    :alias_main: paddle.scatter
	:alias: paddle.scatter,paddle.tensor.scatter,paddle.tensor.manipulation.scatter
	:old_api: paddle.fluid.layers.scatter

8501 8502
    **Scatter Layer**

8503
    Output is obtained by updating the input on selected indices based on updates.
8504

8505
    .. code-block:: python
8506

8507
        import numpy as np
8508

8509 8510 8511 8512 8513 8514 8515 8516 8517 8518 8519 8520 8521 8522 8523 8524 8525 8526 8527 8528 8529
        #input:
        input = np.array([[1, 1], [2, 2], [3, 3]])
        index = np.array([2, 1, 0, 1])
        # shape of updates should be the same as input
        # shape of updates with dim > 1 should be the same as input
        updates = np.array([[1, 1], [2, 2], [3, 3], [4, 4]])
        overwrite = False

        # calculation:
        if not overwrite:
            for i in range(len(index)):
                input[index[i]] = np.zeros((2))

        for i in range(len(index)):
            if (overwrite):
                input[index[i]] = updates[i]
            else:
                input[index[i]] += updates[i]
        # output:
        out = np.array([[3, 3], [6, 6], [1, 1]])
        out.shape # [3, 2]
8530 8531

    Args:
8532 8533
        input (Variable): The input N-D Tensor with rank>=1. Data type can be float32.
        index (Variable): The index 1-D Tensor. Data type can be int32, int64. The length of index cannot exceed updates's length, and the value in index cannot exceed input's length.
T
tianshuo78520a 已提交
8534
        updates (Variable): update input with updates parameter based on index. shape should be the same as input, and dim value with dim > 1 should be the same as input.
8535 8536
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .
        overwrite (bool): The mode that updating the output when there are same indices.
8537
            If True, use the overwrite mode to update the output of the same index,
8538
	    if False, use the accumulate mode to update the output of the same index.
8539
	    Default value is True.
8540 8541

    Returns:
8542
        Variable(Tensor|LoDTensor): The output is a Tensor with the same shape as input.
8543 8544 8545 8546 8547

    Examples:

        .. code-block:: python

8548
            import paddle
8549
            import numpy as np
8550
            import paddle.fluid as fluid
8551
            paddle.enable_static()
8552

8553 8554 8555
            input = fluid.layers.data(name='data', shape=[3, 2], dtype='float32', append_batch_size=False)
            index = fluid.layers.data(name='index', shape=[4], dtype='int64', append_batch_size=False)
            updates = fluid.layers.data(name='update', shape=[4, 2], dtype='float32', append_batch_size=False)
8556

8557 8558 8559 8560 8561 8562 8563 8564 8565 8566 8567 8568 8569 8570
            output = fluid.layers.scatter(input, index, updates, overwrite=False)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            in_data = np.array([[1, 1], [2, 2], [3, 3]]).astype(np.float32)
            index_data = np.array([2, 1, 0, 1]).astype(np.int64)
            update_data = np.array([[1, 1], [2, 2], [3, 3], [4, 4]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'data':in_data, "index":index_data, "update":update_data}, fetch_list=[output])
            print(res)
            # [array([[3., 3.],
            #   [6., 6.],
            #   [1., 1.]], dtype=float32)]
8571 8572 8573
    """
    helper = LayerHelper('scatter', **locals())
    dtype = helper.input_dtype()
X
Xin Pan 已提交
8574
    out = helper.create_variable_for_type_inference(dtype)
8575 8576 8577 8578 8579
    helper.append_op(
        type="scatter",
        inputs={"X": input,
                "Ids": index,
                "Updates": updates},
8580
        attrs={'overwrite': overwrite},
8581 8582 8583 8584
        outputs={"Out": out})
    return out


8585
def scatter_nd_add(ref, index, updates, name=None):
8586
    r"""
8587 8588 8589
    **Scatter_nd_add Layer**

    Output is obtained by applying sparse addition to a single value
8590
    or slice in a Variable.
8591

8592 8593 8594
    :attr:`ref` is a Tensor with rank :math:`R`
    and :attr:`index` is a Tensor with rank :math:`K` . Thus, :attr:`index`
    has shape :math:`[i_0, i_1, ..., i_{K-2}, Q]` where :math:`Q \leq R` . :attr:`updates`
8595 8596
    is a Tensor with rank :math:`K - 1 + R - Q` and its
    shape is :math:`index.shape[:-1] + ref.shape[index.shape[-1]:]` .
8597

8598 8599 8600 8601 8602
    According to the :math:`[i_0, i_1, ..., i_{K-2}]` of :attr:`index` ,
    add the corresponding :attr:`updates` slice to the :attr:`ref` slice
    which is obtained by the last one dimension of :attr:`index` .

    .. code-block:: text
8603

8604 8605 8606 8607 8608 8609 8610 8611
        Given:

        * Case 1:
            ref = [0, 1, 2, 3, 4, 5]
            index = [[1], [2], [3], [1]]
            updates = [9, 10, 11, 12]

          we get:
8612

8613 8614 8615 8616 8617 8618 8619 8620 8621 8622 8623 8624
            output = [0, 22, 12, 14, 4, 5]

        * Case 2:
            ref = [[65, 17], [-14, -25]]
            index = [[], []]
            updates = [[[-1, -2], [1, 2]],
                       [[3, 4], [-3, -4]]]
            ref.shape = (2, 2)
            index.shape = (2, 0)
            updates.shape = (2, 2, 2)

          we get:
8625

8626 8627 8628
            output = [[67, 19], [-16, -27]]

    Args:
Z
Zeng Jinle 已提交
8629
        ref (Variable): The ref input. Its dtype should be int32, int64, float32, float64.
8630 8631
        index (Variable): The index input with rank > 1 and index.shape[-1] <= ref.rank.
                          Its dtype should be int32 or int64 as it is used as indexes.
8632 8633 8634
        updates (Variable): The updated value of scatter_nd_add op, and it must have the same dtype
                            as ref. It must have the shape index.shape[:-1] + ref.shape[index.shape[-1]:].
        name (str|None): The output variable name. If set None, the layer will be named automatically.
8635 8636

    Returns:
8637
        output (Variable): The output is a tensor with the same shape and dtype as ref.
8638 8639 8640 8641 8642 8643

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
8644 8645
            import paddle
            paddle.enable_static()
8646 8647 8648
            ref = fluid.data(name='ref', shape=[3, 5, 9, 10], dtype='float32')
            index = fluid.data(name='index', shape=[3, 2], dtype='int32')
            updates = fluid.data(name='update', shape=[3, 9, 10], dtype='float32')
8649 8650 8651

            output = fluid.layers.scatter_nd_add(ref, index, updates)
    """
8652 8653

    if in_dygraph_mode():
W
wanghuancoder 已提交
8654
        op = getattr(_C_ops, 'scatter_nd_add')
8655 8656
        return op(ref, index, updates)

8657 8658 8659 8660
    if ref.dtype != updates.dtype:
        raise ValueError("ref and updates must have same data type.")

    helper = LayerHelper('scatter_nd_add', **locals())
8661
    dtype = helper.input_dtype(input_param_name='ref')
8662
    output = helper.create_variable_for_type_inference(dtype)
8663 8664 8665 8666 8667 8668 8669 8670 8671 8672 8673 8674 8675
    helper.append_op(
        type="scatter_nd_add",
        inputs={"X": ref,
                "Index": index,
                "Updates": updates},
        outputs={"Out": output})
    return output


def scatter_nd(index, updates, shape, name=None):
    """
    **Scatter_nd Layer**

8676 8677 8678
    Output is obtained by scattering the :attr:`updates` in a new tensor according
    to :attr:`index` . This op is similar to :code:`scatter_nd_add`, except the
    tensor of :attr:`shape` is zero-initialized. Correspondingly, :code:`scatter_nd(index, updates, shape)`
8679
    is equal to :code:`scatter_nd_add(paddle.zeros(shape, updates.dtype), index, updates)` .
8680 8681 8682
    If :attr:`index` has repeated elements, then the corresponding updates are accumulated.
    Because of the numerical approximation issues, the different order of repeated elements
    in :attr:`index` may cause different results. The specific calculation method can be
8683 8684 8685
    seen :code:`scatter_nd_add` . This op is the inverse of the :code:`gather_nd` op.

    Args:
8686
        index (Tensor): The index input with ndim > 1 and index.shape[-1] <= len(shape).
8687
                          Its dtype should be int32 or int64 as it is used as indexes.
8688
        updates (Tensor): The updated value of scatter_nd op. Its dtype should be float32, float64.
8689 8690
                            It must have the shape index.shape[:-1] + shape[index.shape[-1]:]
        shape(tuple|list): Shape of output tensor.
8691
        name (str|None): The output Tensor name. If set None, the layer will be named automatically.
8692 8693

    Returns:
8694
        output (Tensor): The output is a tensor with the same type as :attr:`updates` .
8695 8696 8697 8698 8699

    Examples:

        .. code-block:: python

8700 8701
            import paddle
            import numpy as np
8702

8703 8704 8705 8706 8707
            index_data = np.array([[1, 1],
                                   [0, 1],
                                   [1, 3]]).astype(np.int64)
            index = paddle.to_tensor(index_data)
            updates = paddle.rand(shape=[3, 9, 10], dtype='float32')
8708 8709
            shape = [3, 5, 9, 10]

8710
            output = paddle.scatter_nd(index, updates, shape)
8711 8712 8713 8714
    """
    return scatter_nd_add(zeros(shape, updates.dtype), index, updates, name)


Y
yuyang18 已提交
8715 8716 8717 8718 8719 8720 8721 8722 8723 8724 8725 8726 8727
@templatedoc()
def random_crop(x, shape, seed=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        shape(${shape_type}): ${shape_comment}
        seed(int|${seed_type}|None): ${seed_comment} By default, the seed will
            get from `random.randint(-65536, 65535)`.

    Returns:
        ${out_comment}
8728

8729
    Examples:
Q
qingqing01 已提交
8730 8731 8732 8733 8734 8735 8736 8737 8738 8739 8740 8741 8742
        .. code-block:: python

            import paddle.fluid as fluid
            img = fluid.data("img", [None, 3, 256, 256])
            # cropped_img is [-1, 3, 224, 224]
            cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224])

            # cropped_img2 shape: [-1, 2, 224, 224]
            # cropped_img2 = fluid.layers.random_crop(img, shape=[2, 224, 224])

            # cropped_img3 shape: [-1, 3, 128, 224]
            # cropped_img3 = fluid.layers.random_crop(img, shape=[128, 224])

Y
yuyang18 已提交
8743
    """
F
stash  
fengjiayi 已提交
8744
    helper = LayerHelper("random_crop", **locals())
8745 8746 8747 8748
    check_variable_and_dtype(x, 'x',
                             ['float32', 'float64', 'uint8', 'int16', 'int32'],
                             'random_crop')
    check_type(shape, 'shape', (list, Variable), 'random_crop')
F
fengjiayi 已提交
8749
    dtype = x.dtype
X
Xin Pan 已提交
8750
    out = helper.create_variable_for_type_inference(dtype)
Y
yuyang18 已提交
8751
    if seed is None:
8752
        seed = np.random.randint(-65536, 65536)
F
fengjiayi 已提交
8753
    op_attrs = {"shape": shape}
F
stash  
fengjiayi 已提交
8754
    if isinstance(seed, int):
F
fengjiayi 已提交
8755 8756 8757 8758 8759
        op_attrs["startup_seed"] = seed
        seed = helper.create_variable(
            name=unique_name.generate("random_crop_seed"),
            dtype="int64",
            persistable=True)
F
stash  
fengjiayi 已提交
8760 8761 8762 8763
    elif not isinstance(seed, Variable):
        raise ValueError("'seed' must be a Variable or an int.")
    helper.append_op(
        type="random_crop",
F
fix  
fengjiayi 已提交
8764
        inputs={"X": x,
F
stash  
fengjiayi 已提交
8765 8766
                "Seed": seed},
        outputs={"Out": out,
F
fengjiayi 已提交
8767 8768
                 "SeedOut": seed},
        attrs=op_attrs)
F
stash  
fengjiayi 已提交
8769
    return out
W
whs 已提交
8770 8771


8772
def log(x, name=None):
8773
    r"""
W
wanghaoshuang 已提交
8774 8775 8776 8777
    Calculates the natural log of the given input tensor, element-wise.

    .. math::

8778
        Out = \\ln(x)
W
wanghaoshuang 已提交
8779 8780

    Args:
S
Steffy-zxf 已提交
8781
        x (Tensor): Input Tensor. Must be one of the following types: float32, float64.
W
Wilber 已提交
8782
        name (str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
8783

W
wanghaoshuang 已提交
8784 8785

    Returns:
S
Steffy-zxf 已提交
8786
        Tensor: The natural log of the input Tensor computed element-wise.
W
wanghaoshuang 已提交
8787 8788 8789 8790 8791

    Examples:

        .. code-block:: python

S
Steffy-zxf 已提交
8792
            import paddle
W
Wilber 已提交
8793

S
Steffy-zxf 已提交
8794 8795 8796 8797
            x = [[2,3,4], [7,8,9]]
            x = paddle.to_tensor(x, dtype='float32')
            res = paddle.log(x)
            # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]]
W
wanghaoshuang 已提交
8798
    """
8799
    if in_dygraph_mode():
W
wanghuancoder 已提交
8800
        return _C_ops.log(x)
8801

8802
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], "log")
8803
    inputs = {'X': [x]}
W
wanghaoshuang 已提交
8804
    helper = LayerHelper('log', **locals())
W
wanghaoshuang 已提交
8805
    dtype = helper.input_dtype(input_param_name='x')
X
Xin Pan 已提交
8806
    out = helper.create_variable_for_type_inference(dtype)
W
wanghaoshuang 已提交
8807
    helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out})
W
wanghaoshuang 已提交
8808 8809 8810
    return out


8811
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu")
8812
def relu(x, name=None):
W
wanghaoshuang 已提交
8813
    """
Z
zhupengyang 已提交
8814
    ${comment}
W
wanghaoshuang 已提交
8815 8816

    Args:
Z
zhupengyang 已提交
8817 8818 8819 8820
        x(Variable): ${x_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
W
wanghaoshuang 已提交
8821 8822

    Returns:
Z
zhupengyang 已提交
8823
        Variable: ${out_comment}
W
wanghaoshuang 已提交
8824 8825 8826 8827 8828

    Examples:

        .. code-block:: python

8829
            import paddle.fluid as fluid
Z
zhupengyang 已提交
8830 8831 8832 8833 8834 8835 8836 8837 8838
            import numpy as np
            in1 = np.array([[-1,0],[1,2.6]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu(x1)
                print(out1.numpy())
                # [[0.  0. ]
                #  [1.  2.6]]
"""
8839
    if in_dygraph_mode():
W
wanghuancoder 已提交
8840
        return _C_ops.relu(x)
8841

8842 8843
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')

8844
    inputs = {'X': [x]}
W
wanghaoshuang 已提交
8845
    helper = LayerHelper('relu', **locals())
W
wanghaoshuang 已提交
8846
    dtype = helper.input_dtype(input_param_name='x')
X
Xin Pan 已提交
8847
    out = helper.create_variable_for_type_inference(dtype)
X
Xin Pan 已提交
8848 8849
    helper.append_op(
        type="relu", inputs={"X": helper.input('x')}, outputs={"Out": out})
W
wanghaoshuang 已提交
8850
    return out
8851 8852


8853
@deprecated(since="2.0.0", update_to="paddle.nn.functional.selu")
C
chengduo 已提交
8854
def selu(x, scale=None, alpha=None, name=None):
8855
    r"""
8856

8857 8858 8859
    Selu Operator.

    The equation is:
8860

8861 8862 8863 8864 8865 8866
    .. math::
        selu= \\lambda*
        \\begin{cases}
            x                      &\\quad \\text{ if } x>0 \n
            \\alpha * e^x - \\alpha  &\\quad \\text{ if } x<=0
        \\end{cases}
8867

8868 8869 8870

    The input `X` can carry the LoD (Level of Details) information,
    or not. And the output shares the LoD information with input `X`.
C
chengduo 已提交
8871 8872

    Args:
8873 8874
        x (Variable): The input N-D Tensor.
        scale(float, optional): lambda in selu activation function,
C
chengduo 已提交
8875 8876 8877
            the default value is 1.0507009873554804934193349852946.
            For more information about this value, please refer
            to: https://arxiv.org/abs/1706.02515.
8878
        alpha(float, optional): alpha in selu activation function,
C
chengduo 已提交
8879 8880 8881
            the default value is 1.6732632423543772848170429916717.
            For more information about this value, please refer
            to: https://arxiv.org/abs/1706.02515.
8882 8883
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

C
chengduo 已提交
8884 8885

    Returns:
8886
        Variable(Tensor|LoDTensor): The output Tensor or LoDTensor with the same shape and LoD information as input.
C
chengduo 已提交
8887 8888 8889 8890

    Examples:

        .. code-block:: python
8891

8892
            import paddle
8893
            import paddle.fluid as fluid
8894
            import numpy as np
8895
            paddle.enable_static()
8896 8897 8898 8899 8900 8901 8902 8903 8904 8905 8906

            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
            output = fluid.layers.selu(inputs)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.array([[0, 1],[2, 3]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([[0.      , 1.050701],[2.101402, 3.152103]], dtype=float32)]
C
chengduo 已提交
8907
    """
8908 8909
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'selu')

C
chengduo 已提交
8910 8911 8912 8913 8914 8915 8916 8917 8918 8919 8920 8921 8922 8923
    helper = LayerHelper('selu', **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    attrs = {}
    if scale is not None:
        attrs["scale"] = scale
    if alpha is not None:
        attrs["alpha"] = alpha

    helper.append_op(
        type="selu", inputs={"X": x}, outputs={"Out": out}, attrs=attrs)
    return out


W
whs 已提交
8924
def mean_iou(input, label, num_classes):
8925
    r"""
W
whs 已提交
8926
    Mean Intersection-Over-Union is a common evaluation metric for
8927 8928 8929 8930
    semantic image segmentation, which first computes the IOU for each
    semantic class and then computes the average over classes.
    IOU is defined as follows:

W
whs 已提交
8931
    .. math::
8932

H
haowang101779990 已提交
8933
        IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.
W
whs 已提交
8934

8935
    The predictions are accumulated in a confusion matrix and mean-IOU
W
whs 已提交
8936 8937 8938
    is then calculated from it.


L
Liufang Sang 已提交
8939
    Parameters:
S
Steffy-zxf 已提交
8940 8941
        input (Tensor): A n-D Tensor of prediction results for semantic labels with type int32 or int64.
        label (Tensor): A Tensor of ground truth labels with type int32 or int64.
W
whs 已提交
8942
                           Its shape should be the same as input.
L
Liufang Sang 已提交
8943 8944
        num_classes (int32): The possible number of labels.

8945
    Returns:
S
Steffy-zxf 已提交
8946
	Three Tensors.
L
Liufang Sang 已提交
8947

S
Steffy-zxf 已提交
8948
        - mean_iou(Tensor) : A 1-D Tensor representing the mean intersection-over-union with shape [1]. \
L
Liufang Sang 已提交
8949
			    Data type is float32.
S
Steffy-zxf 已提交
8950
        - out_wrong(Tensor) : A 1-D Tensor with shape [num_classes]. Data type is int32. \
L
Liufang Sang 已提交
8951
			     The wrong numbers of each class.
S
Steffy-zxf 已提交
8952
        - out_correct(Tensor): A 1-D  Tensor with shape [num_classes]. Data type is int32. The correct numbers of each class.
8953 8954


W
whs 已提交
8955 8956 8957
    Examples:

        .. code-block:: python
8958

S
Steffy-zxf 已提交
8959 8960 8961
            import paddle

            iou_shape = [64, 32, 32]
8962
            num_classes = 5
S
Steffy-zxf 已提交
8963 8964 8965
            predict = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
            label = paddle.randint(low=0, high=255, shape=iou_shape, dtype='int64')
            mean_iou, out_wrong, out_correct = paddle.metric.mean_iou(predict, label, num_classes)
W
whs 已提交
8966
    """
S
Steffy-zxf 已提交
8967
    if in_dygraph_mode():
W
wanghuancoder 已提交
8968
        return _C_ops.mean_iou(input, label, 'num_classes', num_classes)
S
Steffy-zxf 已提交
8969

W
whs 已提交
8970
    helper = LayerHelper('mean_iou', **locals())
8971 8972 8973
    check_variable_and_dtype(input, 'Predictions', ['int32', 'int64'],
                             'mean_iou')
    check_variable_and_dtype(label, 'Labels', ['int32', 'int64'], 'mean_iou')
X
Xin Pan 已提交
8974 8975 8976
    out_mean_iou = helper.create_variable_for_type_inference(dtype='float32')
    out_wrong = helper.create_variable_for_type_inference(dtype='int32')
    out_correct = helper.create_variable_for_type_inference(dtype='int32')
W
whs 已提交
8977 8978
    helper.append_op(
        type="mean_iou",
W
whs 已提交
8979 8980
        inputs={"Predictions": input,
                "Labels": label},
W
whs 已提交
8981
        outputs={
W
whs 已提交
8982 8983 8984
            "OutMeanIou": out_mean_iou,
            "OutWrong": out_wrong,
            "OutCorrect": out_correct
W
whs 已提交
8985 8986 8987
        },
        attrs={"num_classes": num_classes})
    return out_mean_iou, out_wrong, out_correct
8988 8989 8990 8991 8992 8993


def crop(x, shape=None, offsets=None, name=None):
    """
    Crop input into output, as specified by offsets and shape.

S
SunGaofeng 已提交
8994 8995
    **Warning:** THIS OP IS DEPRECATED. It will be removed in the future version.
    Instructions for updating: Use :ref:`api_fluid_layers_crop_tensor` instead.
8996

8997 8998 8999 9000 9001 9002 9003 9004 9005 9006 9007 9008 9009 9010 9011 9012 9013 9014 9015 9016 9017 9018 9019 9020 9021 9022 9023 9024
    .. code-block:: text

        * Case 1:
            Given
                X = [[0, 1, 2, 0, 0]
                     [0, 3, 4, 0, 0]
                     [0, 0, 0, 0, 0]],
            and
                shape = [2, 2],
                offsets = [0, 1],
            output is:
                Out = [[1, 2],
                       [3, 4]].
        * Case 2:
            Given
                X = [[0, 1, 2, 5, 0]
                     [0, 3, 4, 6, 0]
                     [0, 0, 0, 0, 0]],
            and shape is tensor
                shape = [[0, 0, 0]
                         [0, 0, 0]]
            and
                offsets = [0, 1],

            output is:
                Out = [[1, 2, 5],
                       [3, 4, 6]].

S
SunGaofeng 已提交
9025 9026 9027 9028
    Parameters:
        x (Variable): Tensor, data type can be float32 or float64.
        shape (Variable|list/tuple of integers): The output shape is specified
            by `shape`, which can be a Tensor or a list/tuple of integers.
9029
            If it is a Tensor, it's rank must be the same as `x` , only
S
SunGaofeng 已提交
9030
            it's shape will be used, and the value of it will be ignored. This way
9031
            is suitable for the case that the output shape may be changed each
S
SunGaofeng 已提交
9032
            iteration. If it is a list/tuple of integers, it's length must be the same
9033
            as the rank of `x`
S
SunGaofeng 已提交
9034 9035 9036
        offsets (Variable|list/tuple of integers|None): Specifies the cropping
            offsets at each dimension. It can be a Tensor or a list/tuple
            of integers. If it is a Tensor, it's rank must be the same as `x`.
9037
            This way is suitable for the case that the offsets may be changed
S
SunGaofeng 已提交
9038 9039
            each iteration. If it is a list/tuple of integers, it's length must be the
            same as the rank of `x`. If None, the offsets are 0 at each dimension.
9040 9041 9042
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name` . Usually name is no need to set and
            None by default.
9043 9044

    Returns:
S
SunGaofeng 已提交
9045 9046 9047 9048
        The cropped Tensor, which has the same rank and data type with `x`

    Return Type:
        Variable
9049 9050 9051 9052 9053 9054 9055 9056

    Raises:
        ValueError: If shape is not a list, tuple or Variable.

    Examples:

        .. code-block:: python

S
SunGaofeng 已提交
9057
            import paddle.fluid as fluid
9058 9059 9060
            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
S
SunGaofeng 已提交
9061 9062
            x = fluid.data(name="x", shape=[3, 3, 5], dtype="float32")
            y = fluid.data(name="y", shape=[2, 2, 3], dtype="float32")
9063 9064 9065
            crop = fluid.layers.crop(x, shape=y)

            # or
S
SunGaofeng 已提交
9066 9067
            z = fluid.data(name="z", shape=[3, 3, 5], dtype="float32")
            crop = fluid.layers.crop(z, shape=[2, 2, 3])
9068 9069

    """
9070 9071
    check_variable_and_dtype(x, 'x', ['float32'], 'crop')
    check_type(shape, 'shape', (list, tuple, Variable), 'crop')
9072 9073 9074 9075 9076
    helper = LayerHelper('crop', **locals())

    if offsets is None:
        offsets = [0] * len(x.shape)

X
Xin Pan 已提交
9077
    out = helper.create_variable_for_type_inference(x.dtype)
9078 9079 9080 9081 9082 9083 9084 9085 9086 9087 9088 9089 9090 9091 9092 9093 9094
    ipts = {'X': x}
    attrs = {}
    if isinstance(shape, Variable):
        ipts['Y'] = shape
    else:
        attrs['shape'] = shape
    if isinstance(offsets, Variable):
        ipts['Offsets'] = offsets
    else:
        attrs['offsets'] = offsets

    helper.append_op(
        type='crop',
        inputs=ipts,
        outputs={'Out': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out
9095 9096


9097 9098 9099 9100 9101 9102
def crop_tensor(x, shape=None, offsets=None, name=None):
    """
    Crop input into output, as specified by offsets and shape.

    .. code-block:: text

9103 9104
        * Case 1 (input is a 2-D Tensor):
            Input:
9105
                X.shape = [3, 5]
9106 9107 9108 9109 9110 9111 9112
                X.data = [[0, 1, 2, 0, 0],
                          [0, 3, 4, 0, 0],
                          [0, 0, 0, 0, 0]]
            Parameters:
                shape = [2, 2]
                offsets = [0, 1]
            Output:
9113 9114 9115
                Out.shape = [2, 2]
                Out.data = [[1, 2],
                            [3, 4]]
9116 9117 9118 9119 9120 9121 9122 9123 9124 9125
        * Case 2 (input is a 3-D Tensor):
            Input:
                X.shape = [2, 3, 4]
                X.data =  [[[0, 1, 2, 3],
                            [0, 5, 6, 7],
                            [0, 0, 0, 0]],
                           [[0, 3, 4, 5],
                            [0, 6, 7, 8],
                            [0, 0, 0, 0]]]
            Parameters:
9126
                shape = [2, 2, -1]
9127 9128
                offsets = [0, 0, 1]
            Output:
9129 9130 9131 9132 9133
                Out.shape = [2, 2, 3]
                Out.data  = [[[1, 2, 3],
                              [5, 6, 7]],
                             [[3, 4, 5],
                              [6, 7, 8]]]
9134 9135

    Parameters:
T
Thomas Young 已提交
9136 9137
        x (Tensor): 1-D to 6-D Tensor, the data type is float32, float64, int32 or int64.
        shape (list|tuple|Tensor): The output shape is specified
9138
            by `shape`. Its data type is int32. If a list/tuple, it's length must be
T
Thomas Young 已提交
9139
            the same as the dimension size of `x`. If a Tensor, it should be a 1-D Tensor.
9140
            When it is a list, each element can be an integer or a Tensor of shape: [1].
9141 9142
            If Variable contained, it is suitable for the case that the shape may
            be changed each iteration.
9143 9144
        offsets (list|tuple|Variable, optional): Specifies the cropping
            offsets at each dimension. Its data type is int32. If a list/tuple, it's length
T
Thomas Young 已提交
9145
            must be the same as the dimension size of `x`. If a Tensor, it should be a 1-D
9146 9147 9148 9149 9150
            Tensor. When it is a list, each element can be an integer or a Tensor of shape: [1].
            If Variable contained, it is suitable for the case that the offsets may be changed
            each iteration. Default: None, the offsets are 0 at each dimension.
        name(str, optional): The default value is None. Normally there is no need for user to set
            this property. For more information, please refer to :ref:`api_guide_Name` .
9151 9152

    Returns:
T
Thomas Young 已提交
9153
        Tensor: The cropped Tensor has same data type with `x`.
9154 9155 9156 9157

    Examples:

        .. code-block:: python
T
Thomas Young 已提交
9158
          :name: code-example1
9159

9160
            import paddle
T
Thomas Young 已提交
9161 9162 9163 9164 9165 9166 9167 9168 9169 9170 9171 9172 9173 9174 9175 9176 9177 9178 9179 9180 9181 9182
            x = paddle.to_tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
            # x.shape = [3, 3]
            # x = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]

            # shape can be a 1-D Tensor or list or tuple.
            shape = paddle.to_tensor([2, 2], dtype='int32')
            # shape = [2, 2]
            # shape = (2, 2)
            out = paddle.crop(x, shape)
            # out.shape = [2, 2]
            # out = [[1,2], [4,5]]

            # offsets can be a 1-D Tensor or list or tuple.
            offsets = paddle.to_tensor([0, 1], dtype='int32')
            # offsets = [1, 0]
            # offsets = (1, 1)
            out = paddle.crop(x, shape, offsets)
            # out.shape = [2, 2]
            # if offsets = [0, 0], out = [[1,2], [4,5]]
            # if offsets = [0, 1], out = [[2,3], [5,6]]
            # if offsets = [1, 0], out = [[4,5], [7,8]]
            # if offsets = [1, 1], out = [[5,6], [8,9]]
9183 9184 9185

    """
    helper = LayerHelper('crop_tensor', **locals())
9186 9187
    check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
                             'crop_tensor')
9188 9189 9190
    check_type(shape, 'shape', (list, tuple, Variable), 'crop_tensor')
    check_type(offsets, 'offsets', (list, tuple, Variable, type(None)),
               'crop_tensor')
9191 9192 9193 9194 9195 9196 9197 9198

    if offsets is None:
        offsets = [0] * len(x.shape)

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x}
    attrs = {}

9199 9200 9201 9202 9203 9204 9205 9206 9207 9208 9209 9210 9211 9212 9213 9214 9215 9216 9217 9218 9219 9220 9221 9222
    def _attr_shape_check(shape_val):
        if not isinstance(shape_val, int):
            raise TypeError(
                "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(shape_val))
        if shape_val == 0:
            raise ValueError(
                "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s."
                % str(shape_val))
        if shape_val < -1:
            raise ValueError(
                "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s."
                % str(shape_val))

    def _attr_offsets_check(offset_val):
        if not isinstance(offset_val, int):
            raise TypeError(
                "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s."
                % type(offset_val))
        if offset_val < 0:
            raise ValueError(
                "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s."
                % str(offset_val))

9223 9224 9225
    if isinstance(offsets, Variable):
        offsets.stop_gradient = True
        ipts['Offsets'] = offsets
9226
        attrs['offsets'] = [-1] * len(x.shape)
L
Leo Chen 已提交
9227
    elif utils._contain_var(offsets):
9228
        new_offsets_tensor = []
9229
        offsets_attr = []
9230 9231 9232 9233
        for dim in offsets:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_offsets_tensor.append(dim)
9234
                offsets_attr.append(-1)
9235
            else:
9236
                _attr_offsets_check(dim)
9237 9238 9239
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_offsets_tensor.append(temp_out)
9240
                offsets_attr.append(dim)
9241
        ipts['OffsetsTensor'] = new_offsets_tensor
9242
        attrs['offsets'] = offsets_attr
9243
    else:
9244 9245
        for offset in offsets:
            _attr_offsets_check(offset)
9246 9247 9248 9249 9250
        attrs['offsets'] = offsets

    if isinstance(shape, Variable):
        shape.stop_gradient = True
        ipts['Shape'] = shape
L
Leo Chen 已提交
9251
    elif utils._contain_var(shape):
9252 9253
        new_shape_tensor = []
        shape_attr = []
9254
        for dim_size in shape:
9255 9256 9257
            if isinstance(dim_size, Variable):
                dim_size.stop_gradient = True
                new_shape_tensor.append(dim_size)
9258
                shape_attr.append(0)
9259
            else:
9260
                _attr_shape_check(dim_size)
9261 9262 9263 9264 9265 9266 9267 9268
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant(
                    [1], 'int32', dim_size, force_cpu=True, out=temp_out)
                new_shape_tensor.append(temp_out)
                shape_attr.append(dim_size)
        ipts['ShapeTensor'] = new_shape_tensor
        attrs['shape'] = shape_attr
    else:
9269 9270
        for dim_size in shape:
            _attr_shape_check(dim_size)
9271 9272 9273 9274 9275 9276 9277 9278 9279 9280
        attrs['shape'] = shape

    helper.append_op(
        type='crop_tensor',
        inputs=ipts,
        outputs={'Out': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out


W
whs 已提交
9281 9282
def affine_grid(theta, out_shape, name=None):
    """
9283 9284 9285 9286
    :alias_main: paddle.nn.functional.affine_grid
	:alias: paddle.nn.functional.affine_grid,paddle.nn.functional.vision.affine_grid
	:old_api: paddle.fluid.layers.affine_grid

W
whs 已提交
9287 9288 9289 9290 9291 9292
    It generates a grid of (x,y) coordinates using the parameters of
    the affine transformation that correspond to a set of points where
    the input feature map should be sampled to produce the transformed
    output feature map.

    Args:
9293 9294 9295 9296 9297 9298
        theta (Variable) - A Tensor with shape [N, 2, 3]. It contains a batch of affine transform parameters.
                           The data type can be float32 or float64.
        out_shape (Variable | list | tuple): The shape of target output with format [batch_size, channel, height, width].
                                             ``out_shape`` can be a Tensor or a list or tuple. The data
                                             type must be int32.
        name(str|None): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`.
W
whs 已提交
9299 9300

    Returns:
9301
        Variable: A Tensor with shape [batch_size, H, W, 2] while 'H' and 'W' are the height and width of feature map in affine transformation. The data type is the same as `theta`.
W
whs 已提交
9302 9303 9304 9305 9306 9307 9308

    Raises:
        ValueError: If the type of arguments is not supported.

    Examples:

        .. code-block:: python
H
haowang101779990 已提交
9309

S
SunGaofeng 已提交
9310
            import paddle.fluid as fluid
9311 9312 9313 9314 9315 9316 9317 9318 9319 9320 9321 9322 9323 9324
            import numpy as np
            place = fluid.CPUPlace()
            theta = fluid.data(name="x", shape=[None, 2, 3], dtype="float32")
            out_shape = fluid.data(name="y", shape=[4], dtype="int32")
            grid_0 = fluid.layers.affine_grid(theta, out_shape)
            grid_1 = fluid.layers.affine_grid(theta, [5, 3, 28, 28])
            batch_size=2
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            output= exe.run(feed={"x": np.random.rand(batch_size,2,3).astype("float32"),
                                  "y": np.array([5, 3, 28, 28]).astype("int32")},
                                  fetch_list=[grid_0.name, grid_1.name])
            print(output[0])
            print(output[1])
W
whs 已提交
9325 9326 9327
    """
    helper = LayerHelper('affine_grid')

9328 9329 9330
    check_variable_and_dtype(theta, 'theta', ['float32', 'float64'],
                             'affine_grid')

W
whs 已提交
9331
    if not (isinstance(out_shape, list) or isinstance(out_shape, tuple) or \
9332
            isinstance(out_shape, Variable)):
W
whs 已提交
9333 9334 9335 9336 9337 9338 9339 9340 9341 9342
        raise ValueError("The out_shape should be a list, tuple or Variable.")

    if not isinstance(theta, Variable):
        raise ValueError("The theta should be a Variable.")

    out = helper.create_variable_for_type_inference(theta.dtype)
    ipts = {'Theta': theta}
    attrs = {}
    if isinstance(out_shape, Variable):
        ipts['OutputShape'] = out_shape
9343 9344
        check_variable_and_dtype(out_shape, 'out_shape', ['int32'],
                                 'affine_grid')
W
whs 已提交
9345 9346
    else:
        attrs['output_shape'] = out_shape
9347 9348 9349
    if core.is_compiled_with_rocm():
        # ROCM platform do not have MIOPEN kernel for affine_grid
        attrs['use_cudnn'] = False
W
whs 已提交
9350 9351 9352 9353 9354 9355 9356 9357 9358

    helper.append_op(
        type='affine_grid',
        inputs=ipts,
        outputs={'Output': out},
        attrs=None if len(attrs) == 0 else attrs)
    return out


W
whs 已提交
9359 9360 9361 9362 9363 9364 9365
def pad2d(input,
          paddings=[0, 0, 0, 0],
          mode='constant',
          pad_value=0.0,
          data_format="NCHW",
          name=None):
    """
9366

T
tianshuo78520a 已提交
9367
    Pad 2-d images according to 'paddings' and 'mode'.
W
whs 已提交
9368 9369 9370
    If mode is 'reflect', paddings[0] and paddings[1] must be no greater
    than height-1. And the width dimension has the same condition.

L
Liufang Sang 已提交
9371
    Parameters:
9372 9373
        input (Tensor): The input image with [N, C, H, W] format or [N, H, W, C] format, which is a 4-D Tensor with data type float32.
        paddings (Tensor | List[int32]): The padding size. If padding is a List, it must
L
Liufang Sang 已提交
9374 9375 9376 9377 9378 9379 9380 9381 9382 9383 9384 9385 9386 9387 9388
            contain four integers, (padding_top, padding_bottom, padding_left, padding_right).
            Otherwise, it is a 1-D Tensor with shape [4]. Data type is int32.
            Default is [0, 0, 0, 0].
        mode (str): Three modes: 'constant' (default), 'reflect', 'edge' .
        	When in 'constant' mode, this op uses a constant value to pad the input tensor.
        	When in 'reflect' mode, uses reflection of the input boundaries to pad the input tensor.
        	When in 'edge' mode, uses input boundaries to pad the input tensor.
        	Default is 'constant'
        pad_value (float32): The value to fill the padded areas in 'constant' mode . Default is 0.0
        data_format (str): An string from: "NHWC", "NCHW". Specify the data format of
                           the input data.
                           Default is  "NCHW"
        name (str, optional) : The default value is None.  Normally there is no need for
                    user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

9389
    Returns: Tensor, a 4-D Tensor padded according to paddings and mode and data type is same as input.
L
Liufang Sang 已提交
9390 9391

    Examples:
T
Tink_Y 已提交
9392
        .. code-block:: text
W
whs 已提交
9393

9394 9395 9396 9397 9398 9399 9400 9401 9402 9403 9404 9405 9406 9407 9408 9409 9410 9411 9412 9413 9414 9415 9416 9417
            Input = [[[[1., 2., 3.],
                       [4., 5., 6.]]]]

            Case 0:
                paddings = [0, 1, 2, 3],
                mode = 'constant'
                pad_value = 0
                Out = [[[[0., 0., 1., 2., 3., 0., 0., 0.],
                         [0., 0., 4., 5., 6., 0., 0., 0.],
                         [0., 0., 0., 0., 0., 0., 0., 0.]]]]

            Case 1:
                paddings = [0, 1, 2, 1],
                mode = 'reflect'
                Out = [[[[3., 2., 1., 2., 3., 2.],
                         [6., 5., 4., 5., 6., 5.],
                         [3., 2., 1., 2., 3., 2.]]]]

            Case 2:
                paddings = [0, 1, 2, 1],
                mode = 'edge'
                Out = [[[[1., 1., 1., 2., 3., 3.],
                         [4., 4., 4., 5., 6., 6.],
                         [4., 4., 4., 5., 6., 6.]]]]
M
minqiyang 已提交
9418

L
Liufang Sang 已提交
9419
    Code Examples:
W
whs 已提交
9420 9421
        .. code-block:: python

9422 9423 9424 9425 9426 9427 9428 9429
            import numpy as np
            import paddle
            import paddle.nn.functional as F

            # example 1
            x_shape = (1, 1, 3, 4)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
9430
            y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 2, 2, 1], pad_value=1, mode='constant')
9431 9432 9433 9434 9435 9436 9437 9438 9439 9440 9441 9442
            print(y.numpy())
            # [[[[ 1.  1.  1.  1.  1.  1.  1.]
            #    [ 1.  1.  1.  2.  3.  4.  1.]
            #    [ 1.  1.  5.  6.  7.  8.  1.]
            #    [ 1.  1.  9. 10. 11. 12.  1.]
            #    [ 1.  1.  1.  1.  1.  1.  1.]
            #    [ 1.  1.  1.  1.  1.  1.  1.]]]]

            # example 2
            x_shape = (1, 1, 2, 3)
            x = np.arange(np.prod(x_shape), dtype=np.float32).reshape(x_shape) + 1
            tensor_x = paddle.to_tensor(x)
9443
            y = paddle.fluid.layers.pad2d(tensor_x, paddings=[1, 1, 1, 1], mode='reflect')
9444 9445 9446 9447 9448
            print(y.numpy())
            # [[[[5. 4. 5. 6. 5.]
            #    [2. 1. 2. 3. 2.]
            #    [5. 4. 5. 6. 5.]
            #    [2. 1. 2. 3. 2.]]]]
W
whs 已提交
9449
    """
9450 9451 9452
    if in_dygraph_mode():
        _paddings = paddings.numpy().tolist() if isinstance(
            paddings, Variable) else paddings
W
wanghuancoder 已提交
9453 9454
        return _C_ops.pad2d(input, 'mode', mode, 'pad_value', pad_value,
                            'data_format', data_format, 'paddings', _paddings)
9455

W
wanghuancoder 已提交
9456 9457 9458 9459
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64', 'int32', 'int64'],
        "pad2d")

9460 9461 9462 9463 9464 9465 9466 9467
    attrs = {'mode': mode, 'pad_value': pad_value, 'data_format': data_format}
    inputs = {'X': [input]}
    if isinstance(paddings, Variable):
        inputs['Paddings'] = [paddings]
        attrs['paddings'] = []
    else:
        attrs['paddings'] = paddings

W
whs 已提交
9468
    helper = LayerHelper('pad2d', **locals())
9469 9470 9471 9472

    assert mode in ['reflect', 'edge', 'constant'
                    ], "mode should be one of constant, reflect, edge."

W
whs 已提交
9473
    dtype = helper.input_dtype(input_param_name='input')
X
Xin Pan 已提交
9474
    out = helper.create_variable_for_type_inference(dtype)
9475

W
whs 已提交
9476
    helper.append_op(
9477
        type='pad2d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
W
whs 已提交
9478 9479 9480 9481

    return out


9482
@deprecated(since="2.0.0", update_to="paddle.nn.functional.elu")
9483 9484
def elu(x, alpha=1.0, name=None):
    """
9485 9486 9487 9488
    :alias_main: paddle.nn.functional.elu
	:alias: paddle.nn.functional.elu,paddle.nn.functional.activation.elu
	:old_api: paddle.fluid.layers.elu

9489 9490 9491 9492
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|1.0): ${alpha_comment}
9493
        name(str|None): The default value is None. Normally there is no need for user to set this property.
9494
                        For more information, please refer to :ref:`api_guide_Name`.
9495
    Returns:
9496
        ${out_type}: ${out_comment}
Z
ZhenWang 已提交
9497 9498 9499 9500 9501

    Examples:

        .. code-block:: python

9502
            import paddle.fluid as fluid
9503
            import numpy as np
9504

9505 9506 9507 9508 9509 9510 9511
            input_elu = np.array([[-1,6],[1,15.6]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(input_elu)
                y = fluid.layers.elu(x, alpha=0.2)
                print(y.numpy())
                # [[-0.12642411  6.        ]
                # [ 1.          15.6       ]]
9512 9513
    """
    helper = LayerHelper('elu', **locals())
9514
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
X
Xin Pan 已提交
9515
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9516 9517 9518 9519 9520 9521 9522 9523
    helper.append_op(
        type='elu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'alpha': alpha})
    return out


9524
@deprecated(since="2.0.0", update_to="paddle.nn.functional.relu6")
9525 9526
def relu6(x, threshold=6.0, name=None):
    """
9527

9528
    ${comment}
Z
zhupengyang 已提交
9529

9530 9531
    Args:
        x(${x_type}): ${x_comment}
Z
zhupengyang 已提交
9532 9533 9534 9535
        threshold(float, optional): ${threshold_comment}
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.
9536 9537 9538

    Returns:
        output(${out_type}): ${out_comment}
Z
ZhenWang 已提交
9539 9540 9541 9542 9543

    Examples:

        .. code-block:: python

9544
            import paddle.fluid as fluid
Z
zhupengyang 已提交
9545 9546 9547 9548 9549 9550 9551 9552
            import numpy as np
            in1 = np.array([[-1,0],[2.5,7.8]])
            with fluid.dygraph.guard():
                x1 = fluid.dygraph.to_variable(in1)
                out1 = fluid.layers.relu6(x=x1, threshold=6.0)
                print(out1.numpy())
                # [[0.  0. ]
                #  [2.5 6. ]]
9553
    """
9554 9555
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu6')

9556
    helper = LayerHelper('relu6', **locals())
X
Xin Pan 已提交
9557
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9558 9559 9560 9561
    helper.append_op(
        type='relu6',
        inputs={'X': x},
        outputs={'Out': out},
A
Adam 已提交
9562 9563
        attrs={
            'threshold': threshold,
9564
            'use_mkldnn': _global_flags()["FLAGS_use_mkldnn"]
A
Adam 已提交
9565
        })
9566 9567 9568 9569 9570 9571
    return out


@templatedoc()
def pow(x, factor=1.0, name=None):
    """
9572 9573 9574 9575
    This is Pow Activation Operator.

    :math:`out = x^{factor}`

9576
    Args:
9577 9578 9579
        x(Variable): A ``Tensor`` or ``LoDTensor`` . The data type is ``float32`` or ``float64``.
        factor(float32|Variable, optional): A scalar with type ``float32`` or a ``Tensor`` with shape [1] and type ``float32``.  The exponential factor of Pow. Default 1.0.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
9580 9581

    Returns:
9582
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``.
Z
ZhenWang 已提交
9583 9584 9585 9586 9587

    Examples:

        .. code-block:: python

9588
            import paddle.fluid as fluid
9589

9590
            x = fluid.data(name="x", shape=[32,32], dtype="float32")
9591 9592 9593

            # example 1: argument factor is float
            y_1 = fluid.layers.pow(x, factor=2.0)
9594
            # y_1 is x^{2.0}
9595 9596 9597 9598

            # example 2: argument factor is Variable
            factor_tensor = fluid.layers.fill_constant([1], "float32", 3.0)
            y_2 = fluid.layers.pow(x, factor=factor_tensor)
9599
            # y_2 is x^{3.0}
9600
    """
9601 9602
    check_variable_and_dtype(
        x, 'x', ['int32', 'int64', 'float16', 'float32', 'float64'], 'pow')
9603

9604
    helper = LayerHelper('pow', **locals())
9605 9606 9607
    inputs = {'X': x}
    attrs = {}
    if isinstance(factor, Variable):
9608
        check_variable_and_dtype(factor, 'factor', ['float32'], 'pow')
9609 9610 9611 9612 9613
        factor.stop_gradient = True
        inputs['FactorTensor'] = factor
    else:
        attrs['factor'] = factor

X
Xin Pan 已提交
9614
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9615
    helper.append_op(
9616
        type='pow', inputs=inputs, outputs={'Out': out}, attrs=attrs)
9617 9618 9619 9620
    return out


@templatedoc()
9621
def stanh(x, scale_a=0.67, scale_b=1.7159, name=None):
9622
    """
9623
    stanh activation.
9624

9625 9626 9627 9628 9629 9630 9631 9632 9633 9634
    .. math::

        out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}

    Parameters:
        x (Tensor): The input Tensor with data type float32, float64.
        scale_a (float, optional): The scale factor a of the input. Default is 0.67.
        scale_b (float, optional): The scale factor b of the output. Default is 1.7159.
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.
9635 9636

    Returns:
9637
        A Tensor with the same data type and shape as ``x`` .
Z
ZhenWang 已提交
9638 9639 9640 9641

    Examples:
        .. code-block:: python

N
Noel 已提交
9642
            import paddle
9643

9644 9645
            x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0])
            out = paddle.stanh(x, scale_a=0.67, scale_b=1.72) # [1.00616539, 1.49927628, 1.65933108, 1.70390463]
9646

9647
    """
9648 9649

    if in_dygraph_mode():
W
wanghuancoder 已提交
9650
        return _C_ops.stanh(x, 'scale_a', scale_a, 'scale_b', scale_b)
9651

9652 9653
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'stanh')

9654
    helper = LayerHelper('stanh', **locals())
X
Xin Pan 已提交
9655
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9656 9657 9658 9659 9660 9661 9662 9663 9664 9665 9666 9667 9668
    helper.append_op(
        type='stanh',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'scale_a': scale_a,
               'scale_b': scale_b})
    return out


@templatedoc()
def hard_sigmoid(x, slope=0.2, offset=0.5, name=None):
    """
    ${comment}
9669 9670 9671 9672 9673 9674 9675
    Parameters:
        x (${x_type}): ${x_comment}
        slope (float, optional): ${slope_comment}
        offset (float, optional): ${offset_comment}
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`
9676 9677

    Returns:
9678
        ${out_type}: ${out_comment}
Z
ZhenWang 已提交
9679 9680 9681 9682 9683

    Examples:

        .. code-block:: python

9684
            import paddle.fluid as fluid
9685 9686 9687
            import paddle
            paddle.enable_static()

9688 9689
            data = fluid.layers.fill_constant(shape=[3, 2], value=0.5, dtype='float32') # [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]]
            result = fluid.layers.hard_sigmoid(data) # [[0.6, 0.6], [0.6, 0.6], [0.6, 0.6]]
9690
    """
9691
    if in_dygraph_mode():
W
wanghuancoder 已提交
9692
        return _C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
9693

9694 9695 9696
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_sigmoid')

9697
    helper = LayerHelper('hard_sigmoid', **locals())
X
Xin Pan 已提交
9698
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9699 9700 9701 9702 9703 9704 9705 9706 9707 9708 9709
    helper.append_op(
        type='hard_sigmoid',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': slope,
               'offset': offset})
    return out


@templatedoc()
def swish(x, beta=1.0, name=None):
9710
    r"""
9711 9712 9713 9714
    :alias_main: paddle.nn.functional.swish
	:alias: paddle.nn.functional.swish,paddle.nn.functional.activation.swish
	:old_api: paddle.fluid.layers.swish

9715
    Elementwise swish activation function. See `Searching for Activation Functions <https://arxiv.org/abs/1710.05941>`_ for more details.
9716

9717 9718 9719 9720
    Equation:

    .. math::
        out = \\frac{x}{1 + e^{- beta * x}}
9721

9722
    Args:
9723
        x(Variable): Tensor or LoDTensor, dtype: float32 or float64, the input of swish activation.
9724

9725
        beta(float): Constant beta of swish operator, default 1.0.
9726

9727
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
9728 9729

    Returns:
9730 9731

        Variable: Output of the swish activation, Tensor or LoDTensor, with the same dtype and shape with the input x.
Z
ZhenWang 已提交
9732 9733 9734 9735

    Examples:

        .. code-block:: python
9736

9737 9738 9739
            # declarative mode
            import numpy as np
            from paddle import fluid
9740

9741
            x = fluid.data(name="x", shape=(-1, 3), dtype="float32")
Z
ZhenWang 已提交
9742
            y = fluid.layers.swish(x, beta=2.0)
9743

9744 9745 9746 9747
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            start = fluid.default_startup_program()
            main = fluid.default_main_program()
9748

9749 9750 9751
            data = np.random.randn(2, 3).astype("float32")
            exe.run(start)
            y_np, = exe.run(main, feed={"x": data}, fetch_list=[y])
9752

9753 9754 9755 9756 9757 9758 9759 9760 9761 9762 9763 9764 9765 9766
            data
            # array([[-1.1239197 ,  1.3391294 ,  0.03921051],
            #        [ 1.1970421 ,  0.02440812,  1.2055548 ]], dtype=float32)
            y_np
            # array([[-0.2756806 ,  1.0610548 ,  0.01998957],
            #        [ 0.9193261 ,  0.01235299,  0.9276883 ]], dtype=float32)


        .. code-block:: python

            # imperative mode
            import numpy as np
            from paddle import fluid
            import paddle.fluid.dygraph as dg
9767

9768 9769 9770 9771 9772 9773 9774 9775 9776 9777 9778 9779
            data = np.random.randn(2, 3).astype("float32")
            place = fluid.CPUPlace()
            with dg.guard(place) as g:
                x = dg.to_variable(data)
                y = fluid.layers.swish(x)
                y_np = y.numpy()
            data
            # array([[-0.0816701 ,  1.1603649 , -0.88325626],
            #        [ 0.7522361 ,  1.0978601 ,  0.12987892]], dtype=float32)
            y_np
            # array([[-0.03916847,  0.8835007 , -0.25835553],
            #        [ 0.51126915,  0.82324016,  0.06915068]], dtype=float32)
9780
    """
9781 9782
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')

9783
    helper = LayerHelper('swish', **locals())
X
Xin Pan 已提交
9784
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9785 9786 9787 9788 9789 9790 9791 9792
    helper.append_op(
        type='swish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'slope': beta})
    return out


Z
zhupengyang 已提交
9793
@deprecated(since="2.0.0", update_to="paddle.static.nn.prelu")
J
jerrywgz 已提交
9794
def prelu(x, mode, param_attr=None, name=None):
9795
    r"""
Z
zhupengyang 已提交
9796
    prelu activation.
J
jerrywgz 已提交
9797

H
haowang101779990 已提交
9798
    .. math::
S
sunzhongkai588 已提交
9799
        prelu(x) = max(0, x) + \alpha * min(0, x)
J
jerrywgz 已提交
9800

J
jerrywgz 已提交
9801 9802 9803 9804 9805 9806 9807 9808
    There are three modes for the activation:

    .. code-block:: text

        all: All elements share same alpha.
        channel: Elements in same channel share same alpha.
        element: All elements do not share alpha. Each element has its own alpha.

Z
zhupengyang 已提交
9809
    Parameters:
S
sunzhongkai588 已提交
9810
    
Z
zhupengyang 已提交
9811
        x (Tensor): The input Tensor or LoDTensor with data type float32.
S
sunzhongkai588 已提交
9812

9813
        mode (str): The mode for weight sharing.
S
sunzhongkai588 已提交
9814 9815 9816 9817 9818 9819 9820

        param_attr (ParamAttr|None, optional): The parameter attribute for the learnable \
        weight (alpha), it can be create by ParamAttr. None by default. \
        For detailed information, please refer to :ref:`api_fluid_ParamAttr`.

        name (str, optional): Name for the operation (optional, default is None). \
        For more information, please refer to :ref:`api_guide_Name`.
J
jerrywgz 已提交
9821 9822

    Returns:
Z
zhupengyang 已提交
9823
        Tensor: A tensor with the same shape and data type as x.
J
jerrywgz 已提交
9824 9825 9826 9827 9828

    Examples:

        .. code-block:: python

9829
            import paddle
Z
zhupengyang 已提交
9830 9831 9832 9833 9834

            x = paddle.to_tensor([-1., 2., 3.])
            param = paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(0.2))
            out = paddle.static.nn.prelu(x, 'all', param)
            # [-0.2, 2., 3.]
J
jerrywgz 已提交
9835

J
jerrywgz 已提交
9836
    """
G
Guoxia Wang 已提交
9837
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
9838

J
jerrywgz 已提交
9839 9840 9841 9842
    helper = LayerHelper('prelu', **locals())
    if mode not in ['all', 'channel', 'element']:
        raise ValueError('mode should be one of all, channel, element.')
    alpha_shape = [1]
9843
    # NOTE(): The input of this API should be ``N,C,...`` format,
9844
    # which means x.shape[0] is batch_size and x.shape[0] is channel.
J
jerrywgz 已提交
9845
    if mode == 'channel':
9846 9847 9848 9849 9850
        assert len(
            x.shape
        ) >= 2, "The size of input shape should be equal or larger than 2 in prelu() when mode is 'channel'"
        #NOTE(zhiqiu): The alpha_shape should be [1, channel] + [1] * len(x.shape[2:]).
        # To be consistent with Prelu, it is simplified.
9851 9852
        #NOTE(zhiqiu): Revert shape to [1, channel, 1, 1] for compatibility with saved model of old version.
        alpha_shape = [1, x.shape[1], 1, 1]
J
jerrywgz 已提交
9853
    elif mode == 'element':
9854 9855 9856 9857
        assert len(
            x.shape
        ) >= 1, "The size of input shape should be equal or larger than 1 in prelu() when mode is 'element'"
        alpha_shape = [1] + list(x.shape)[1:]
J
jerrywgz 已提交
9858 9859
    dtype = helper.input_dtype(input_param_name='x')
    alpha = helper.create_parameter(
Q
Qiao Longfei 已提交
9860
        attr=helper.param_attr,
J
jerrywgz 已提交
9861
        shape=alpha_shape,
G
Guoxia Wang 已提交
9862
        dtype=dtype,
J
jerrywgz 已提交
9863
        is_bias=False,
9864
        default_initializer=Constant(0.25))
X
Xin Pan 已提交
9865
    out = helper.create_variable_for_type_inference(dtype)
J
jerrywgz 已提交
9866 9867 9868 9869 9870 9871 9872 9873 9874
    helper.append_op(
        type="prelu",
        inputs={"X": x,
                'Alpha': alpha},
        attrs={"mode": mode},
        outputs={"Out": out})
    return out


9875 9876 9877 9878 9879 9880 9881 9882
@templatedoc()
def brelu(x, t_min=0.0, t_max=24.0, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        t_min(${t_min_type}|0.0): ${t_min_comment}
        t_max(${t_max_type}|24.0): ${t_max_comment}
9883
        name(str|None): The default value is None. Normally there is no need for user to set this property.
9884
                        For more information, please refer to :ref:`api_guide_Name`.
9885
    Returns:
9886
        ${out_type}: ${out_comment}
9887 9888 9889

    Examples:

9890
    .. code-block:: python
9891

9892
            import paddle.fluid as fluid
9893
            import paddle
9894
            import numpy as np
9895
            paddle.enable_static()
9896

9897 9898 9899 9900 9901 9902
            input_brelu = np.array([[-1,6],[1,15.6]])
            with fluid.dygraph.guard():
                x = fluid.dygraph.to_variable(input_brelu)
                y = fluid.layers.brelu(x, t_min=1.0, t_max=10.0)
                print(y.numpy())
                #[[ 1.  6.]
9903
                #[ 1. 10.]]
9904
    """
9905
    if in_dygraph_mode():
W
wanghuancoder 已提交
9906
        return _C_ops.brelu(x, 't_min', t_min, 't_max', t_max)
9907

9908 9909
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'brelu')

9910
    helper = LayerHelper('brelu', **locals())
X
Xin Pan 已提交
9911
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9912 9913 9914 9915 9916 9917 9918 9919 9920
    helper.append_op(
        type='brelu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'t_min': t_min,
               't_max': t_max})
    return out


9921
@deprecated(since="2.0.0", update_to="paddle.nn.functional.leaky_relu")
9922 9923 9924 9925 9926 9927 9928
@templatedoc()
def leaky_relu(x, alpha=0.02, name=None):
    """
    ${comment}
    Args:
        x(${x_type}): ${x_comment}
        alpha(${alpha_type}|0.02): ${alpha_comment}
W
Wilber 已提交
9929 9930
        name(str|None): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`

9931
    Returns:
9932
        output(${out_type}): ${out_comment}
9933 9934 9935 9936 9937

    Examples:

        .. code-block:: python

N
Noel 已提交
9938
            import paddle
W
Wilber 已提交
9939

N
Noel 已提交
9940 9941 9942
            x = paddle.to_tensor([[-1, 2], [3, -4]], dtype='float32')
            y = paddle.fluid.layers.leaky_relu(x, alpha=0.1)
            print(y) # [[-0.1, 2], [3, -0.4]]
W
Wilber 已提交
9943

9944
    """
9945
    return paddle.nn.functional.leaky_relu(x, alpha, name)
9946 9947 9948


def soft_relu(x, threshold=40.0, name=None):
9949
    r"""
9950

9951 9952 9953 9954
    SoftRelu Activation Operator.

    $out = \ln(1 + \exp(\max(\min(x, threshold), -threshold)))$

9955
    Args:
9956 9957 9958 9959
        x(Variable): Input of soft_relu operator. Data type can be float32, float64.
        threshold(float, optional): The threshold value of soft_relu, default value being 40.0.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name` .

9960
    Returns:
9961
        Variable(Tensor|LoDTensor)): Output of soft_relu operator, shape and LoD same as input.
9962 9963 9964

    Examples:

9965 9966
        .. code-block:: python

9967
            import paddle.fluid as fluid
9968
            import numpy as np
9969 9970
            import numpy as np
            import paddle
9971

9972
            paddle.enable_static()
9973 9974 9975 9976 9977 9978 9979 9980 9981 9982
            inputs = fluid.layers.data(name="x", shape=[2, 2], dtype="float32")
            output = fluid.layers.soft_relu(inputs, threshold=20.0)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.array([[0, 1],[2, 3]]).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([[0.6931472, 1.3132616], [2.126928 , 3.0485873]], dtype=float32)]
9983
    """
9984 9985 9986
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'soft_relu')

9987
    helper = LayerHelper('soft_relu', **locals())
X
Xin Pan 已提交
9988
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
9989 9990 9991 9992 9993 9994 9995 9996
    helper.append_op(
        type='soft_relu',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold})
    return out


9997
def flatten(x, axis=1, name=None):
9998
    r"""
9999 10000 10001
    **Flatten op**

    Flatten the input tensor into a 2D matrix.
M
minqiyang 已提交
10002

H
haowang101779990 已提交
10003
    For Example:
M
minqiyang 已提交
10004

H
haowang101779990 已提交
10005
    .. code-block:: text
10006

H
haowang101779990 已提交
10007 10008 10009 10010 10011 10012 10013 10014 10015 10016 10017 10018 10019 10020 10021 10022 10023 10024 10025 10026 10027
        Case 1:

          Given
            X.shape = (3, 100, 100, 4)

          and
            axis = 2

          We get:
            Out.shape = (3 * 100, 4 * 100)

        Case 2:

          Given
            X.shape = (3, 100, 100, 4)

          and
            axis = 0

          We get:
            Out.shape = (1, 3 * 100 * 100 * 4)
10028 10029

    Args:
10030
        x (Variable): A tensor of rank >= axis. A tensor with type float32,
10031
                      float64, int8, int32, int64, uint8.
10032 10033
        axis (int): Indicate up to which input dimensions (exclusive) should
                    be flattened to the outer dimension of the output.
10034
                    The value for axis must be in the range [0, R], where R
10035 10036 10037
                    is the rank of the input tensor. Default: 1.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
10038 10039

    Returns:
H
haowang101779990 已提交
10040 10041 10042
        Variable: A 2D tensor with the contents of the input tensor, with input \
                  dimensions up to axis flattened to the outer dimension of \
                  the output and remaining input dimensions flattened into the \
10043
                  inner dimension of the output. A Tensor with type same as input x.
10044 10045 10046

    Raises:
        ValueError: If x is not a variable.
10047
        ValueError: If axis is not in range [0, rank(x)].
10048 10049 10050 10051 10052

    Examples:

        .. code-block:: python

10053
            import paddle
10054
            import paddle.fluid as fluid
10055
            paddle.enable_static()
B
Bai Yifan 已提交
10056
            x = fluid.data(name="x", shape=[4, 4, 3], dtype="float32")
10057
            # x shape is [4, 4, 3]
10058
            out = fluid.layers.flatten(x=x, axis=2)
10059
            # out shape is [16, 3]
10060
    """
10061
    check_variable_and_dtype(
10062 10063
        x, 'x', ['float32', 'float64', 'int8', 'int32', 'int64', 'uint8'],
        'flatten')
10064 10065 10066 10067 10068 10069 10070 10071
    helper = LayerHelper('flatten', **locals())

    if not (isinstance(x, Variable)):
        raise ValueError("The input x should be a Variable")

    if not (isinstance(axis, int)) or axis > len(x.shape) or axis < 0:
        raise ValueError("The axis should be a int, and in range [0, rank(x)]")

X
Xin Pan 已提交
10072 10073
    out = helper.create_variable_for_type_inference(x.dtype)
    x_shape = helper.create_variable_for_type_inference(x.dtype)
10074
    helper.append_op(
10075
        type='flatten2',
10076
        inputs={"X": x},
10077 10078
        outputs={'Out': out,
                 'XShape': x_shape},
10079 10080
        attrs={"axis": axis})
    return out
X
Xin Pan 已提交
10081 10082


L
Leo Chen 已提交
10083
def stack(x, axis=0, name=None):
S
sneaxiy 已提交
10084
    """
10085

10086
    This OP stacks all the inputs :code:`x` along axis.
C
chengduozh 已提交
10087

C
chengduozh 已提交
10088 10089 10090
    .. code-block:: text

        Case 1:
10091

C
chengduozh 已提交
10092
          Input:
10093
            x[0].shape = [1, 2]
C
chengduozh 已提交
10094
            x[0].data = [ [1.0 , 2.0 ] ]
10095
            x[1].shape = [1, 2]
C
chengduozh 已提交
10096
            x[1].data = [ [3.0 , 4.0 ] ]
10097
            x[2].shape = [1, 2]
C
chengduozh 已提交
10098 10099 10100 10101 10102 10103
            x[2].data = [ [5.0 , 6.0 ] ]

          Attrs:
            axis = 0

          Output:
10104
            Out.dims = [3, 1, 2]
C
chengduozh 已提交
10105 10106 10107
            Out.data =[ [ [1.0, 2.0] ],
                        [ [3.0, 4.0] ],
                        [ [5.0, 6.0] ] ]
10108

C
chengduozh 已提交
10109 10110

        Case 2:
10111 10112 10113 10114


          Input:
            x[0].shape = [1, 2]
C
chengduozh 已提交
10115
            x[0].data = [ [1.0 , 2.0 ] ]
10116
            x[1].shape = [1, 2]
C
chengduozh 已提交
10117
            x[1].data = [ [3.0 , 4.0 ] ]
10118
            x[2].shape = [1, 2]
C
chengduozh 已提交
10119
            x[2].data = [ [5.0 , 6.0 ] ]
10120

C
chengduozh 已提交
10121 10122 10123 10124 10125

          Attrs:
            axis = 1 or axis = -2

          Output:
10126
            Out.shape = [1, 3, 2]
C
chengduozh 已提交
10127 10128 10129
            Out.data =[ [ [1.0, 2.0]
                          [3.0, 4.0]
                          [5.0, 6.0] ] ]
10130

C
chengduozh 已提交
10131

S
sneaxiy 已提交
10132
    Args:
L
Leo Chen 已提交
10133
        x (list(Variable)|tuple(Variable)): Input :code:`x` can be a :code:`list` or :code:`tuple` of Tensors, the shapes of all these Tensors
10134 10135 10136
                                     must be the same. Supposing input is N dims
                                     Tensors :math:`[d_0, d_1, ..., d_{n-1}]`, the output is N+1 dims
                                     Tensor :math:`[d_0, d_1, d_{axis-1}, len(x), d_{axis}, ..., d_{n-1}]`.
10137
                                     Supported data types: float32, float64, int32, int64.
L
Leo Chen 已提交
10138 10139 10140 10141 10142
        axis (int, optional): The axis along which all inputs are stacked. ``axis`` range is ``[-(R+1), R+1)``,
                              where ``R`` is the number of dimensions of the first input tensor ``x[0]``. 
                              If ``axis < 0``, ``axis = axis+R+1``. The default value of axis is 0.
        name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
    
10143

S
sneaxiy 已提交
10144
    Returns:
10145
        Variable: The stacked Tensor, has same data type with input Tensors. Output dim is :math:`rank(x[0])+1`.
10146

10147 10148 10149
    Examples:
        .. code-block:: python

10150
            import paddle.fluid as fluid
10151
            import paddle.fluid.layers as layers
10152 10153 10154 10155 10156 10157 10158 10159
            # set batch size=None
            x1 = fluid.data(name='x1', shape=[None, 1, 2], dtype='int32')
            x2 = fluid.data(name='x2', shape=[None, 1, 2], dtype='int32')
            # stack Tensor list
            data = layers.stack([x1,x2]) # stack according to axis 0, data.shape=[2, None, 1, 2]

            data = layers.stack([x1,x2], axis=1) # stack according to axis 1, data.shape=[None, 2, 1, 2]

10160

S
sneaxiy 已提交
10161
    """
X
Xin Pan 已提交
10162
    axis = 0 if axis is None else axis
L
Leo Chen 已提交
10163 10164

    if in_dygraph_mode():
W
wanghuancoder 已提交
10165
        return _C_ops.stack(x, 'axis', axis)
L
Leo Chen 已提交
10166

L
Leo Chen 已提交
10167 10168 10169 10170 10171 10172 10173 10174 10175 10176 10177 10178
    if not isinstance(x, list) and not isinstance(x, tuple):
        # NOTE:(zhiqiu) Only support Variable as input if the Variable is a LOD_TENSOR_ARRAY create by create_array, array_write, array_read, etc.
        # In that case, Variable is array of tensors indeed.
        if isinstance(x, Variable) and x.desc.type(
        ) == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
            x = [x]
        else:
            raise TypeError("The type of '%s' in %s must be %s, but received %s"
                            % ('x', 'stack',
                               'list[Tensor], tuple[Tensor] or TensorArray',
                               type(x)))

L
Leo Chen 已提交
10179
    helper = LayerHelper('stack', **locals())
L
Leo Chen 已提交
10180

X
Xin Pan 已提交
10181
    out = helper.create_variable_for_type_inference(x[0].dtype)
L
Leo Chen 已提交
10182
    if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY:
10183 10184 10185
        assert len(x) == 1, "If the elements of 'x' in stack are Variable(LoDTensorArray), " \
                            "number of the elements must be 1, but received %s." % len(x)
        out_index = helper.create_variable_for_type_inference(dtype="int32")
X
xujiaqi01 已提交
10186 10187 10188 10189 10190

        for i in x:
            check_variable_and_dtype(i, 'x', \
                ['float16', 'float32', 'float64', 'int32', 'int64'], 'stack')

10191 10192 10193 10194 10195 10196 10197 10198 10199 10200 10201 10202 10203
        helper.append_op(
            type='tensor_array_to_tensor',
            inputs={'X': x[0]},
            outputs={'Out': [out],
                     'OutIndex': [out_index]},
            attrs={'axis': axis,
                   'use_stack': True})
    else:
        helper.append_op(
            type='stack',
            inputs={'X': x},
            outputs={'Y': out},
            attrs={'axis': axis})
10204

X
Xin Pan 已提交
10205
    return out
D
dzhwinter 已提交
10206 10207


J
Jiawei Wang 已提交
10208
@templatedoc(op_type="filter_by_instag")
Y
yaoxuefeng 已提交
10209
def filter_by_instag(ins, ins_tag, filter_tag, is_lod, out_val_if_empty=0):
J
Jiawei Wang 已提交
10210 10211
    """
    **Filter By Instag Layer**
10212 10213 10214

    This function filter a batch of ins by instag,
    There are multiple ins, and every ins belongs to some tags.
J
Jiawei Wang 已提交
10215 10216
    We can specify some tags we want. So the ins which belongs to that tags
    remains in the output, and others removed.
10217 10218 10219

    For example, one batch has 4 ins. Every ins has its tag list.

J
Jiawei Wang 已提交
10220 10221 10222 10223 10224 10225 10226 10227 10228 10229 10230 10231 10232 10233 10234
       | Ins   |   Ins_Tag |
       |:-----:|:------:|
       |  0    |   0, 1 |
       |  1    |   1, 3 |
       |  2    |   0, 3 |
       |  3    |   2, 6 |

    And Lod is [1,1,1,1]

    And the filter tags [1]

    From the definition above, ins which has tag 1 can pass the filter
    So Ins 0 and Ins 1 can pass and be seen in the output,
    Ins 2 and 3 cannot pass because they do not has tag 1.

10235
    Actually, if is_lod is false, it is normal tensor that equals to
J
Jiawei Wang 已提交
10236 10237 10238 10239 10240 10241 10242
    lod_tensor with all 1, similar to the example above.

    Args:
        ins (Variable): Input Variable (LoDTensor), usually it is 2D tensor
                        And first dimension can have lod info or not.
        ins_tag (Variable): Input Variable (LoDTensor), usually it is 1D list
                        And split them by lod info
10243
        filter_tag (Variable): Input Variable (1D Tensor/List), usually it is
J
Jiawei Wang 已提交
10244 10245
                        list that holds the tags.
        is_lod (Bool): Boolean value to indicate ins is lod tensor or not.
Y
yaoxuefeng 已提交
10246 10247
        out_val_if_empty(Int64): If the output after filter is empty, this value
                        will be set to Output tensor.
J
Jiawei Wang 已提交
10248 10249 10250 10251 10252 10253 10254 10255 10256 10257 10258 10259

    Returns:
        Variable: filtered ins (LoDTensor) and loss weight (Tensor)

    Examples:
        .. code-block:: python

          import paddle.fluid.layers as layers
          ins = layers.data(name='Ins', shape=[-1,32], lod_level=0, dtype='float64')
          ins_tag = layers.data(name='Ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
          filter_tag = layers.data(name='Filter_tag', shape=[-1,16], dtype='int64')
          out, loss_weight = layers.filter_by_instag(ins,  ins_tag,  filter_tag, True)
10260

J
Jiawei Wang 已提交
10261 10262 10263 10264 10265 10266 10267 10268 10269 10270 10271 10272 10273 10274
    """
    helper = LayerHelper('filter_by_instag', **locals())

    out = helper.create_variable_for_type_inference(dtype=ins.dtype)
    loss_weight = helper.create_variable_for_type_inference(dtype=np.float64)
    mmap = helper.create_variable_for_type_inference(dtype=ins_tag.dtype)
    helper.append_op(
        type='filter_by_instag',
        inputs={'Ins': ins,
                'Ins_tag': ins_tag,
                'Filter_tag': filter_tag},
        outputs={'Out': out,
                 'LossWeight': loss_weight,
                 'IndexMap': mmap},
Y
yaoxuefeng 已提交
10275 10276
        attrs={'is_lod': is_lod,
               'out_val_if_empty': out_val_if_empty})
J
Jiawei Wang 已提交
10277 10278 10279 10280

    return [out, loss_weight]


D
dzhwinter 已提交
10281 10282
def unstack(x, axis=0, num=None):
    """
10283 10284 10285 10286
    :alias_main: paddle.unstack
	:alias: paddle.unstack,paddle.tensor.unstack,paddle.tensor.manipulation.unstack
	:old_api: paddle.fluid.layers.unstack

D
dzhwinter 已提交
10287 10288
    **UnStack Layer**

10289
    This layer unstacks input Tensor :code:`x` into several Tensors along :code:`axis`.
M
minqiyang 已提交
10290

D
dzhwinter 已提交
10291 10292 10293
    If :code:`axis` < 0, it would be replaced with :code:`axis+rank(x)`.
    If :code:`num` is None, it would be inferred from :code:`x.shape[axis]`,
    and if :code:`x.shape[axis]` <= 0 or is unknown, :code:`ValueError` is
M
minqiyang 已提交
10294
    raised.
D
dzhwinter 已提交
10295 10296

    Args:
M
MRXLT 已提交
10297
        x (Tensor): Input Tensor. It is a N-D Tensors of data types float32, float64, int32, int64.
D
dzhwinter 已提交
10298 10299
        axis (int): The axis along which the input is unstacked.
        num (int|None): The number of output variables.
M
minqiyang 已提交
10300

D
dzhwinter 已提交
10301
    Returns:
M
MRXLT 已提交
10302
        list(Tensor): The unstacked Tensors list. The list elements are N-D Tensors of data types float32, float64, int32, int64.
10303 10304 10305

    Raises:
        ValueError: If x.shape[axis] <= 0 or axis is not in range [-D, D).
M
minqiyang 已提交
10306

10307 10308 10309
    Examples:
        .. code-block:: python

10310 10311 10312
            import paddle
            x = paddle.ones(name='x', shape=[2, 3, 5], dtype='float32')  # create a tensor with shape=[2, 3, 5]
            y = paddle.unstack(x, axis=1)  # unstack with second axis, which results 3 tensors with shape=[2, 5]
D
dzhwinter 已提交
10313

10314
    """
M
MRXLT 已提交
10315 10316 10317
    if in_dygraph_mode():
        if num == None:
            num = x.shape[axis]
10318 10319
        if num == 0:
            return []
W
wanghuancoder 已提交
10320
        return _C_ops.unstack(x, num, 'axis', int(axis), 'num', num)
M
MRXLT 已提交
10321

D
dzhwinter 已提交
10322 10323 10324 10325 10326 10327 10328 10329
    helper = LayerHelper('unstack', **locals())
    if num is None:
        if axis is None or x.shape[axis] <= 0:
            raise ValueError('unknown unstack number')
        else:
            num = x.shape[axis]

    outs = []
Y
Yibing Liu 已提交
10330
    for _ in range(num):
X
Xin Pan 已提交
10331
        outs.append(helper.create_variable_for_type_inference(x.dtype))
D
dzhwinter 已提交
10332 10333 10334 10335 10336 10337 10338 10339

    helper.append_op(
        type='unstack',
        inputs={'X': [x]},
        outputs={'Y': outs},
        attrs={'axis': axis,
               'num': num})
    return outs
W
whs 已提交
10340 10341


10342
@deprecated(since='2.0.0', update_to="paddle.expand")
W
whs 已提交
10343
def expand(x, expand_times, name=None):
10344
    """
10345 10346 10347 10348
    :alias_main: paddle.expand
	:alias: paddle.expand,paddle.tensor.expand,paddle.tensor.manipulation.expand
	:old_api: paddle.fluid.layers.expand

10349 10350 10351
    This operation tiles ``x`` multiple times according to the parameter ``expand_times``.
    The times number for each dimension of ``x`` is set by the parameter ``expand_times``.
    The rank of ``x`` should be less than or equal to 6. Please note that size of ``expand_times`` must be the same
W
whs 已提交
10352 10353 10354 10355 10356 10357
    with X's rank. Following is a using case:


    .. code-block:: text

        Input(X) is a 3-D tensor with shape [2, 3, 1]:
M
minqiyang 已提交
10358

W
whs 已提交
10359 10360 10361 10362
                [
                   [[1], [2], [3]],
                   [[4], [5], [6]]
                ]
M
minqiyang 已提交
10363

W
whs 已提交
10364
        Attr(expand_times):  [1, 2, 2]
M
minqiyang 已提交
10365

W
whs 已提交
10366
        Output(Out) is a 3-D tensor with shape [2, 6, 2]:
M
minqiyang 已提交
10367

W
whs 已提交
10368 10369 10370 10371
                [
                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
                ]
M
minqiyang 已提交
10372

W
whs 已提交
10373
    Args:
10374 10375 10376 10377 10378
        x (Variable): A ``Tensor`` or ``LoDTensor`` with dimension in [1, 6]. The data type is ``bool``, ``float32``, ``float64`` or ``int32`` .
        expand_times (list|tuple|Variable): The data type is ``int32`` . If ``expand_times`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``expand_times`` is an Variable, it should be an 1-D Tensor.
                Expand times number for each dimension of ``x`` .
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
W
whs 已提交
10379 10380

    Returns:
10381
        Variable: A ``Tensor`` or ``LoDTensor``. The data type is same as ``x``. After expanding, size of each dimension of output is equal to the size of the corresponding dimension of ``x`` multiplying the corresponding value given by ``expand_times`` .
W
whs 已提交
10382

10383 10384 10385
    Raises:
        TypeError: The type of ``expand_times`` must be list, tuple or Variable.
        ValueError: The elements of ``expand_times`` cannot be negative.
W
whs 已提交
10386 10387 10388

    Examples:
        .. code-block:: python
L
liym27 已提交
10389

W
wangchaochaohu 已提交
10390
            import paddle.fluid as fluid
L
liym27 已提交
10391 10392 10393 10394

            # example 1:
            data_1 = fluid.layers.fill_constant(shape=[2, 3, 1], dtype='int32', value=0)
            expanded_1 = fluid.layers.expand(data_1, expand_times=[1, 2, 2])
10395
            # the shape of expanded_1 is [2, 6, 2].
L
liym27 已提交
10396 10397 10398 10399 10400

            # example 2:
            data_2 = fluid.layers.fill_constant(shape=[12, 14], dtype="int32", value=3)
            expand_times = fluid.layers.fill_constant(shape=[2], dtype="int32", value=4)
            expanded_2 = fluid.layers.expand(data_2, expand_times=expand_times)
10401
            # the shape of expanded_2 is [48, 56].
W
whs 已提交
10402
    """
10403
    if in_dygraph_mode():
10404 10405
        attrs = ()
        expand_times_tensor = None
10406
        if isinstance(expand_times, (list, tuple)):
10407
            expand_times = [
10408
                item.numpy().item(0) if isinstance(item, Variable) else item
10409 10410
                for item in expand_times
            ]
10411 10412 10413 10414
            attrs += ('expand_times', expand_times)
        elif isinstance(expand_times, Variable):
            expand_times_tensor = expand_times
            expand_times_tensor.stop_gradient = True
10415

W
wanghuancoder 已提交
10416
        return _C_ops.expand(x, expand_times_tensor, *attrs)
10417

10418 10419
    inputs = {"X": [x]}
    attrs = {}
10420
    check_variable_and_dtype(
10421 10422
        x, 'x', ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
        'expand')
10423
    check_type(expand_times, 'expand_times', (list, tuple, Variable), 'expand')
W
wangchaochaohu 已提交
10424 10425 10426
    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == True:
        raise ValueError(
            "expand op bool date type must set the stop_gradient to be False")
L
liym27 已提交
10427

W
whs 已提交
10428
    helper = LayerHelper('expand', input=x, **locals())
L
liym27 已提交
10429 10430 10431 10432 10433 10434 10435 10436 10437

    def get_attr_expand_times(list_expand_times):
        attrs_expand_times = []
        for idx, times in enumerate(list_expand_times):
            if isinstance(times, Variable):
                attrs_expand_times.append(-1)
            else:
                attrs_expand_times.append(times)
                assert times > 0, (
T
tianshuo78520a 已提交
10438
                    "Each element given in expand_times must not be negative.")
L
liym27 已提交
10439 10440
        return attrs_expand_times

L
Leo Chen 已提交
10441 10442 10443 10444 10445 10446
    if isinstance(expand_times, Variable):
        expand_times.stop_gradient = True
        inputs['ExpandTimes'] = expand_times
    elif isinstance(expand_times, (list, tuple)):
        attrs['expand_times'] = get_attr_expand_times(expand_times)
        if utils._contain_var(expand_times):
10447
            inputs['expand_times_tensor'] = utils._convert_to_tensor_list(
L
Leo Chen 已提交
10448
                expand_times)
10449

L
liym27 已提交
10450 10451
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
W
whs 已提交
10452
    helper.append_op(
10453
        type='expand', inputs=inputs, outputs={'Out': out}, attrs=attrs)
W
whs 已提交
10454
    return out
S
sneaxiy 已提交
10455 10456


10457
@deprecated(since='2.0.0', update_to="paddle.expand_as")
10458 10459
def expand_as(x, target_tensor, name=None):
    """
10460 10461 10462 10463
    :alias_main: paddle.expand_as
	:alias: paddle.expand_as,paddle.tensor.expand_as,paddle.tensor.manipulation.expand_as
	:old_api: paddle.fluid.layers.expand_as
    
10464 10465 10466 10467 10468 10469 10470 10471 10472 10473 10474 10475 10476 10477 10478
    expand_as operator tiles to the input by given expand tensor. You should set expand tensor
    for each dimension by providing tensor 'target_tensor'. The rank of X
    should be in [1, 6]. Please note that size of 'target_tensor' must be the same
    with X's rank. Following is a using case:


    .. code-block:: text

        Input(X) is a 3-D tensor with shape [2, 3, 1]:

                [
                   [[1], [2], [3]],
                   [[4], [5], [6]]
                ]

10479
        target_tensor's shape:  [2, 6, 2]
10480 10481 10482 10483 10484 10485 10486

        Output(Out) is a 3-D tensor with shape [2, 6, 2]:

                [
                    [[1, 1], [2, 2], [3, 3], [1, 1], [2, 2], [3, 3]],
                    [[4, 4], [5, 5], [6, 6], [4, 4], [5, 5], [6, 6]]
                ]
10487

10488 10489 10490 10491 10492 10493 10494 10495

    Args:
        x (Variable): A Tensor with dtype float64, float32, int32.
        A tensor with rank in [1, 6].
        target_tensor (Variable): A Tensor with dtype float64, float32, int32.
        target_tensor for expanding to Input(X). Only use target_tensor'shape.

    Returns:
10496 10497
        Variable: A Tensor with dtype float64, float32, int32.
        After expanding, size of each dimension of Output(Out) is equal to the size
10498 10499 10500 10501 10502 10503
        of the corresponding dimension of target_tensor multiplying the corresponding
        value given by target_tensor.


    Examples:
        .. code-block:: python
10504

10505 10506 10507 10508
            import paddle
            import paddle.fluid as fluid
            import numpy as np
            paddle.enable_static()
10509

10510 10511 10512 10513 10514 10515 10516 10517 10518 10519 10520 10521 10522
            data = fluid.layers.data(name="data", shape=[-1,10], dtype='float64')
            target_tensor = fluid.layers.data(
              name="target_tensor", shape=[-1,20], dtype='float64')
            result = fluid.layers.expand_as(x=data, target_tensor=target_tensor)
            use_cuda = False
            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            x = np.random.rand(3,10)
            y = np.random.rand(3,20)
            output= exe.run(feed={"data":x,"target_tensor":y},fetch_list=[result.name])
            print(output[0].shape)
            #(3,20)
10523 10524

    """
10525
    if in_dygraph_mode():
W
wanghuancoder 已提交
10526
        return _C_ops.expand_as(x, target_tensor)
10527

10528 10529 10530 10531 10532
    check_variable_and_dtype(
        x, 'x', ['float32', 'float64', 'int32', 'int64', 'bool'], 'expand_as')
    check_variable_and_dtype(target_tensor, 'target_tensor',
                             ['float32', 'float64', 'int32', 'int64', 'bool'],
                             'expand_as')
10533 10534 10535 10536 10537 10538 10539 10540
    helper = LayerHelper('expand_as', input=x, **locals())
    dtype = helper.input_dtype(input_param_name='x')
    out = helper.create_variable_for_type_inference(dtype)
    inputs = {'X': x, 'target_tensor': target_tensor}
    helper.append_op(type='expand_as', inputs=inputs, outputs={'Out': out})
    return out


G
fix  
gongweibao 已提交
10541 10542 10543
from paddle.fluid.framework import convert_np_dtype_to_dtype_


10544
@deprecated(since='1.8.0', update_to="paddle.uniform")
G
gongweibao 已提交
10545
@templatedoc()
G
fix  
gongweibao 已提交
10546 10547 10548 10549 10550 10551 10552 10553 10554
def uniform_random_batch_size_like(input,
                                   shape,
                                   dtype='float32',
                                   input_dim_idx=0,
                                   output_dim_idx=0,
                                   min=-1.0,
                                   max=1.0,
                                   seed=0):
    """
10555 10556 10557 10558 10559 10560
    This OP initializes a variable with random values sampled from a
    uniform distribution in the range [min, max). The input_dim_idx used to get the input dimension value which will be used to resize the output dimension.

    .. code-block:: text

        *Case 1:
G
fix  
gongweibao 已提交
10561

10562 10563 10564 10565 10566
            Given:
                input =[[0.946741  , 0.1357001 , 0.38086128]]    # input.shape=[1,3]
                shape=[2,4]

            result.shape[output_dim_idx] = input.shape[input_dim_idx],
10567
            output_dim_idx = 0,
10568
            input_dim_idx = 0,
10569
            result.shape[0] = input.shape[0],
10570 10571
            then:
                result=[[ 0.3443427 , -0.23056602,  0.3477049 ,  0.06139076]]    # result.shape=[1,4]
10572

10573
       *Case 2:
10574

10575 10576 10577 10578 10579
           Given:
               input =[[0.946741  , 0.1357001 , 0.38086128]]     # input.shape=[1,3]
               shape=[2,4]
               input_dim_idx=1
               output_dim_idx=1
10580

10581
           result.shape[output_dim_idx] = input.shape[input_dim_idx],
10582
           output_dim_idx = 1,
10583
           input_dim_idx = 1,
10584
           result.shape[1] = input.shape[1],
10585 10586 10587
           then:
               result=[[-0.23133647, -0.84195036,  0.21441269],
                       [-0.08774924,  0.25605237, -0.09403259]]    # result.shape=[2,3]
G
fix  
gongweibao 已提交
10588
    Args:
10589 10590
        input (Variable): A Tensor. Supported data types: float32, float64.
        shape (tuple|list): A python list or python tuple. The shape of the output Tensor, the data type is int.
10591
        input_dim_idx (int, optional): An index used to get the input dimension value which will be used to resize the output dimension. Default  0.
10592 10593 10594 10595 10596
        output_dim_idx (int, optional): An index used to indicate the specific dimension that will be replaced by corresponding input dimension value. Default 0.
        min (float, optional): The lower bound on the range of random values to generate, the min is included in the range. Default -1.0.
        max (float, optional): The upper bound on the range of random values to generate, the max is excluded in the range. Default 1.0.
        seed (int, optional):  Random seed used for generating samples. 0 means use a seed generated by the system.Note that if seed is not 0, this operator will always generate the same random numbers every time.
        dtype(np.dtype|core.VarDesc.VarType|str, optional): The data type of output Tensor. Supported data types: float32, float64. Default float32.
G
fix  
gongweibao 已提交
10597
    Returns:
10598
        Variable: A Tensor of the specified shape filled with uniform_random values. The shape of the Tensor is determined by the shape parameter and the specified dimension of the input Tensor.
G
fix  
gongweibao 已提交
10599

10600 10601 10602
    Examples:
        .. code-block:: python

10603
            import paddle
10604
            import paddle.fluid as fluid
10605
            paddle.enable_static()
10606 10607

            # example 1:
10608 10609
            input = fluid.data(name="input", shape=[1, 3], dtype='float32')
            out_1 = fluid.layers.uniform_random_batch_size_like(input, [2, 4]) # out_1.shape=[1, 4]
10610

10611
            # example 2:
10612 10613
            out_2 = fluid.layers.uniform_random_batch_size_like(input, [2, 4], input_dim_idx=1, output_dim_idx=1) # out_2.shape=[2, 3]

10614

G
fix  
gongweibao 已提交
10615
    """
10616
    check_variable_and_dtype(input, 'Input', ("float32", 'float64', "uint16"),
10617 10618
                             'uniform_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple), 'uniform_random_batch_size_like')
10619
    check_dtype(dtype, 'dtype', ('float32', 'float64', "uint16"),
10620
                'uniform_random_batch_size_like')
G
fix  
gongweibao 已提交
10621 10622

    helper = LayerHelper('uniform_random_batch_size_like', **locals())
X
Xin Pan 已提交
10623
    out = helper.create_variable_for_type_inference(dtype)
G
fix  
gongweibao 已提交
10624 10625 10626 10627 10628 10629 10630 10631 10632 10633 10634 10635 10636 10637 10638 10639
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='uniform_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'min': min,
            'max': max,
            'seed': seed,
            'dtype': c_dtype
        })

    return out
G
fix  
gongweibao 已提交
10640 10641


10642
@deprecated(since="2.0.0", update_to="paddle.normal")
G
gongweibao 已提交
10643
@templatedoc()
10644 10645 10646 10647 10648 10649
def gaussian_random(shape,
                    mean=0.0,
                    std=1.0,
                    seed=0,
                    dtype='float32',
                    name=None):
G
fix  
gongweibao 已提交
10650
    """
10651 10652
    This OP returns a Tensor filled with random values sampled from a Gaussian
    distribution, with ``shape`` and ``dtype``.
G
fix  
gongweibao 已提交
10653 10654

    Args:
10655 10656 10657 10658 10659 10660 10661 10662 10663 10664 10665 10666 10667 10668 10669
        shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
            is a list or tuple, the elements of it should be integers or Tensors
            (with the shape [1], and the data type int32 or int64). If ``shape``
            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
            int64).
        mean(float|int, optional): Mean of the output tensor, default is 0.0.
        std(float|int, optional): Standard deviation of the output tensor, default
            is 1.0.
        seed(int, optional): ${seed_comment}
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
            the output Tensor. Supported data types: float32, float64.
            Default is float32.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor filled with random values sampled from a Gaussian
        distribution, with ``shape`` and ``dtype``.

    Examples:
       .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # example 1:
            # attr shape is a list which doesn't contain Tensor.
            result_1 = fluid.layers.gaussian_random(shape=[3, 4])
            # [[-0.31261674,  1.8736548,  -0.6274357,   0.96988016],
            #  [-0.12294637,  0.9554768,   1.5690808,  -1.2894802 ],
            #  [-0.60082096, -0.61138713,  1.5345167,  -0.21834975]]

            # example 2:
            # attr shape is a list which contains Tensor.
            dim_1 = fluid.layers.fill_constant([1], "int64", 2)
            dim_2 = fluid.layers.fill_constant([1], "int32", 3)
            result_2 = fluid.layers.gaussian_random(shape=[dim_1, dim_2])
            # [[ 0.51398206, -0.3389769,   0.23597084],
            #  [ 1.0388143,  -1.2015356,  -1.0499583 ]]

            # example 3:
            # attr shape is a Tensor, the data type must be int64 or int32.
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = fluid.layers.gaussian_random(var_shape)
            # if var_shape's value is [2, 3]
            # result_3 is:
            # [[-0.12310527,  0.8187662,   1.923219  ]
            #  [ 0.70721835,  0.5210541,  -0.03214082]]

       .. code-block:: python

           # declarative mode
           # required: skiptest
           import numpy as np
           from paddle import fluid

           x = fluid.layers.gaussian_random((2, 3), std=2., seed=10)

           place = fluid.CPUPlace()
           exe = fluid.Executor(place)
           start = fluid.default_startup_program()
           main = fluid.default_main_program()

           exe.run(start)
           x_np, = exe.run(main, feed={}, fetch_list=[x])

           x_np
           # array([[2.3060477, 2.676496 , 3.9911983],
           #        [0.9990833, 2.8675377, 2.2279181]], dtype=float32)

       .. code-block:: python

           # imperative mode
           import numpy as np
           from paddle import fluid
           import paddle.fluid.dygraph as dg

           place = fluid.CPUPlace()
           with dg.guard(place) as g:
               x = fluid.layers.gaussian_random((2, 4), mean=2., dtype="float32", seed=10)
               x_np = x.numpy()
           x_np
           # array([[2.3060477 , 2.676496  , 3.9911983 , 0.9990833 ],
           #        [2.8675377 , 2.2279181 , 0.79029655, 2.8447366 ]], dtype=float32)
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.gaussian_random('shape', shape, 'mean',
                                      float(mean), 'std',
                                      float(std), 'seed', seed, 'dtype', dtype)

    check_type(shape, 'shape', (list, tuple, Variable), 'gaussian_random/randn')
    check_dtype(dtype, 'dtype', ['float32', 'float64'], 'gaussian_random/randn')

    inputs = {}
    attrs = {
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': dtype,
        'use_mkldnn': False
    }
    utils.get_shape_tensor_inputs(
        inputs=inputs,
        attrs=attrs,
        shape=shape,
        op_type='gaussian_random/randn')

    helper = LayerHelper('gaussian_random', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='gaussian_random',
        inputs=inputs,
        outputs={'Out': out},
        attrs=attrs)

    return out


@templatedoc()
def sampling_id(x, min=0.0, max=1.0, seed=0, dtype='float32'):
    """
    This op samples an id from a multinomial distribution defined by the input, drawing one id for each sample.

    Parameters:
        x (Variable): A 2-D tensor with shape [batch_size, input_feature_dimensions].
        min (Float): minimum, default 0.0.
        max (Float): maximum, default 1.0.
        seed (Float): Random seed, default 0. If seed is not 0, this operator will always generate the same random numbers every time.
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data: float32, float16, int, etc.

    Returns:
        Variable: A tensor containing the sampled ids.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(
                name="X",
                shape=[13, 11],
                dtype='float32')

            out = fluid.layers.sampling_id(x)
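            # A sketch of the semantics: 'out' is a 1-D tensor of shape [batch_size]
            # (13 here), where each entry is an id in [0, 11) sampled from the
            # multinomial distribution given by the corresponding row of x.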
    """

    helper = LayerHelper('sampling_id', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='sampling_id',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'min': min,
               'max': max,
               'seed': seed})

    return out


@deprecated(since='1.8.0', update_to="paddle.normal")
@templatedoc()
def gaussian_random_batch_size_like(input,
                                    shape,
                                    input_dim_idx=0,
                                    output_dim_idx=0,
                                    mean=0.0,
                                    std=1.0,
                                    seed=0,
                                    dtype='float32'):
    """
    ${comment}

    Args:
        input (Variable): ${input_comment}
        shape (tuple|list): ${shape_comment}
        input_dim_idx (int): ${input_dim_idx_comment}
        output_dim_idx (int): ${output_dim_idx_comment}
        mean (float): ${mean_comment}
        std (float): ${std_comment}
        seed (int): ${seed_comment}
        dtype(np.dtype|core.VarDesc.VarType|str): The type of output data, float32 or float64.

    Returns:
        out (Variable): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            input = fluid.data(name="input", shape=[13, 11], dtype='float32')

            out = fluid.layers.gaussian_random_batch_size_like(
                input, shape=[-1, 11], mean=1.0, std=2.0)
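            # A sketch of the semantics: out.shape is [13, 11], since the dim at
            # output_dim_idx (0) is copied from input's dim at input_dim_idx (0).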
    """

    helper = LayerHelper('gaussian_random_batch_size_like', **locals())
    check_type(input, 'input', (Variable),
               'fluid.layers.gaussian_random_batch_size_like')
    check_type(shape, 'shape', (list, tuple),
               'fluid.layers.gaussian_random_batch_size_like')
    check_dtype(dtype, 'dtype', ['float16', 'float32', 'int'],
                'fluid.layers.gaussian_random_batch_size_like')
    out = helper.create_variable_for_type_inference(dtype)
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    helper.append_op(
        type='gaussian_random_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': out},
        attrs={
            'shape': shape,
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx,
            'mean': mean,
            'std': std,
            'seed': seed,
            'dtype': c_dtype
        })

    return out


@templatedoc()
def sum(x):
    """
    ${comment}

    Case 1:
    ::
        Input:
            Input. Shape = [2, 3]
            Input = [[1, 2, 3],
                     [4, 5, 6]]

        Output:
            The output. Shape = [2, 3]
            Output = [[1, 2, 3],
                      [4, 5, 6]]

    Case 2:
    ::
        Input:
            First input:
            Input1. Shape = [2, 3]
            Input1 = [[1, 2, 3],
                      [4, 5, 6]]

        The second input:
            Input2. Shape = [2, 3]
            Input2 = [[7, 8, 9],
                      [10, 11, 12]]

        Output:
            The output. Shape = [2, 3]
            Output = [[8, 10, 12],
                      [14, 16, 18]]

    Args:
        x (Variable|list(Variable)): ${x_comment}

    Returns:
        Variable: ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid

            input0 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=5)
            input1 = fluid.layers.fill_constant(shape=[2, 3], dtype='int64', value=3)
            sum = fluid.layers.sum([input0, input1])

            # You can print out 'sum' via executor.
            out = fluid.layers.Print(sum, message="the sum of input0 and input1: ")
            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_main_program())

            # The printed result is:
            # 1570701754	the sum of input0 and input1: 	The place is:CPUPlace
            # Tensor[sum_0.tmp_0]
            #    shape: [2,3,]
            #    dtype: l
            #    data: 8,8,8,8,8,8,

            # the sum of input0 and input1 is 2-D Tensor with shape [2,3].
            # dtype is the corresponding C++ data type, which may vary in different environments.
            # Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
            #       so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
            #       and '__int64' on Windows. They all represent 64-bit integer variables.
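
        The same computation in imperative mode (a minimal sketch, assuming the
        paddle 2.x eager API; this layer forwards to ``paddle.add_n``):

        .. code-block:: python

            import paddle

            x0 = paddle.full([2, 3], 5, dtype='int64')
            x1 = paddle.full([2, 3], 3, dtype='int64')
            s = paddle.add_n([x0, x1])  # every entry is 8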
    """

    return paddle.add_n(x)


@templatedoc()
def slice(input, axes, starts, ends):
    """
    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
    Slice uses the ``axes``, ``starts`` and ``ends`` attributes to specify the start
    and end indices for each axis in the list of axes, and slices the input tensor
    accordingly. If a negative value such as :math:`-i` is passed to ``starts`` or
    ``ends``, it represents the reverse position of the axis :math:`i-1` (here 0 is
    the initial position).
    If the value passed to ``starts`` or ``ends`` is greater than n
    (the number of elements in this dimension), it represents n.
    For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts`` and ``ends``.
    The following examples explain how slice works:

    .. code-block:: text

        Case1:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [1, 0]
                ends = [2, 3]
            Then:
                result = [ [5, 6, 7], ]
        Case2:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [-1, 1000]       # -1 denotes the reverse 0th position of dimension 0.
            Then:
                result = [ [2, 3, 4], ] # result = data[0:1, 1:4]

    Args:
        input (Tensor): A ``Tensor``. The data type is ``float16``, ``float32``, ``float64``, ``int32`` or ``int64``.
        axes (list|tuple): The data type is ``int32``. Axes that `starts` and `ends` apply to.
        starts (list|tuple|Tensor): The data type is ``int32``. If ``starts`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``starts`` is a Tensor, it should be a 1-D Tensor.
                It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Tensor): The data type is ``int32``. If ``ends`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``ends`` is a Tensor, it should be a 1-D Tensor.
                It represents ending indices of corresponding axis in ``axes``.

    Returns:
        Tensor: A ``Tensor``. The data type is the same as ``input``.

    Raises:
        TypeError: The type of ``starts`` must be list, tuple or Tensor.
        TypeError: The type of ``ends`` must be list, tuple or Tensor.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand(shape=[4, 5, 6], dtype='float32')
            # example 1:
            # attr starts is a list which doesn't contain tensor.
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            sliced_1 = paddle.slice(input, axes=axes, starts=starts, ends=ends)
            # sliced_1 is input[1:3, 0:2, 2:4].

            # example 2:
            # attr starts is a list which contains a tensor.
            minus_3 = paddle.full([1], -3, "int32")
            sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends)
            # sliced_2 is input[1:3, 0:2, 2:4].
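
            # example 3 (a sketch, reusing 'input', 'axes' and 'starts' from above):
            # 'ends' may also be given as a 1-D int32 tensor.
            ends_t = paddle.to_tensor([3, 2, 4], dtype='int32')
            sliced_3 = paddle.slice(input, axes=axes, starts=starts, ends=ends_t)
            # sliced_3 equals sliced_1, i.e. input[1:3, 0:2, 2:4].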
    """
    if in_dygraph_mode():
        attrs = ()
        starts_tensor = None
        ends_tensor = None

        if isinstance(axes, (list, tuple)):
            axes = list(axes)
            if len(axes) == 0:
                raise ValueError(
                    "Input axes should not be an empty list/tuple.")
            for i in range(len(axes)):
                if axes[i] < 0:
                    axes[i] = max(0, axes[i] + len(input.shape))
                else:
                    axes[i] = min(len(input.shape) - 1, axes[i])

        else:
            raise ValueError(
                "Input axes must be a python list or tuple, but received {}".
                format(type(axes)))

        infer_flags = list(1 for i in range(len(axes)))

        if isinstance(starts, (list, tuple)):
            starts = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in starts
            ]
            attrs += ('starts', starts)
        elif isinstance(starts, Variable):
            starts_tensor = starts
            starts.stop_gradient = True
            infer_flags = list(-1 for i in range(len(axes)))

        if isinstance(ends, (list, tuple)):
            ends = [
                item.numpy().item(0) if isinstance(item, Variable) else item
                for item in ends
            ]
            attrs += ('ends', ends)
        elif isinstance(ends, Variable):
            ends_tensor = ends
            ends_tensor.stop_gradient = True
            infer_flags = list(-1 for i in range(len(axes)))

        return _C_ops.slice(input, starts_tensor, ends_tensor, 'axes', axes,
                            'infer_flags', infer_flags, *attrs)

    if not isinstance(starts, (list, tuple, Variable)):
        raise ValueError(
            "Input starts must be an Variable, python list or tuple.")
    if not isinstance(ends, (list, tuple, Variable)):
        raise ValueError(
            "Input ends must be an Variable, python list or tuple.")

    helper = LayerHelper('slice', **locals())

    inputs = {'Input': input}
    attrs = {'axes': axes}
    infer_flags = list(1 for i in range(len(axes)))

    # starts
    if isinstance(starts, Variable):
        starts.stop_gradient = True
        inputs['StartsTensor'] = starts
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(starts, (list, tuple)):
        attrs['starts'] = []
        if utils._contain_var(starts):
            inputs['StartsTensorList'] = utils._convert_to_tensor_list(starts)
            for i, dim in enumerate(starts):
                if isinstance(dim, Variable):
                    attrs['starts'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['starts'].append(dim)
        else:
            attrs['starts'] = starts

    # ends
    if isinstance(ends, Variable):
        ends.stop_gradient = True
        inputs['EndsTensor'] = ends
        infer_flags = list(-1 for i in range(len(axes)))
    elif isinstance(ends, (list, tuple)):
        attrs['ends'] = []
        if utils._contain_var(ends):
            inputs['EndsTensorList'] = utils._convert_to_tensor_list(ends)
            for i, dim in enumerate(ends):
                if isinstance(dim, Variable):
                    attrs['ends'].append(-1)
                    infer_flags[i] = -1
                else:
                    attrs['ends'].append(dim)
        else:
            attrs['ends'] = ends

    # infer_flags
    attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='slice', inputs=inputs, attrs=attrs, outputs={'Out': out})

    return out


@deprecated(since='2.0.0', update_to="paddle.strided_slice")
def strided_slice(input, axes, starts, ends, strides):
    """
    :alias_main: paddle.strided_slice
	:alias: paddle.strided_slice,paddle.tensor.strided_slice,paddle.tensor.manipulation.strided_slice
	:old_api: paddle.fluid.layers.strided_slice

    This operator produces a slice of ``input`` along multiple axes. Similar to numpy:
    https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
    Slice uses the ``axes``, ``starts`` and ``ends`` attributes to specify the start
    and end indices for each axis in the list of axes, and slices the input tensor
    accordingly. If a negative value such as :math:`-i` is passed to ``starts`` or
    ``ends``, it represents the reverse position of the axis :math:`i-1` (here 0 is
    the initial position). The ``strides`` represents steps of slicing, and if
    ``strides`` is negative, the slice operation is in the opposite direction.
    If the value passed to ``starts`` or ``ends`` is greater than n
    (the number of elements in this dimension), it represents n.
    For slicing to the end of a dimension with unknown size, it is recommended
    to pass in INT_MAX. The size of ``axes`` must be equal to that of ``starts``, ``ends`` and ``strides``.
    The following examples explain how strided_slice works:

    .. code-block:: text

        Case1:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [1, 0]
                ends = [2, 3]
                strides = [1, 1]
            Then:
                result = [ [5, 6, 7], ]

        Case2:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [2, 0]
                strides = [1, -1]
            Then:
                result = [ [8, 7, 6], ]

        Case3:
            Given:
                data = [ [1, 2, 3, 4], [5, 6, 7, 8], ]
                axes = [0, 1]
                starts = [0, 1]
                ends = [-1, 1000]
                strides = [1, 3]
            Then:
                result = [ [2], ]

    Args:
        input (Variable): An N-D ``Tensor`` or ``LoDTensor``. The data type is ``bool``, ``float32``, ``float64``, ``int32`` or ``int64``.
        axes (list|tuple): The data type is ``int32``. Axes that `starts` and `ends` apply to.
                            It's optional. If it is not provided, it will be treated as :math:`[0,1,...,len(starts)-1]`.
        starts (list|tuple|Variable): The data type is ``int32``. If ``starts`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``starts`` is a Variable, it should be a 1-D Tensor.
                It represents starting indices of corresponding axis in ``axes``.
        ends (list|tuple|Variable): The data type is ``int32``. If ``ends`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``ends`` is a Variable, it should be a 1-D Tensor.
                It represents ending indices of corresponding axis in ``axes``.
        strides (list|tuple|Variable): The data type is ``int32``. If ``strides`` is a list or tuple, the elements of
                it should be integers or Tensors with shape [1]. If ``strides`` is a Variable, it should be a 1-D Tensor.
                It represents the slice step of corresponding axis in ``axes``.

    Returns:
        Variable:  A ``Tensor`` or ``LoDTensor`` with the same dimension as ``input``. The data type is the same as ``input``.

    Raises:
        TypeError: The type of ``starts`` must be list, tuple or Variable.
        TypeError: The type of ``ends`` must be list, tuple or Variable.
        TypeError: The type of ``strides`` must be list, tuple or Variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            input = fluid.data(
                name="input", shape=[3, 4, 5, 6], dtype='float32')

            # example 1:
            # attr starts is a list which doesn't contain tensor Variable.
            axes = [0, 1, 2]
            starts = [-3, 0, 2]
            ends = [3, 2, 4]
            strides_1 = [1, 1, 1]
            strides_2 = [1, 1, 2]
            sliced_1 = fluid.layers.strided_slice(input, axes=axes, starts=starts, ends=ends, strides=strides_1)
            # sliced_1 is input[0:3:1, 0:2:1, 2:4:1, :].


            # example 2:
            # attr starts is a list which contain tensor Variable.
            minus_3 = fluid.layers.fill_constant([1], "int32", -3)
            sliced_2 = fluid.layers.strided_slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2)
            # sliced_2 is input[0:3:1, 0:2:1, 2:4:2, :].
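
            # example 3 (a sketch): a negative stride walks the axis in reverse.
            sliced_3 = fluid.layers.strided_slice(input, axes=[1], starts=[3], ends=[0], strides=[-1])
            # sliced_3 is input[:, 3:0:-1, :, :], with shape [3, 3, 5, 6].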
    """
    helper = LayerHelper('strided_slice', **locals())

    check_variable_and_dtype(input, 'input',
                             ['bool', 'float32', 'float64', 'int32', 'int64'],
                             'strided_slice')
    check_type(axes, 'axes', (list, tuple), 'strided_slice')
    check_type(starts, 'starts', (list, tuple, Variable), 'strided_slice')
    check_type(ends, 'ends', (list, tuple, Variable), 'strided_slice')
    check_type(strides, 'strides', (list, tuple, Variable), 'strided_slice')

    def check_list_elements_dtype(list_input, input_name):
        if isinstance(list_input, Variable):
            check_dtype(list_input.dtype, input_name, ['int32'],
                        'strided_slice')
        else:
            for i, var in enumerate(list_input):
                var_name = input_name + '[' + str(i) + ']'
                if isinstance(var, Variable):
                    check_dtype(var.dtype, var_name, ['int32'], 'strided_slice')

    check_list_elements_dtype(axes, 'axes')
    check_list_elements_dtype(starts, 'starts')
    check_list_elements_dtype(ends, 'ends')
    check_list_elements_dtype(strides, 'strides')

    def get_new_list_tensor(old_list):
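        # Normalize a mixed list of Python ints and Variables into a list of
        # int32 tensors, so the op receives uniform per-dimension inputs.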
        new_list_tensor = []
        for dim in old_list:
            if isinstance(dim, Variable):
                dim.stop_gradient = True
                new_list_tensor.append(dim)
            else:
                assert (isinstance(dim, int))
                temp_out = helper.create_variable_for_type_inference('int32')
                fill_constant([1], 'int32', dim, force_cpu=True, out=temp_out)
                new_list_tensor.append(temp_out)
        return new_list_tensor

    inputs = {'Input': input}
    attrs = {'axes': axes}
    infer_flags = list(1 for i in range(len(axes)))

    if in_dygraph_mode():
        inputs = {'Input': input}
        attrs = {
            'axes': axes,
            'starts': starts,
            'ends': ends,
            'strides': strides,
            'infer_flags': infer_flags
        }
    else:
        # starts
        if isinstance(starts, Variable):
            starts.stop_gradient = True
            inputs['StartsTensor'] = starts
        elif isinstance(starts, (list, tuple)):
            attrs['starts'] = []
            if utils._contain_var(starts):
                inputs['StartsTensorList'] = get_new_list_tensor(starts)
                for i, dim in enumerate(starts):
                    if isinstance(dim, Variable):
                        attrs['starts'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['starts'].append(dim)
            else:
                attrs['starts'] = starts

        # ends
        if isinstance(ends, Variable):
            ends.stop_gradient = True
            inputs['EndsTensor'] = ends
        elif isinstance(ends, (list, tuple)):
            attrs['ends'] = []
            if utils._contain_var(ends):
                inputs['EndsTensorList'] = get_new_list_tensor(ends)
                for i, dim in enumerate(ends):
                    if isinstance(dim, Variable):
                        attrs['ends'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['ends'].append(dim)
            else:
                attrs['ends'] = ends

        # strides
        if isinstance(strides, Variable):
            strides.stop_gradient = True
            inputs['StridesTensor'] = strides
        elif isinstance(strides, (list, tuple)):
            attrs['strides'] = []
            if utils._contain_var(strides):
                inputs['StridesTensorList'] = get_new_list_tensor(strides)
                for i, dim in enumerate(strides):
                    if isinstance(dim, Variable):
                        attrs['strides'].append(-1)
                        infer_flags[i] = -1
                    else:
                        attrs['strides'].append(dim)
            else:
                attrs['strides'] = strides
        attrs['infer_flags'] = infer_flags
    out = helper.create_variable_for_type_inference(
        dtype=helper.input_dtype('input'))
    helper.append_op(
        type='strided_slice', inputs=inputs, attrs=attrs, outputs={'Out': out})

    return out


def shape(input):
    """
    :alias_main: paddle.shape
	:alias: paddle.shape,paddle.tensor.shape,paddle.tensor.attribute.shape
	:old_api: paddle.fluid.layers.shape

    **Shape Layer**

    Get the shape of the input.

    .. code-block:: text

        Case1:
            Given N-D Tensor:
                input = [ [1, 2, 3, 4], [5, 6, 7, 8] ]

            Then:
                input.shape = [2, 4]

        Case2:
            Given SelectedRows:
                input.rows = [0, 4, 19]
                input.height = 20
                input.value = [ [1, 2], [3, 4], [5, 6] ]  # inner tensor
            Then:
                input.shape = [3, 2]

    Args:
        input (Variable): The input can be N-D Tensor or SelectedRows with data type bool, float16, float32, float64, int32, int64.
                          If input variable is type of SelectedRows, returns the shape of its inner tensor.

    Returns:
        Variable (Tensor): The shape of the input variable.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            inputs = fluid.data(name="x", shape=[3, 100, 100], dtype="float32")
            output = fluid.layers.shape(inputs)

            exe = fluid.Executor(fluid.CPUPlace())
            exe.run(fluid.default_startup_program())

            img = np.ones((3, 100, 100)).astype(np.float32)

            res = exe.run(fluid.default_main_program(), feed={'x':img}, fetch_list=[output])
            print(res) # [array([  3, 100, 100], dtype=int32)]
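
        .. code-block:: python

            # imperative mode (a sketch, assuming the paddle 2.x eager API):
            import paddle

            paddle.disable_static()
            x = paddle.ones([3, 100, 100])
            s = paddle.shape(x)  # int32 Tensor holding [3, 100, 100]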
    """
    if in_dygraph_mode():
        out = _C_ops.shape(input)
        out.stop_gradient = True
        return out

    check_variable_and_dtype(input, 'input', [
        'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'complex64',
        'complex128'
    ], 'shape')
    helper = LayerHelper('shape', **locals())
    out = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type='shape',
        inputs={'Input': input},
        outputs={'Out': out},
        stop_gradient=True)

    return out


def rank(input):
    """
    The OP returns the number of dimensions for a tensor, which is a 0-D int32 Tensor.

    Args:
        input (Tensor): The input N-D tensor with shape of :math:`[N_1, N_2, ..., N_k]`, the data type is arbitrary.

    Returns:
        Tensor: A 0-D int32 Tensor holding the number of dimensions of the input Tensor.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.rand((3, 100, 100))
            rank = paddle.rank(input)
            print(rank)
            # 3
    """
    check_type(input, 'input', (Variable), 'input')
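    # The rank is known statically from the variable's shape metadata, so it is
    # materialized directly as a constant int32 tensor.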
    ndims = len(input.shape)
    out = assign(np.array(ndims, 'int32'))

    return out


@deprecated(since="2.0.0", update_to="paddle.numel")
def size(input):
    """
    **Size Layer**

    Returns the number of elements for a tensor, which is a int64 Tensor with shape [1].

    Args:
        input (Tensor): The input Tensor, it's data type can be bool, float16, float32, float64, int32, int64.

    Returns:
        Tensor: The number of elements for the input Tensor.

    Raises:
        TypeError: ``input`` must be a Tensor and the data type of ``input`` must be one of bool, float16, float32, float64, int32, int64.
    
    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid.layers as layers
            paddle.enable_static()

            input = layers.data(
                name="input", shape=[3, 100], dtype="float32", append_batch_size=False)
            rank = layers.size(input) # 300
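
        .. code-block:: python

            # imperative mode (a sketch, assuming the paddle 2.x eager API):
            import paddle

            paddle.disable_static()
            x = paddle.zeros([3, 100])
            n = paddle.numel(x)  # Tensor holding 300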
    """

    if in_dygraph_mode():
        return _C_ops.size(input)
    check_variable_and_dtype(
        input, 'input',
        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'], "size")
    helper = LayerHelper('size', **locals())
    out = helper.create_variable_for_type_inference(dtype='int64')
    helper.append_op(type='size', inputs={'Input': input}, outputs={'Out': out})

    return out


def _elementwise_op(helper):
    op_type = helper.layer_type
    x = helper.kwargs.get('x', None)
    y = helper.kwargs.get('y', None)
    assert x is not None, 'x cannot be None in {}'.format(op_type)
    assert y is not None, 'y cannot be None in {}'.format(op_type)
    check_variable_and_dtype(
        x, 'x', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
        op_type)
    check_variable_and_dtype(
        y, 'y', ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
        op_type)

    axis = helper.kwargs.get('axis', -1)
    use_mkldnn = helper.kwargs.get('use_mkldnn', False)
    name = helper.kwargs.get('name', None)
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

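    # 'axis' selects where y's dimensions align with x's for broadcasting;
    # the default -1 aligns y with x's trailing dimensions.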
    helper.append_op(
        type=op_type,
        inputs={'X': x,
                'Y': y},
        outputs={'Out': out},
        attrs={'axis': axis,
               'use_mkldnn': use_mkldnn})
    return helper.append_activation(out)


def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None):
    """
    Scale operator.

    Putting scale and bias to the input Tensor as following:

    ``bias_after_scale`` is True:

    .. math::
                            Out=scale*X+bias

    ``bias_after_scale`` is False:

    .. math::
                            Out=scale*(X+bias)

    Args:
        x(Tensor): Input N-D Tensor of scale operator. Data type can be float32, float64, int8, int16, int32, int64, uint8.
        scale(float|Tensor): The scale factor of the input, it should be a float number or a Tensor with shape [1] and data type as float32.
        bias(float): The bias to be put on the input.
        bias_after_scale(bool): Apply bias addition after or before scaling. It is useful for numeric stability in some circumstances.
        act(str, optional): Activation applied to the output such as tanh, softmax, sigmoid, relu.
        name(str, optional): The default value is None. Normally there is no need for user to set this property.  For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Tensor: Output tensor of scale operator, with shape and data type the same as input.

    Examples:
        .. code-block:: python
            # scale as a float32 number
            import paddle

            data = paddle.randn(shape=[2,3], dtype='float32')
            res = paddle.scale(data, scale=2.0, bias=1.0)

        .. code-block:: python

            # scale with parameter scale as a Tensor
            import paddle

            data = paddle.randn(shape=[2, 3], dtype='float32')
            factor = paddle.to_tensor([2], dtype='float32')
            res = paddle.scale(data, scale=factor, bias=1.0)

    """

    if in_dygraph_mode():
        _scale = scale.numpy().item(0) if isinstance(scale, Variable) else scale
        out = _C_ops.scale(x, 'scale',
                           float(_scale), 'bias',
                           float(bias), 'bias_after_scale', bias_after_scale)
        return dygraph_utils._append_activation_in_dygraph(out)

    check_variable_and_dtype(x, "x", [
        'float16', 'uint16', 'float32', 'float64', 'int8', 'int16', 'int32',
        'int64', 'uint8'
    ], "scale")
    inputs = {'X': [x]}
    attrs = {
        'bias': float(bias),
        'bias_after_scale': bias_after_scale,
    }
    if isinstance(scale, Variable):
        inputs['ScaleTensor'] = [scale]
    else:
        attrs['scale'] = float(scale)
    helper = LayerHelper('scale', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type='scale', inputs=inputs, outputs={'Out': out}, attrs=attrs)
    return helper.append_activation(out)


def elementwise_add(x, y, axis=-1, act=None, name=None):
    """

Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle
        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_add(x, y)
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # [3., 8., 6.]


    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_add(x, y, axis=1)
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_add(x, y, axis=3)
        # z = x + y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value) # z.shape=[2,3,4,5]
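
    .. code-block:: python

        # imperative-mode equivalent (a sketch, assuming the paddle 2.x eager API):
        import paddle

        x = paddle.to_tensor([2., 3., 4.])
        y = paddle.to_tensor([1., 5., 2.])
        z = paddle.add(x, y)  # [3., 8., 6.]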

    """
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x,
            y,
            axis=axis,
            act=act,
            op_name='elementwise_add',
            use_mkldnn=_global_flags()["FLAGS_use_mkldnn"])

    return _elementwise_op(LayerHelper('elementwise_add', **locals()))


@deprecated(since="2.0.0", update_to="paddle.divide")
def elementwise_div(x, y, axis=-1, act=None, name=None):
    """

Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_div(x, y)
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # [2., 0.6, 2.]


    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_div(x, y, axis=1)
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_div(x, y, axis=3)
        # z = x / y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value) # z.shape=[2,3,4,5]

    """
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_div')

    return _elementwise_op(LayerHelper('elementwise_div', **locals()))


def elementwise_sub(x, y, axis=-1, act=None, name=None):
    """

Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_sub(x, y)
        # z = x - y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # [1., -2., 2.]


    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_sub(x, y, axis=1)
        # z = x - y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_sub(x, y, axis=3)
        # z = x - y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value) # z.shape=[2,3,4,5]

    """
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_sub')

    return _elementwise_op(LayerHelper('elementwise_sub', **locals()))


@deprecated(since="2.0.0", update_to="paddle.multiply")
def elementwise_mul(x, y, axis=-1, act=None, name=None):
    """

Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_mul(x, y)
        # z = x * y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # [2., 15., 8.]


    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_mul(x, y, axis=1)
        # z = x * y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) # z.shape=[2,3,4,5]


    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.random.randint(1, 5, size=[2, 3, 4, 5]).astype('float32'),
                "y": np.random.randint(1, 5, size=[5]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[5], dtype='float32')
        z = fluid.layers.elementwise_mul(x, y, axis=3)
        # z = x * y

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])
        print(z_value) # z.shape=[2,3,4,5]

    """
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_mul')

    return _elementwise_op(LayerHelper('elementwise_mul', **locals()))


def elementwise_max(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_max
	:alias: paddle.elementwise_max,paddle.tensor.elementwise_max,paddle.tensor.math.elementwise_max
	:old_api: paddle.fluid.layers.elementwise_max

Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_max(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) #[2, 5, 4]


    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }
        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_max(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value)#[[[[1., 1., 1., 1., 1.] .... [1., 1., 1., 1., 1.]]]]

    """
12008 12009 12010 12011
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_max')

S
sneaxiy 已提交
12012 12013 12014
    return _elementwise_op(LayerHelper('elementwise_max', **locals()))


X
Xin Pan 已提交
12015
def elementwise_min(x, y, axis=-1, act=None, name=None):
    """
    :alias_main: paddle.elementwise_min
    :alias: paddle.elementwise_min,paddle.tensor.elementwise_min,paddle.tensor.math.elementwise_min
    :old_api: paddle.fluid.layers.elementwise_min

Examples:

    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_min(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) #[1, 3, 2]

    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.ones((2, 3, 4, 5)).astype('float32'),
                "y": np.zeros((3, 4)).astype('float32')
            }

        paddle.enable_static()
        x = fluid.data(name="x", shape=[2,3,4,5], dtype='float32')
        y = fluid.data(name="y", shape=[3,4], dtype='float32')
        z = fluid.layers.elementwise_min(x, y, axis=1)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value)#[[[[0., 0., 0., 0., 0.] .... [0., 0., 0., 0., 0.]]]]
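
    A minimal dynamic-graph sketch of the same op (assumes Paddle 2.x
    imperative mode):

    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid

        x = paddle.to_tensor([2., 3., 4.])
        y = paddle.to_tensor([1., 5., 2.])
        print(fluid.layers.elementwise_min(x, y).numpy())  # [1. 3. 2.]
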
    """
12070 12071 12072
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_min')
12073

S
sneaxiy 已提交
12074 12075 12076
    return _elementwise_op(LayerHelper('elementwise_min', **locals()))


X
Xin Pan 已提交
12077
def elementwise_pow(x, y, axis=-1, act=None, name=None):
    """

Examples:

    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = fluid.layers.elementwise_pow(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) #[2, 243, 16]
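
    A minimal dynamic-graph sketch with fractional exponents (assumes
    Paddle 2.x imperative mode):

    ..  code-block:: python

        import paddle
        import paddle.fluid as fluid

        x = paddle.to_tensor([2., 3., 4.])
        y = paddle.to_tensor([0.5, 2., 3.])
        print(fluid.layers.elementwise_pow(x, y).numpy())
        # [ 1.4142135  9.        64.       ]
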
    """
12105 12106 12107
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_pow')
S
sneaxiy 已提交
12108 12109 12110
    return _elementwise_op(LayerHelper('elementwise_pow', **locals()))


12111
@deprecated(since="2.0.0", update_to="paddle.remainder")
12112
def elementwise_mod(x, y, axis=-1, act=None, name=None):
    """

Examples:

    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([10, 15, 8]).astype('int32'),
                "y": np.array([3, 6, 5]).astype('int32')
            }

        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='int32')
        y = fluid.data(name="y", shape=[3], dtype='int32')
        z = fluid.layers.elementwise_mod(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) #[1, 3, 3]
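
    Since this API is deprecated, a minimal sketch of the recommended
    replacement ``paddle.remainder`` (assumes Paddle 2.x imperative mode):

    ..  code-block:: python

        import paddle

        x = paddle.to_tensor([10, 15, 8], dtype='int32')
        y = paddle.to_tensor([3, 6, 5], dtype='int32')
        print(paddle.remainder(x, y).numpy())  # [1 3 3]
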
    """
12140 12141 12142 12143
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_mod')

12144 12145 12146
    return _elementwise_op(LayerHelper('elementwise_mod', **locals()))


12147
@deprecated(since="2.0.0", update_to="paddle.floor_divide")
12148
def elementwise_floordiv(x, y, axis=-1, act=None, name=None):
    """

Examples:

    ..  code-block:: python

        import paddle.fluid as fluid
        import numpy as np
        import paddle

        def gen_data():
            return {
                "x": np.array([10, 15, 8]).astype('int32'),
                "y": np.array([3, 7, 5]).astype('int32')
            }

        paddle.enable_static()
        x = fluid.data(name="x", shape=[3], dtype='int32')
        y = fluid.data(name="y", shape=[3], dtype='int32')
        z = fluid.layers.elementwise_floordiv(x, y)

        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(),
                            fetch_list=[z.name])

        print(z_value) #[3, 2, 1]
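
    Since this API is deprecated, a minimal sketch of the recommended
    replacement ``paddle.floor_divide`` (assumes Paddle 2.x imperative mode):

    ..  code-block:: python

        import paddle

        x = paddle.to_tensor([10, 15, 8], dtype='int32')
        y = paddle.to_tensor([3, 7, 5], dtype='int32')
        print(paddle.floor_divide(x, y).numpy())  # [3 2 1]
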
    """
12176 12177 12178 12179
    if in_dygraph_mode():
        return _elementwise_op_in_dygraph(
            x, y, axis=axis, act=act, op_name='elementwise_floordiv')

12180 12181 12182
    return _elementwise_op(LayerHelper('elementwise_floordiv', **locals()))


S
sneaxiy 已提交
12183
for func in [
        elementwise_add,
        elementwise_div,
        elementwise_sub,
        elementwise_mul,
        elementwise_max,
        elementwise_pow,
        elementwise_min,
        elementwise_mod,
        elementwise_floordiv,
]:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)

    # insert the c++ doc string on top of python doc string
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "axis (int32, optional): If X.dimension != Y.dimension, \
            Y.dimension must be a subsequence of x.dimension. \
            And axis is the start dimension index for broadcasting Y onto X. ",
            "act (string, optional): Activation applied to the output. \
            Default is None. Details: :ref:`api_guide_activations_en` ",
            "name (string, optional): Name of the output. \
            Default is None. It's used to print debug info for developers. Details: \
            :ref:`api_guide_Name` "
        ],
        skip_attrs_set={
            "x_data_format", "y_data_format", "axis", "use_quantizer",
            "mkldnn_data_type", "Scale_x", "Scale_y", "Scale_out"
        }) + """\n""" + str(func.__doc__)

    # If the @deprecated decorator injected a "Warning: ... instead." line,
    # move it to the top of the doc string so readers see it first.
    doc_list = func.__doc__.splitlines()

    for idx, val in enumerate(doc_list):
        if val.startswith("Warning: ") and val.endswith(
                " instead."
        ) and "and will be removed in future versions." in val:
            doc_list.insert(0, doc_list.pop(idx))
            func.__doc__ = "\n" + "\n".join(i for i in doc_list)
            break

# Currently no ops are registered in the list below; the generic example
# template that follows is kept unused.
for func in []:
    op_proto = OpProtoHolder.instance().get_op_proto(func.__name__)
    func.__doc__ = _generate_doc_string_(
        op_proto,
        additional_args_lines=[
            "act (basestring|None): Activation applied to the output.",
            "name (basestring|None): Name of the output."
        ])
    func.__doc__ = func.__doc__ + """

Examples:
  .. code-block:: python
    import paddle.fluid as fluid
    # example 1: shape(x) = (2, 3, 4, 5), shape(y) = (2, 3, 4, 5)
    x0 = fluid.layers.data(name="x0", shape=[2, 3, 4, 5], dtype='float32')
    y0 = fluid.layers.data(name="y0", shape=[2, 3, 4, 5], dtype='float32')
    z0 = fluid.layers.%s(x0, y0)

    # example 2: shape(X) = (2, 3, 4, 5), shape(Y) = (5)
    x1 = fluid.layers.data(name="x1", shape=[2, 3, 4, 5], dtype='float32')
    y1 = fluid.layers.data(name="y1", shape=[5], dtype='float32')
    z1 = fluid.layers.%s(x1, y1)

    # example 3: shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    x2 = fluid.layers.data(name="x2", shape=[2, 3, 4, 5], dtype='float32')
    y2 = fluid.layers.data(name="y2", shape=[4, 5], dtype='float32')
    z2 = fluid.layers.%s(x2, y2, axis=2)

    # example 4: shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    x3 = fluid.layers.data(name="x3", shape=[2, 3, 4, 5], dtype='float32')
    y3 = fluid.layers.data(name="y3", shape=[3, 4], dtype='float32')
    z3 = fluid.layers.%s(x3, y3, axis=1)

    # example 5: shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    x4 = fluid.layers.data(name="x4", shape=[2, 3, 4, 5], dtype='float32')
    y4 = fluid.layers.data(name="y4", shape=[2], dtype='float32')
    z4 = fluid.layers.%s(x4, y4, axis=0)

    # example 6: shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
    x5 = fluid.layers.data(name="x5", shape=[2, 3, 4, 5], dtype='float32')
    y5 = fluid.layers.data(name="y5", shape=[2, 1], dtype='float32')
    z5 = fluid.layers.%s(x5, y5, axis=0)
    """ % (func.__name__, func.__name__, func.__name__, func.__name__,
           func.__name__, func.__name__)


def _logical_op(op_name, x, y, out=None, name=None, binary_op=True):
    if in_dygraph_mode():
        op = getattr(_C_ops, op_name)
        if binary_op:
            return op(x, y)
        else:
            return op(x)
    check_variable_and_dtype(x, "x", [
        "bool", "int8", "int16", "int32", "int64", "float32", "float64"
    ], op_name)
    if y is not None:
        check_variable_and_dtype(y, "y", [
            "bool", "int8", "int16", "int32", "int64", "float32", "float64"
        ], op_name)
    if out is not None:
        check_type(out, "out", Variable, op_name)

    helper = LayerHelper(op_name, **locals())

    if binary_op and x.dtype != y.dtype:
        raise ValueError(
            "(InvalidArgument) The DataType of %s Op's Variable must be consistent, but received %s and %s."
            % (op_name, x.dtype, y.dtype))

    if out is None:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    if binary_op:
        helper.append_op(
            type=op_name, inputs={"X": x,
                                  "Y": y}, outputs={"Out": out})
    else:
        helper.append_op(type=op_name, inputs={"X": x}, outputs={"Out": out})

    return out


def logical_and(x, y, out=None, name=None):
    r"""

    ``logical_and`` operator computes element-wise logical AND on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x \&\& y

    .. note::
        ``paddle.logical_and`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True])
            y = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_and(x, y)
            print(res) # [True False True False]
    """
    return _logical_op(
        op_name="logical_and", x=x, y=y, name=name, out=out, binary_op=True)


def logical_or(x, y, out=None, name=None):
    """

    ``logical_or`` operator computes element-wise logical OR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = x || y

    .. note::
        ``paddle.logical_or`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.array([True, False], dtype='bool').reshape(2, 1)
            y_data = np.array([True, False, True, False], dtype='bool').reshape(2, 2)
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_or(x, y)
            print(res) # [[ True  True] [ True False]]
    """
    return _logical_op(
        op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True)


def logical_xor(x, y, out=None, name=None):
    r"""

    ``logical_xor`` operator computes element-wise logical XOR on ``x`` and ``y``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = (x || y) \&\& !(x \&\& y)

    .. note::
        ``paddle.logical_xor`` supports broadcasting. If you want to know more about broadcasting, please refer to :ref:`user_guide_broadcasting`.

    Args:
        x (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        y (Tensor): the input tensor, its data type should be one of bool, int8, int16, int32, int64, float32, float64.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        N-D Tensor. A location into which the result is stored. Its dimension equals that of ``x``.

    Examples:
        .. code-block:: python

            import paddle
            import numpy as np

            x_data = np.array([True, False], dtype='bool').reshape([2, 1])
            y_data = np.array([True, False, True, False], dtype='bool').reshape([2, 2])
            x = paddle.to_tensor(x_data)
            y = paddle.to_tensor(y_data)
            res = paddle.logical_xor(x, y)
            print(res) # [[False,  True], [ True, False]]
    """
    return _logical_op(
        op_name="logical_xor", x=x, y=y, name=name, out=out, binary_op=True)


@templatedoc()
def logical_not(x, out=None, name=None):
    """

    ``logical_not`` operator computes element-wise logical NOT on ``x``, and returns ``out``. ``out`` is N-dim boolean ``Tensor``.
    Each element of ``out`` is calculated by

    .. math::

        out = !x

    Args:
        x(Tensor):  Operand of logical_not operator. Must be a Tensor of type bool, int8, int16, int32, int64, float32, or float64.
        out(Tensor): The ``Tensor`` that specifies the output of the operator, which can be any ``Tensor`` that has been created in the program. The default value is None, and a new ``Tensor`` will be created to save the output.
        name(str|None): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: ${out_comment}

    Examples:
        .. code-block:: python

            import paddle

            x = paddle.to_tensor([True, False, True, False])
            res = paddle.logical_not(x)
            print(res) # [False  True False  True]
    """

    return _logical_op(
        op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False)


@templatedoc()
def clip(x, min, max, name=None):
    """
    :old_api: paddle.fluid.layers.clip

    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        min(float): ${min_comment}
        max(float): ${max_comment}
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        ${out_comment}

    Return Type:
        ${out_type}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            input = fluid.data(
                name='data', shape=[1], dtype='float32')
            reward = fluid.layers.clip(x=input, min=-1.0, max=1.0)
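
        A minimal end-to-end sketch with concrete values (illustrative only;
        the variable names and fed array are hypothetical):

        .. code-block:: python

            import numpy as np
            import paddle
            import paddle.fluid as fluid

            paddle.enable_static()
            x = fluid.data(name='x', shape=[3], dtype='float32')
            clipped = fluid.layers.clip(x=x, min=-1.0, max=1.0)

            exe = fluid.Executor(fluid.CPUPlace())
            out, = exe.run(feed={'x': np.array([-2., 0.5, 3.], dtype='float32')},
                           fetch_list=[clipped])
            print(out)  # [-1.   0.5  1. ]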
    """

    helper = LayerHelper("clip", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'clip')

    if name is None:
        name = unique_name.generate_with_ignorable_key(".".join(
            [helper.name, 'tmp']))

    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False)

    helper.append_op(
        type="clip",
        inputs={"X": x},
        attrs={"min": min,
               "max": max},
        outputs={"Out": out})

    return out


@templatedoc()
def clip_by_norm(x, max_norm, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        max_norm(${max_norm_type}): ${max_norm_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Tensor:

        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid

            input = paddle.to_tensor([[2.0, 2.0], [2.0, 2.0]], dtype='float32')
            reward = fluid.layers.clip_by_norm(x=input, max_norm=1.0)
            # [[0.5, 0.5], [0.5, 0.5]]
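
            # A minimal check of the scaling rule (stated as an assumption:
            # when l2_norm(x) > max_norm, out = x * max_norm / l2_norm(x)):
            v = paddle.to_tensor([[3.0, 4.0]])  # l2 norm is 5
            print(fluid.layers.clip_by_norm(v, max_norm=1.0).numpy())
            # [[0.6 0.8]] == v / 5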
    """

    if in_dygraph_mode():
        return _C_ops.clip_by_norm(x, 'max_norm', max_norm)

    helper = LayerHelper("clip_by_norm", **locals())
    check_variable_and_dtype(x, 'X', ['float32', 'float16'], 'clip_by_norm')
    check_type(max_norm, 'max_norm', (float), 'clip_by_norm')

    if name is None:
        name = unique_name.generate_with_ignorable_key(".".join(
            [helper.name, 'tmp']))

    out = helper.create_variable(
        type=x.type, name=name, dtype=x.dtype, persistable=False)

    helper.append_op(
        type="clip_by_norm",
        inputs={"X": x},
        attrs={"max_norm": max_norm},
        outputs={"Out": out})

    return out


@deprecated(since="2.0.0", update_to="paddle.mean")
@templatedoc()
def mean(x, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        name(basestring|None): Name of the output.

    Returns:
        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            input = fluid.layers.data(
                name='data', shape=[2, 3], dtype='float32')
            mean = fluid.layers.mean(input)
    """

    if in_dygraph_mode():
        return _C_ops.mean(x)

    helper = LayerHelper("mean", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mean')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="mean", inputs={"X": x}, attrs={}, outputs={"Out": out})

    return out


@templatedoc()
def merge_selected_rows(x, name=None):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        name(basestring|None): Name of the output.

    Returns:
        out(${out_type}): ${out_comment}

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            var = b.create_var(
                name="X", dtype="float32", persistable=True,
                type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            y = fluid.layers.merge_selected_rows(var)
    """

    helper = LayerHelper("merge_selected_rows", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="merge_selected_rows",
        inputs={"X": x},
        attrs={},
        outputs={"Out": out})
    return out


def mul(x, y, x_num_col_dims=1, y_num_col_dims=1, name=None):
    """
    Mul Operator.
    This operator is used to perform matrix multiplication for input $x$ and $y$.
    The equation is:

    ..  math::
        Out = x * y

    Both the input $x$ and $y$ can carry the LoD (Level of Details) information, or not. But the output only shares the LoD information with input $x$.

    Args:
        x (Variable): The first input Tensor/LoDTensor of mul_op.
        y (Variable): The second input Tensor/LoDTensor of mul_op.
        x_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $x$ is a tensor with more than two dimensions, $x$ will be flattened into a two-dimensional matrix first. The flattening rule is: the first `num_col_dims` will be flattened to form the first dimension of the final matrix (the height of the matrix), and the rest `rank(x) - num_col_dims` dimensions are flattened to form the second dimension of the final matrix (the width of the matrix). As a result, height of the flattened matrix is equal to the product of $x$'s first `x_num_col_dims` dimensions' sizes, and width of the flattened matrix is equal to the product of $x$'s last `rank(x) - num_col_dims` dimensions' size. For example, suppose $x$ is a 6-dimensional tensor with the shape [2, 3, 4, 5, 6], and `x_num_col_dims` = 3. Thus, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. Default is 1.
        y_num_col_dims (int, optional): The mul_op can take tensors with more than two dimensions as its inputs. If the input $y$ is a tensor with more than two dimensions, $y$ will be flattened into a two-dimensional matrix first. The attribute `y_num_col_dims` determines how $y$ is flattened. See comments of `x_num_col_dims` for more details. Default is 1.
        name (str, optional): Name of the output. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Default is None.

    Returns:
        Variable(Tensor/LoDTensor): The output Tensor/LoDTensor of mul op.

    Examples:
        ..  code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            dataX = fluid.layers.data(name="dataX", append_batch_size = False, shape=[2, 5], dtype="float32")
            dataY = fluid.layers.data(name="dataY", append_batch_size = False, shape=[5, 3], dtype="float32")
            output = fluid.layers.mul(dataX, dataY,
                                      x_num_col_dims = 1,
                                      y_num_col_dims = 1)
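
            # A hypothetical sketch of the flattening rule (illustrative):
            # with x_num_col_dims=2, x of shape [2, 3, 4] is flattened to
            # [2*3, 4] = [6, 4]; multiplying by y of shape [4, 5] gives
            # [6, 5], which is unflattened back to [2, 3, 5].
            x3d = fluid.data(name="x3d", shape=[2, 3, 4], dtype="float32")
            y2d = fluid.data(name="y2d", shape=[4, 5], dtype="float32")
            out = fluid.layers.mul(x3d, y2d, x_num_col_dims=2, y_num_col_dims=1)
            print(out.shape)  # (2, 3, 5)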

    """
    if in_dygraph_mode():
        return _C_ops.mul(x, y, 'x_num_col_dims', x_num_col_dims,
                          'y_num_col_dims', y_num_col_dims)
    inputs = {"X": [x], "Y": [y]}
    attrs = {"x_num_col_dims": x_num_col_dims, "y_num_col_dims": y_num_col_dims}
    helper = LayerHelper("mul", **locals())
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mul')
    check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64'], 'mul')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="mul", inputs={"X": x,
                            "Y": y}, attrs=attrs, outputs={"Out": out})
    return out


@deprecated(since="2.0.0", update_to="paddle.nn.functional.maxout")
@templatedoc()
def maxout(x, groups, name=None, axis=1):
    """
    ${comment}

    Args:
        x(${x_type}): ${x_comment}
        groups(int): ${groups_comment}
        axis(int, optional): ${axis_comment}
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        Variable: ${out_comment}

    Raises:
        ValueError: If `axis` is not 1, -1 or 3.
        ValueError: If the number of input channels can not be divisible by `groups`.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()

            input = fluid.data(
                name='data',
                shape=[None, 256, 32, 32],
                dtype='float32')
            out = fluid.layers.maxout(input, groups=2)
    """
    return paddle.nn.functional.maxout(**locals())


def space_to_depth(x, blocksize, name=None):
    r"""

    Given a blocksize, this op rearranges the input LoDtensor with layout [batch, channel, height, width] from space to depth.

    This op rearranges blocks of spatial data into depth. More specifically, this op outputs a copy of \
        the input LoDtensor where values from the height and width dimensions are moved to the channel \
        dimension.

    The attr blocksize indicates the input block size.

    space_to_depth will reorganize the elements of input with shape[batch, channel, height, width] \
        according to blocksize to construct output with shape \
        [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]:

    - Non-overlapping blocks of size block_size x block_size are rearranged into depth at each location.
    - The Y, X coordinates within each block of the input become the high order component of the output channel index
    - channel should be divisible by square of blocksize
    - height, width should be divisible by blocksize

    This OP is useful for resizing the activations between convolutions \
        (but keeping all data)

    .. code-block:: text

        Given the input x with the shape [1, 1, 4, 4]:
        x.data = [[[[1,   2,  5,  6],
                    [3,   4,  7,  8],
                    [9,  10, 13, 14],
                    [11, 12, 15, 16]]]]
        blocksize = 2

        then get the output with the shape [1, 4, 2, 2]:
        out.data = [[[[1,   2],  [3,  4]],
                     [[5,   6],  [7,  8]],
                     [[9,  10], [11, 12]],
                     [[13, 14], [15, 16]]]]

    Args:
        x (Variable): The input, which should be 4 dims Tensor or LodTensor, with the shape \
            [batch, channel, height, width]
        blocksize (int): The blocksize to select the element on each feature map should be > 2
        name(str, optional): For detailed information, please refer \
            to :ref:`api_guide_Name`. Usually name is no need to set and \
            None by default.
    Returns: The output, which should be 4 dims Tensor or LodTensor, with the shape \
            [batch, channel * blocksize * blocksize, height/blocksize, width/blocksize]

    Return Type: Variable

    Raises:
        ValueError: If blocksize is not an int.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle

            paddle.enable_static()
            data = fluid.data(
                name='data', shape=[1, 4, 2, 2], dtype='float32')
            space_to_depthed = fluid.layers.space_to_depth(
                x=data, blocksize=2)

            exe = fluid.Executor(fluid.CPUPlace())
            data_np = np.arange(0,16).reshape((1,4,2,2)).astype('float32')

            print(data_np)
            #array([[[[ 0.,  1.], [ 2.,  3.]],
            #        [[ 4.,  5.], [ 6.,  7.]],
            #        [[ 8.,  9.], [10., 11.]],
            #        [[12., 13.], [14., 15.]]]], dtype=float32)

            out_main = exe.run(fluid.default_main_program(),
                        feed={'data': data_np},
                        fetch_list=[space_to_depthed])

            print(out_main)
            #[array([[[[ 0.]], [[ 4.]], [[ 1.]], [[ 5.]],
            #         [[ 8.]], [[12.]], [[ 9.]], [[13.]],
            #         [[ 2.]], [[ 6.]], [[ 3.]], [[ 7.]],
            #         [[10.]], [[14.]], [[11.]], [[15.]]]], dtype=float32)]

    """

    helper = LayerHelper("space_to_depth", **locals())

    if not (isinstance(blocksize, int)):
        raise ValueError("blocksize must be a python Int")
    check_variable_and_dtype(x, 'x', \
        ['float16', 'float32', 'float64', 'int32', 'int64'], 'space_to_depth')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="space_to_depth",
        inputs={"X": x},
        attrs={"blocksize": blocksize},
        outputs={"Out": out})
    return out

def affine_channel(x,
                   scale=None,
                   bias=None,
                   data_layout='NCHW',
                   name=None,
                   act=None):
    """

    Applies a separate affine transformation to each channel of the input.
    Useful for replacing spatial batch norm with its equivalent fixed
    transformation. The input can also be a 2D tensor, in which case the
    affine transformation is applied in the second dimension.

    Args:
        x (Variable): Feature map input can be a 4D tensor with order NCHW
            or NHWC. It also can be a 2D tensor and the affine transformation
            is applied in the second dimension. The data type is float32 or float64.
        scale (Variable): 1D input of shape (C), the c-th element is the scale
            factor of the affine transformation for the c-th channel of
            the input. The data type is float32 or float64.
        bias (Variable): 1D input of shape (C), the c-th element is the bias
            of the affine transformation for the c-th channel of the input.
            The data type is float32 or float64.
        data_layout (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCHW"`, `"NHWC"`.
            The default is `"NCHW"`. When it is `"NCHW"`, the data is stored in the order of:
            `[batch_size, input_channels, input_height, input_width]`. If input is 2D Tensor, you can ignore
            data_layout.
        name (str, default None): The name of this layer. For more information,
            please refer to :ref:`api_guide_Name` .
        act (str, default None): Activation to be applied to the output of this layer.

    Returns:
        Variable: A tensor which has the same shape, data layout and data type with x.

    Examples:
        .. code-block:: python

            import numpy as np
            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            use_gpu = False
            place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()
            exe = fluid.Executor(place)

            data = fluid.data(name='data', shape=[None, 1, 2, 2], dtype='float32')
            input_scale = fluid.layers.create_parameter(shape=[1], dtype="float32",
                                    default_initializer=fluid.initializer.Constant(2.0))
            input_bias = fluid.layers.create_parameter(shape=[1],dtype="float32",
                                    default_initializer=fluid.initializer.Constant(0.5))
            out = fluid.layers.affine_channel(data,scale=input_scale,
                                    bias=input_bias)

            exe.run(fluid.default_startup_program())
            test_program = fluid.default_main_program().clone(for_test=True)

            [out_array] = exe.run(test_program,
                                  fetch_list=out,
                                  feed={'data': np.ones([1,1,2,2]).astype('float32')})
            # out_array is [[[[2.5, 2.5],
            #                [2.5, 2.5]]]] with shape: [1, 1, 2, 2]
    """
    helper = LayerHelper("affine_channel", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'affine_channel')
    check_type(scale, 'scale', (Variable, type(None)), 'affine_channel')
    check_type(bias, 'bias', (Variable, type(None)), 'affine_channel')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="affine_channel",
        inputs={"X": x,
                'Scale': scale,
                'Bias': bias},
        attrs={"data_layout": data_layout},
        outputs={"Out": out})
    return helper.append_activation(out)


def similarity_focus(input, axis, indexes, name=None):
    r"""
    SimilarityFocus Operator

    Generate a similarity focus mask with the same shape of input using the following method:
    1. Extract the 3-D tensor(here the first dimension is BatchSize) corresponding
       to the axis according to the indexes. For example, if axis=1 and indexes=[a],
       it will get the matrix T=X[:, a, :, :]. In this case, if the shape of input X
       is (BatchSize, A, B, C), the shape of tensor T is (BatchSize, B, C).
    2. For each index, find the largest numbers in the tensor T, so that the same
       row and same column has at most one number(what it means is that if the
       largest number has been found in the i-th row and the j-th column, then
       the numbers in the i-th row or j-th column will be skipped. And then the
       next largest number will be selected from the remaining numbers. Obviously
       there will be min(B, C) numbers), and mark the corresponding position of the
       3-D similarity focus mask as 1, otherwise as 0. Do elementwise-or for
       each index.
    3. Broadcast the 3-D similarity focus mask to the same shape of input X.

    Refer to `Similarity Focus Layer <http://www.aclweb.org/anthology/N16-1108>`_

    .. code-block:: text

        * Example :

            Given a 4-D tensor x with the shape (BatchSize, C, A, B), where C is
            the number of channels and the shape of feature map is (A, B):
                x.shape = (2, 3, 2, 2)
                x.data = [[[[0.8, 0.1],
                            [0.4, 0.5]],

                           [[0.9, 0.7],
                            [0.9, 0.9]],

                           [[0.8, 0.9],
                            [0.1, 0.2]]],


                          [[[0.2, 0.5],
                            [0.3, 0.4]],

                           [[0.9, 0.7],
                            [0.8, 0.4]],

                           [[0.0, 0.2],
                            [0.4, 0.7]]]]

            Given axis: 1 (the axis of the channel)
            Given indexes: [0]

            then we get a 4-D tensor out with the same shape of input x:
                out.shape = (2, 3, 2, 2)
                out.data = [[[[1.0, 0.0],
                              [0.0, 1.0]],

                             [[1.0, 0.0],
                              [0.0, 1.0]],

                             [[1.0, 0.0],
                              [0.0, 1.0]]],

                            [[[0.0, 1.0],
                              [1.0, 0.0]],

                             [[0.0, 1.0],
                              [1.0, 0.0]],

                             [[0.0, 1.0],
                              [1.0, 0.0]]]]

    Args:
        input(Variable): The input tensor variable(default float). It should
            be a 4-D tensor with shape [BatchSize, A, B, C]. Data type is
            float32 or float64.
        axis(int): Indicating the dimension to be selected. It can only be
            1, 2 or 3.
        indexes(list): Indicating the indexes of the selected dimension.

    Returns:
        Variable: A tensor variable with the same shape and same type \
                  as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            data = fluid.data(
                name='data', shape=[-1, 3, 2, 2], dtype='float32')
            fluid.layers.similarity_focus(input=data, axis=1, indexes=[0])
    """
    helper = LayerHelper('similarity_focus', **locals())
    # check attrs
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "similarity_focus")
    check_type(axis, 'axis', int, "similarity_focus")
    check_type(indexes, 'indexes', list, "similarity_focus")
    if axis != 1 and axis != 2 and axis != 3:
        raise ValueError("axis must be 1, 2 or 3.")
    if len(indexes) == 0:
        raise ValueError("indexes can not be empty.")

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='similarity_focus',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={"axis": axis,
               "indexes": indexes})
    return out


def hash(input, hash_size, num_hash=1, name=None):
    """

    This OP hashes the input into an integer whose value is less than the given hash_size.
    The hash algorithm used is xxHash - Extremely fast hash algorithm
    (https://github.com/Cyan4973/xxHash/tree/v0.6.5)

    Args:
        input(Variable): A **Two-Dimensional** LoDTensor with type int32, int64.
             **Only support LoDTensor**.
        hash_size(int): The space size of hash algorithm; each output integer
             falls in the range [0, hash_size).
        num_hash(int, optional): The times of hash, default is 1.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
       Variable: A LoDTensor with the same data type as input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import numpy as np
            import paddle
            paddle.enable_static()

            place = fluid.core.CPUPlace()

            x = fluid.data(name="x", shape=[2,2], dtype="int32", lod_level=1)
            res = fluid.layers.hash(name="res", input=x, hash_size=1000, num_hash=4)
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())
            in1 = np.array([[1,2],[3,4]]).astype("int32")
            print(in1)
            x_i = fluid.create_lod_tensor(in1, [[0, 2]], place)
            res = exe.run(fluid.default_main_program(), feed={'x':x_i}, fetch_list=[res], return_numpy=False)
            print(np.array(res[0]))
            # [[[722]
            #   [407]
            #   [337]
            #   [395]]
            #  [[603]
            #   [590]
            #   [386]
            #   [901]]]
    """
    check_variable_and_dtype(input, 'input', ['int32', 'int64'], 'hash')
    check_type(hash_size, 'hash_size', int, 'hash')
    check_type(num_hash, 'num_hash', int, 'hash')
    helper = LayerHelper('hash', **locals())
    out = helper.create_variable_for_type_inference(
        helper.input_dtype(), stop_gradient=True)
    helper.append_op(
        type='hash',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'num_hash': num_hash,
               'mod_by': hash_size})
    return out


@templatedoc()
def grid_sampler(x, grid, name=None):
    """

    This operation samples input X by using bilinear interpolation based on
    flow field grid, which is usually generated by :code:`affine_grid` . The grid of
    shape [N, H, W, 2] is the concatenation of (x, y) coordinates
    with shape [N, H, W] each, where x is indexing the 4th dimension
    (in width dimension) of input data x and y is indexing the 3rd
    dimension (in height dimension), finally results is the bilinear
    interpolation value of 4 nearest corner points. The output tensor
    shape will be [N, C, H, W].
    .. code-block:: text
        Step 1:
        Get (x, y) grid coordinates and scale to [0, H-1/W-1].
        .. code-block:: text

            grid_x = 0.5 * (grid[:, :, :, 0] + 1) * (W - 1)
            grid_y = 0.5 * (grid[:, :, :, 1] + 1) * (H - 1)
        Step 2:
        Indices input data X with grid (x, y) in each [H, W] area, and bilinear
        interpolate point value by 4 nearest points.
          wn ------- y_n ------- en
          |           |           |
          |          d_n          |
          |           |           |
         x_w --d_w-- grid--d_e-- x_e
          |           |           |
          |          d_s          |
          |           |           |
          ws ------- y_s ------- es

        x_w = floor(x)              // west side x coord
        x_e = x_w + 1               // east side x coord
        y_n = floor(y)              // north side y coord
        y_s = y_n + 1               // south side y coord

        d_w = grid_x - x_w          // distance to west side
        d_e = x_e - grid_x          // distance to east side
        d_n = grid_y - y_n          // distance to north side
        d_s = y_s - grid_y          // distance to south side
        wn = X[:, :, y_n, x_w]      // north-west point value
        en = X[:, :, y_n, x_e]      // north-east point value
        ws = X[:, :, y_s, x_w]      // south-west point value
        es = X[:, :, y_s, x_e]      // south-east point value

        output = wn * d_e * d_s + en * d_w * d_s
               + ws * d_e * d_n + es * d_w * d_n

    Args:
        x(Variable): The input tensor, which is a 4-D tensor with shape
                     [N, C, H, W], N is the batch size, C is the channel
                     number, H and W is the feature height and width.
                     The data type is float32 or float64.
        grid(Variable): Input grid tensor of shape [N, H, W, 2]. The
                        data type is float32 or float64.
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually name is no need to set and
                             None by default.

    Returns:
        Variable: Output of shape [N, C, H, W] data samples input X
                  using bilinear interpolation based on input grid.
                  The data type is same as input tensor.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle

            paddle.enable_static()
            # use with affine_grid
            x = fluid.data(name='x', shape=[None, 10, 32, 32], dtype='float32')
            theta = fluid.layers.data(name='theta', shape=[2, 3], dtype='float32')
            grid = fluid.layers.affine_grid(theta=theta, out_shape=[3, 10, 32, 32])
            out = fluid.layers.grid_sampler(x=x, grid=grid)
    """
    helper = LayerHelper("grid_sampler", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'grid_sampler')
    check_variable_and_dtype(grid, 'grid', ['float32', 'float64'],
                             'grid_sampler')
    if not isinstance(x, Variable):
        raise ValueError("The x should be a Variable")

    if not isinstance(grid, Variable):
        raise ValueError("The grid should be a Variable")

    out = helper.create_variable_for_type_inference(x.dtype)
    ipts = {'X': x, 'Grid': grid}

    attrs = {'use_cudnn': False} if core.is_compiled_with_rocm() else {}

    helper.append_op(
        type='grid_sampler', inputs=ipts, outputs={'Output': out}, attrs=attrs)
    return out


def log_loss(input, label, epsilon=1e-4, name=None):
    r"""

    **Negative Log Loss Layer**

    This layer accepts input predictions and target label and returns the
    negative log loss.

    .. math::

        Out = -label * \log{(input + \epsilon)}
              - (1 - label) * \log{(1 - input + \epsilon)}

    Args:
        input (Tensor|list):  A 2-D tensor with shape [N x 1], where N is the
                                batch size. This input is a probability computed
                                by the previous operator. Data type float32.
        label (Tensor|list):  The ground truth which is a 2-D tensor with
                                shape [N x 1], where N is the batch size.
                                Data type float32.
        epsilon (float, optional): A small number for numerical stability. Default 1e-4.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Tensor, which shape is [N x 1], data type is float32.

    Examples:
        .. code-block:: python

          import paddle
          import paddle.nn.functional as F

          label = paddle.randn((10,1))
          prob = paddle.randn((10,1))
          cost = F.log_loss(input=prob, label=label)
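
          # A minimal sketch with concrete 0/1 labels (illustrative only):
          label01 = paddle.to_tensor([[1.0], [0.0]])
          prob01 = paddle.to_tensor([[0.9], [0.2]])
          cost01 = F.log_loss(input=prob01, label=label01)
          # elementwise: [-log(0.9 + 1e-4), -log(1 - 0.2 + 1e-4)]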
    """
    helper = LayerHelper('log_loss', **locals())
    check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
    check_variable_and_dtype(label, 'label', ['float32'], 'log_loss')
    loss = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type='log_loss',
        inputs={'Predicted': [input],
                'Labels': [label]},
        outputs={'Loss': [loss]},
        attrs={'epsilon': epsilon})
    return loss


def add_position_encoding(input, alpha, beta, name=None):
13242
    r"""
13243

G
Guo Sheng 已提交
13244 13245
    This operator performs a weighted sum of the input feature at each position
    (position in the sequence) and the corresponding position encoding.

    For more details of position encoding, please refer to `Attention Is All You
    Need <http://arxiv.org/pdf/1706.03762.pdf>`_ .

    The formula is as follows:

    .. math::
        PE(pos, 2i) &= \sin{(pos / 10000^{2i / P})}   \\
        PE(pos, 2i + 1) &= \cos{(pos / 10000^{2i / P})}  \\
        Out(:, pos, i) &= \alpha * input(:, pos, i) + \beta * PE(pos, i)

    Where:
      - :math:`PE(pos, 2i)` : the value at even index `2i` for encoding of position `pos`.
      - :math:`PE(pos, 2i + 1)` : the value at odd index `2i+1` for encoding of position `pos`.

    Args:
        input(Variable): A Tensor or LoDTensor (lod level is 1). If it is a
            Tensor, the shape should be `[N, M, P]`, where `N` stands for
            batch size, `M` for sequence length, `P` for the size of feature
            dimension. If it is a LoDTensor, the shape should be `[N, P]`,
            where `N` stands for the total sequence lengths in this mini-batch,
            `P` for the size of feature. The data type should be float32 or float64.
        alpha(float): Indicate the weight coefficient for `input` when performing
            weighted sum.
        beta(float): Indicate the weight coefficient for position encoding when
            performing weighted sum.
        name(str, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually there is no need to set it, and
            it is None by default.

    Returns:
        Variable: A Tensor or LoDTensor. It has the same shape, data type and lod as `input`.

    Examples:
        .. code-block:: python

          import paddle

          tensor = paddle.randn([16, 32, 64])
          position_tensor = paddle.fluid.layers.add_position_encoding(
                input=tensor, alpha=1.0, beta=1.0)
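
        The encoding itself can be reproduced with numpy (a sketch of the
        formula above with illustrative sizes, not part of the public API):

        .. code-block:: python

          import numpy as np

          M, P = 32, 64                    # sequence length, feature size
          pos = np.arange(M)[:, None]      # positions 0 .. M-1
          i = np.arange(P // 2)[None, :]   # index of each (sin, cos) pair
          pe = np.zeros((M, P), dtype='float32')
          pe[:, 0::2] = np.sin(pos / 10000.0 ** (2.0 * i / P))  # even indices
          pe[:, 1::2] = np.cos(pos / 10000.0 ** (2.0 * i / P))  # odd indices
          # out = alpha * input + beta * pe, broadcast over the batch axis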

    """
    if in_dygraph_mode():
        return _C_ops.add_position_encoding(input, "alpha", alpha, "beta", beta)

    helper = LayerHelper('add_position_encoding', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             "add_position_encoding")
    dtype = helper.input_dtype()

    out = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(
        type="add_position_encoding",
        inputs={"X": input},
        outputs={"Out": out},
        attrs={"alpha": alpha,
               "beta": beta})
    return out


def bilinear_tensor_product(x,
                            y,
                            size,
                            act=None,
                            name=None,
                            param_attr=None,
                            bias_attr=None):
    r"""
    :api_attr: Static Graph

    **Bilinear Tensor Product Layer**

    This layer performs bilinear tensor product on two inputs.
    For example:

    .. math::
       out_{i} = x * W_{i} * {y^\mathrm{T}}, i=0,1,...,size-1

    In this formula:
      - :math:`x`: the first input contains M elements, shape is [batch_size, M].
      - :math:`y`: the second input contains N elements, shape is [batch_size, N].
      - :math:`W_{i}`: the i-th learned weight, shape is [M, N].
      - :math:`out_{i}`: the i-th element of out, shape is [batch_size, size].
      - :math:`y^\mathrm{T}`: the transpose of :math:`y`.

    Args:
        x (Variable): 2-D input tensor with shape [batch_size, M]. Data type
            is float32 or float64.
        y (Variable): 2-D input tensor with shape [batch_size, N]. Data type
            should be same as **x**.
        size (int): The dimension of this layer.
        act (str|None): Activation to be applied to the output of this layer. Default None.
        name(str|None): For detailed information, please refer to
            :ref:`api_guide_Name` . Usually there is no need to set it, and it is None by default.
        param_attr (ParamAttr|None): To specify the weight parameter attribute.
            Default: None, which means the default weight parameter property is
            used. See usage for details in :ref:`api_fluid_ParamAttr` .
        bias_attr (ParamAttr|None): To specify the bias parameter attribute.
            Default: None, which means the default bias parameter property is
            used. See usage for details in :ref:`api_fluid_ParamAttr` .
    Returns:
        Variable: A 2-D Tensor of shape [batch_size, size]. Data type is the same as input **x**.

    Examples:
        .. code-block:: python

            import paddle
            paddle.enable_static()
            layer1 = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
            layer2 = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
            tensor = paddle.static.nn.bilinear_tensor_product(x=layer1, y=layer2, size=1000)
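
        Semantically, each output element is a bilinear form of the two inputs;
        a numpy sketch of the formula above (illustrative shapes only):

        .. code-block:: python

            import numpy as np

            batch, M, N, size = 2, 5, 4, 3
            x = np.random.rand(batch, M).astype('float32')
            y = np.random.rand(batch, N).astype('float32')
            W = np.random.rand(size, M, N).astype('float32')
            # out[b, i] = x[b] @ W[i] @ y[b]^T
            out = np.einsum('bm,imn,bn->bi', x, W, y)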
    """
    helper = LayerHelper('bilinear_tensor_product', **locals())
    dtype = helper.input_dtype('x')

    param_shape = [size, x.shape[1], y.shape[1]]

    w = helper.create_parameter(
        attr=helper.param_attr, shape=param_shape, dtype=dtype, is_bias=False)
    out = helper.create_variable_for_type_inference(dtype=dtype)

    inputs = {"X": x, "Y": y, "Weight": w}
    if helper.bias_attr:
        bias_size = [1, size]
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)
        inputs["Bias"] = bias
    helper.append_op(
        type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out})

    # add activation
    return helper.append_activation(out)


@templatedoc()
def get_tensor_from_selected_rows(x, name=None):
    """
    This operator gets tensor data from input with SelectedRows type, and outputs a LoDTensor.

    .. code-block:: text

        input x is SelectedRows:
           x.rows = [0, 5, 5, 4, 19]
           x.height = 20
           x.value = [[1, 1], [2, 2], [2, 2], [3, 3], [6, 6]]

        Output is LoDTensor:
           out.shape = [5, 2]
           out.data = [[1, 1],
                       [2, 2],
                       [2, 2],
                       [3, 3],
                       [6, 6]]

    Args:
        x(SelectedRows): Input with SelectedRows type. The data type is float32, float64, int32 or int64.
        name(str, optional): The default value is None.  Normally there is no need for user to set this property.
            For more information, please refer to :ref:`api_guide_Name` .

    Returns:
        Variable: LoDTensor transformed from SelectedRows. The data type is the same as the input.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            b = fluid.default_main_program().global_block()
            input = b.create_var(name="X", dtype="float32", persistable=True, type=fluid.core.VarDesc.VarType.SELECTED_ROWS)
            out = fluid.layers.get_tensor_from_selected_rows(input)
    """

    check_type(x, 'x', Variable, 'get_tensor_from_selected_rows')
    if x.type != core.VarDesc.VarType.SELECTED_ROWS:
        raise TypeError(
            "The type of 'x' in get_tensor_from_selected_rows must be SELECTED_ROWS."
        )
    helper = LayerHelper('get_tensor_from_selected_rows', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='get_tensor_from_selected_rows',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={})
    return out


def shuffle_channel(x, group, name=None):
    """
    This operator shuffles the channels of input x.
    It divides the input channels into :attr:`group` subgroups,
    and obtains a new order by selecting an element from every subgroup one by one.

    Please refer to the paper
    https://arxiv.org/pdf/1707.01083.pdf

    .. code-block:: text

        Given a 4-D tensor input with the shape (N, C, H, W):
            input.shape = (1, 4, 2, 2)
            input.data =[[[[0.1, 0.2],
                           [0.2, 0.3]],

                          [[0.3, 0.4],
                           [0.4, 0.5]],

                          [[0.5, 0.6],
                           [0.6, 0.7]],

                          [[0.7, 0.8],
                           [0.8, 0.9]]]]
            Given group: 2
            then we get a 4-D tensor out with the same shape as input:
            out.shape = (1, 4, 2, 2)
            out.data = [[[[0.1, 0.2],
                          [0.2, 0.3]],

                         [[0.5, 0.6],
                          [0.6, 0.7]],

                         [[0.3, 0.4],
                          [0.4, 0.5]],

                         [[0.7, 0.8],
                          [0.8, 0.9]]]]

    Args:
        x(Variable): The input tensor variable. It should be a 4-D tensor with shape [N, C, H, W]
        group(int): The number of subgroups. It must evenly divide the number of channels.

    Returns:
        out(Variable): the channels shuffling result is a tensor variable with the
        same shape and same type as the input.

    Raises:
        TypeError: If group is not an int.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()
            input = fluid.data(name='input', shape=[None,4,2,2], dtype='float32')
            out = fluid.layers.shuffle_channel(x=input, group=2)
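
        Channel shuffle is equivalent to a reshape-transpose-reshape on the
        channel axis; a numpy sketch of this equivalence (illustrative only):

        .. code-block:: python

            import numpy as np

            n, c, h, w, group = 1, 4, 2, 2, 2
            x = np.arange(n * c * h * w, dtype='float32').reshape(n, c, h, w)
            # split channels into (group, c // group), swap the two axes,
            # then flatten back to c channels
            out = (x.reshape(n, group, c // group, h, w)
                    .transpose(0, 2, 1, 3, 4)
                    .reshape(n, c, h, w))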
    """
    helper = LayerHelper("shuffle_channel", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    if not isinstance(group, int):
        raise TypeError("group must be int type")

    helper.append_op(
        type="shuffle_channel",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"group": group})
    return out


@templatedoc()
def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
    """

    **Temporal Shift Operator**

    ${comment}

    Args:
        x(Tensor): ${x_comment}
        seg_num(int): ${seg_num_comment}
        shift_ratio(float): ${shift_ratio_comment}
        name(str, optional): For detailed information, please refer
                             to :ref:`api_guide_Name`. Usually there is no need
                             to set it, and it is None by default.
        data_format(str, optional): Data format that specifies the layout of input.
            It can be "NCHW" or "NHWC". Default: "NCHW".

    Returns:
        out(Tensor): The temporal shifting result is a tensor with the
        same shape and same data type as the input.

    Raises:
        TypeError: seg_num must be int type.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            input = paddle.randn([6, 4, 2, 2])
            out = F.temporal_shift(x=input, seg_num=2, shift_ratio=0.2)
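
        Conceptually, the op reshapes [N*T, C, H, W] to [N, T, C, H, W] and
        shifts one fold of channels along the time axis in each direction; a
        numpy sketch of the idea (illustrative, not the exact kernel):

        .. code-block:: python

            import numpy as np

            nt, c, h, w, t = 6, 4, 2, 2, 2
            ratio = 0.25
            x = np.random.rand(nt, c, h, w).astype('float32')
            xr = x.reshape(nt // t, t, c, h, w)
            c1 = int(c * ratio)
            out = np.zeros_like(xr)
            out[:, :-1, :c1] = xr[:, 1:, :c1]              # fold 1: shift in time
            out[:, 1:, c1:2 * c1] = xr[:, :-1, c1:2 * c1]  # fold 2: shift back
            out[:, :, 2 * c1:] = xr[:, :, 2 * c1:]         # rest unchanged
            out = out.reshape(nt, c, h, w)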
    """
    if data_format not in ["NCHW", "NHWC"]:
        raise ValueError("Attr(data_format) should be 'NCHW' or 'NHWC'. "
                         "Received Attr(data_format): {}.".format(data_format))
    if in_dygraph_mode():
        return _C_ops.temporal_shift(x, 'seg_num', seg_num, 'shift_ratio',
                                     shift_ratio, 'data_format', data_format)

    helper = LayerHelper("temporal_shift", **locals())
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
    check_type(seg_num, 'seg_num', int, 'temporal_shift')
    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    if not isinstance(seg_num, int):
        raise TypeError("seg_num must be int type.")

    helper.append_op(
        type="temporal_shift",
        inputs={"X": x},
        outputs={"Out": out},
13563 13564 13565 13566 13567
        attrs={
            "seg_num": seg_num,
            "shift_ratio": shift_ratio,
            "data_format": data_format
        })
    return out


class PyFuncRegistry(object):
    _register_funcs = []

    def __init__(self, func):
        if func is None or not callable(func):
            raise TypeError('func must be a Python function')

        self._func = func
        # find named args using reflection
        args = inspect.getargspec(self._func)
        if len(args[0]) == 0 and args[1] is None and args[2] is None:
            # Function with no inputs
            self._named_args = None
        else:
            self._named_args = args[0]
        self._id = core._append_python_callable_object_and_return_id(self)
        '''
        Why record self here?

        1. For debug usage. Users can call
           :code:`py_func.registered_func(idx)` method
           to find the registered function corresponding
           to :code:`idx`.

        2. For increasing reference count of self.
           It seems that to release Python object
           whose reference count is 1 would cause
           segmentation fault error in C++ side.
           May be lack of Python GC in C++ side?
        '''
        PyFuncRegistry._register_funcs.append(self)

    @classmethod
    def registered_func(cls, idx):
        return cls._register_funcs[idx]._func

    @classmethod
    def registered_func_num(cls):
        return len(cls._register_funcs)

    @property
    def id(self):
        return self._id

    def __call__(self, *args):
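        # Positional tensor args are matched to the registered function's
        # named parameters first; any remaining args are passed through as-is.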
        if self._named_args is None:
            func_ret = self._func()
        else:
            kwargs = dict()
            idx = 0
            for arg in self._named_args:
                kwargs[arg] = args[idx]
                idx += 1
            func_ret = self._func(*args[idx:], **kwargs)

        if not isinstance(func_ret, (list, tuple)):
            func_ret = (func_ret, )

        ret = []
        for each_ret in func_ret:
            if each_ret is None or isinstance(each_ret, core.LoDTensor):
                ret.append(each_ret)
                continue

            if not isinstance(each_ret, np.ndarray):
                each_ret = np.array(each_ret)

            tensor = core.LoDTensor()
            tensor.set(each_ret, core.CPUPlace())
            ret.append(tensor)

        return tuple(ret)


@static_only
@templatedoc()
def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
    """
    :api_attr: Static Graph

    This OP is used to register customized Python OP to Paddle. The design
    principle of py_func is that Tensor and numpy array can be converted to each
    other easily. So you can use Python and numpy API to register a python OP.

    The forward function of the registered OP is ``func`` and the backward function
    of that is ``backward_func``. Paddle will call ``func`` at forward runtime and
    call ``backward_func`` at backward runtime (if ``backward_func`` is not None).
    ``x`` is the input of ``func``, whose type must be Tensor; ``out`` is
    the output of ``func``, whose type can be either Tensor or numpy array.

    The input of the backward function ``backward_func`` is ``x``, ``out`` and
    the gradient of ``out``. If ``out`` has no gradient, the relevant input of
    ``backward_func`` is None. If ``x`` does not have a gradient, the user should
    return None in ``backward_func``.

    The data type and shape of ``out`` should also be set correctly before this
    API is called, and the data type and shape of the gradient of ``out`` and
    ``x`` will be inferred automatically.

    This API can also be used to debug the neural network by setting the ``func``
    as a function that only prints variables.

    Args:
        func (callable): The forward function of the registered OP. When the network
            is running, the forward output ``out`` will be calculated according to this
            function and the forward input ``x``. In ``func`` , it's suggested that we
            actively convert Tensor into a numpy array, so that we can use Python and
            numpy API arbitrarily. If not, some operations of numpy may not be compatible.
        x (Tensor|tuple(Tensor)|list[Tensor]): The input of the forward function ``func``.
            It can be Tensor|tuple(Tensor)|list[Tensor]. In addition, Multiple Tensor
            should be passed in the form of tuple(Tensor) or list[Tensor].
        out (T|tuple(T)|list[T]): The output of the forward function ``func``, it can be
            T|tuple(T)|list[T], where T can be either Tensor or numpy array. Since Paddle
            cannot automatically infer the shape and type of ``out``, you must create
            ``out`` in advance.
        backward_func (callable, optional): The backward function of the registered OP.
            Its default value is None, which means there is no reverse calculation. If
            it is not None, ``backward_func`` is called to calculate the gradient of
            ``x`` when the network is at backward runtime.
        skip_vars_in_backward_input (Tensor, optional): It's used to limit the input
            list of ``backward_func``, and it can be Tensor|tuple(Tensor)|list[Tensor].
            It must belong to either ``x`` or ``out``. The default value is None, which means
            that no tensors need to be removed from ``x`` and ``out``. If it is not None,
            these tensors will not be the input of ``backward_func``. This parameter is only
            useful when ``backward_func`` is not None.

    Returns:
        Tensor|tuple(Tensor)|list[Tensor]: The output ``out`` of the forward function ``func``.

    Examples:
        .. code-block:: python

            # example 1:
            import paddle
            import six
            import numpy as np

            paddle.enable_static()

            # Creates a forward function, Tensor can be input directly without
            # being converted into numpy array.
            def tanh(x):
                return np.tanh(x)

            # Skip x in backward function and return the gradient of x
            # Tensor must be actively converted to numpy array, otherwise,
            # operations such as +/- can't be used.
            def tanh_grad(y, dy):
                return np.array(dy) * (1 - np.square(np.array(y)))

            # Creates a forward function for debugging running networks(print value)
            def debug_func(x):
                print(x)

            def create_tmp_var(name, dtype, shape):
                return paddle.static.default_main_program().current_block().create_var(
                    name=name, dtype=dtype, shape=shape)

            def simple_net(img, label):
                hidden = img
                for idx in six.moves.range(4):
                    hidden = paddle.static.nn.fc(hidden, size=200)
                    new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
                        dtype=hidden.dtype, shape=hidden.shape)

                    # User-defined forward and backward
                    hidden = paddle.static.py_func(func=tanh, x=hidden,
                        out=new_hidden, backward_func=tanh_grad,
                        skip_vars_in_backward_input=hidden)

                    # User-defined debug functions that print out the input Tensor
                    paddle.static.py_func(func=debug_func, x=hidden, out=None)

                prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
                ce_loss = paddle.nn.loss.CrossEntropyLoss()
                return ce_loss(prediction, label)

            x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
            y = paddle.static.data(name='y', shape=[1,10], dtype='int64')
            res = simple_net(x, y)

            exe = paddle.static.Executor(paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            input1 = np.random.random(size=[1,4]).astype('float32')
            input2 = np.random.randint(1, 10, size=[1,10], dtype='int64')
            out = exe.run(paddle.static.default_main_program(),
                          feed={'x':input1, 'y':input2},
                          fetch_list=[res.name])
            print(out)

        .. code-block:: python

            # example 2:
            # This example shows how to turn Tensor into numpy array and
            # use numpy API to register a Python OP
            import paddle
            import numpy as np

            paddle.enable_static()

            def element_wise_add(x, y):
                # Tensor must be actively converted to numpy array, otherwise,
                # numpy.shape can't be used.
                x = np.array(x)
                y = np.array(y)

                if x.shape != y.shape:
                    raise AssertionError("the shape of inputs must be the same!")

                result = np.zeros(x.shape, dtype='int32')
                for i in range(len(x)):
                    for j in range(len(x[0])):
                        result[i][j] = x[i][j] + y[i][j]

                return result

            def create_tmp_var(name, dtype, shape):
                return paddle.static.default_main_program().current_block().create_var(
                            name=name, dtype=dtype, shape=shape)

            def py_func_demo():
                start_program = paddle.static.default_startup_program()
                main_program = paddle.static.default_main_program()

                # Input of the forward function
                x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
                y = paddle.static.data(name='y', shape=[2,3], dtype='int32')

                # Output of the forward function, name/dtype/shape must be specified
                output = create_tmp_var('output', 'int32', [2,3])

                # Multiple Variable should be passed in the form of tuple(Variable) or list[Variable]
                paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)

                exe = paddle.static.Executor(paddle.CPUPlace())
                exe.run(start_program)

                # Feed numpy array to main_program
                input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
                input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
                out = exe.run(main_program,
                            feed={'x':input1, 'y':input2},
                            fetch_list=[output.name])
                print("{0} + {1} = {2}".format(input1, input2, out))

            py_func_demo()

            # Reference output:
            # [[5, 9, 9]   + [[7, 8, 4]  =  [array([[12, 17, 13]
            #  [7, 5, 2]]     [1, 3, 3]]            [8, 8, 5]], dtype=int32)]
    """
    helper = LayerHelper('py_func', **locals())
    check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
    if x is None:
        x = []
    elif isinstance(x, Variable):
        x = [x]
    elif isinstance(x, tuple):
        x = list(x)
    elif not isinstance(x, (list, tuple, Variable)):
        raise TypeError('Input must be Variable/list(Variable)/tuple(Variable)')
    check_type(out, 'Out', (list, tuple, Variable, type(None)), 'py_func')
    if out is None:
        out_list = []
    elif isinstance(out, Variable):
        out_list = [out]
    elif isinstance(out, tuple):
        out_list = list(out)
    elif isinstance(out, list):
        out_list = out
    else:
        raise TypeError(
            'Output must be Variable/list(Variable)/tuple(Variable)')

    fwd_func_id = PyFuncRegistry(func).id
    bwd_func_id = PyFuncRegistry(
        backward_func).id if backward_func is not None else -1

    for each_out in out_list:
        if len(each_out.shape) == 0:
            raise ValueError(
                'Output shapes of py_func op should be provided by users manually'
            )

    backward_skip_vars = set()
    if backward_func is not None and skip_vars_in_backward_input is not None:
        if isinstance(skip_vars_in_backward_input, Variable):
            skip_vars_in_backward_input = [skip_vars_in_backward_input]

        fwd_in_out = [v.name for v in x]
        fwd_in_out.extend([v.name for v in out_list])
        fwd_in_out = set(fwd_in_out)
        backward_skip_vars = set()
        for v in skip_vars_in_backward_input:
            if not v.name in fwd_in_out:
                raise ValueError(
                    'Variable {} is not found in forward inputs and outputs'
                    .format(v.name))
            backward_skip_vars.add(v.name)

    helper.append_op(
        type='py_func',
        inputs={'X': x},
        outputs={'Out': out_list},
        attrs={
            'forward_callable_id': fwd_func_id,
            'backward_callable_id': bwd_func_id,
            'backward_skip_vars': list(backward_skip_vars)
        })
    return out


# For debug usage
py_func.registered_func = PyFuncRegistry.registered_func
py_func.registered_func_num = PyFuncRegistry.registered_func_num


@templatedoc()
def psroi_pool(input,
               rois,
               output_channels,
               spatial_scale,
               pooled_height,
               pooled_width,
               name=None):
    """

    ${comment}

    Parameters:
        input (Variable): ${x_comment}
        rois (Variable): LoDTensor, ROIs (Regions of Interest) to pool over. It should be
                         a 2-D LoDTensor of shape (num_rois, 4), the lod level
                         is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
                         the top left coordinates, and (x2, y2) is the bottom
                         right coordinates. The data type is the same as `input`
        output_channels (int): ${output_channels_comment}
        spatial_scale (float): ${spatial_scale_comment} Default: 1.0
        pooled_height (int): ${pooled_height_comment} Default: 1
        pooled_width (int): ${pooled_width_comment} Default: 1
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`

    Returns:
        ${out_comment}.

    Return Type:
        Variable

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            import paddle
            paddle.enable_static()
            x = fluid.data(name='x', shape=[100, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.psroi_pool(x, rois, 10, 1.0, 7, 7)
    """
    helper = LayerHelper('psroi_pool', **locals())
    # check attrs
    if not isinstance(output_channels, int):
        raise TypeError("output_channels must be int type")
    if not isinstance(spatial_scale, float):
        raise TypeError("spatial_scale must be float type")
    if not isinstance(pooled_height, int):
        raise TypeError("pooled_height must be int type")
    if not isinstance(pooled_width, int):
        raise TypeError("pooled_width must be int type")
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type='psroi_pool',
        inputs={'X': input,
                'ROIs': rois},
        outputs={'Out': out},
        attrs={
            'output_channels': output_channels,
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width
        })
    return out


@templatedoc()
def prroi_pool(input,
               rois,
               spatial_scale=1.0,
               pooled_height=1,
               pooled_width=1,
               batch_roi_nums=None,
               name=None):
    """

    The precise ROI pooling implementation for Paddle. Reference: https://arxiv.org/pdf/1807.11590.pdf

    Args:
        input (Variable): The input of precise ROI pooling. The shape of the input tensor is
                        [N,C,H,W]. Where N is batch size,C is number of input channels,H
                        is height of the feature, and W is the width of the feature.
        rois (Variable): ROIs (Regions of Interest) to pool over. It should be
                        a 2-D LoDTensor or Tensor of shape (num_rois, 4), the lod level
                        is 1 when it is LoDTensor. The LoD includes the rois's batch index
                        information. If rois is a Tensor, its batch index information should
                        be provided by batch_roi_nums.
                        Given as [[x1, y1, x2, y2], ...], (x1, y1) is
                        the top left coordinates, and (x2, y2) is the bottom
                        right coordinates.
        spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width).
                             Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
        pooled_height (integer): The pooled output height. Default: 1.
        pooled_width (integer): The pooled output width. Default: 1.
        batch_roi_nums (Variable): The number of roi for each image in batch. It
                         should be 1-D Tensor, with shape [N] and dtype int64,
                         where N is the batch size. Default: None. Note: The lod of input should be
                         empty when batch_roi_nums has values;
        name (str, default None): The name of this operation.

    Returns:
        Variable(Tensor): The shape of the returned Tensor is (N, C, pooled_height, pooled_width), with value type float32 or float16. N, C denote batch_size and channels of input respectively.

    Examples:
        .. code-block:: python

            ## prroi_pool without batch_roi_num
            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 490, 28, 28], dtype='float32')
            rois = fluid.data(name='rois', shape=[None, 4], lod_level=1, dtype='float32')
            pool_out = fluid.layers.prroi_pool(x, rois, 1.0, 7, 7)

            ## prroi_pool with batch_roi_num
            batchsize=4
            x2 = fluid.data(name='x2', shape=[batchsize, 490, 28, 28], dtype='float32')
            rois2 = fluid.data(name='rois2', shape=[batchsize, 4], dtype='float32')
            batch_rois_num = fluid.data(name='rois_nums', shape=[batchsize], dtype='int64')
            pool_out2 = fluid.layers.prroi_pool(x2, rois2, 1.0, 7, 7, batch_roi_nums=batch_rois_num)


    """
    check_variable_and_dtype(input, 'input', ['float32'], 'prroi_pool')
    check_variable_and_dtype(rois, 'rois', ['float32'], 'prroi_pool')
    helper = LayerHelper('prroi_pool', **locals())
    # check attrs
    if not isinstance(spatial_scale, float):
        raise TypeError("spatial_scale must be float type")
    if not isinstance(pooled_height, int):
        raise TypeError("pooled_height must be int type")
    if not isinstance(pooled_width, int):
        raise TypeError("pooled_width must be int type")
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    inputs_op = {'X': input, 'ROIs': rois}
    if batch_roi_nums is not None:
        inputs_op['BatchRoINums'] = batch_roi_nums
    helper.append_op(
        type='prroi_pool',
        inputs=inputs_op,
        outputs={'Out': out},
        attrs={
            'spatial_scale': spatial_scale,
            'pooled_height': pooled_height,
            'pooled_width': pooled_width
        })
    return out


def pixel_shuffle(x, upscale_factor):
    """

    This op rearranges elements in a tensor of shape [N, C, H, W]
    to a tensor of shape [N, C/r**2, H*r, W*r].
    This is useful for implementing efficient sub-pixel convolution
    with a stride of 1/r.
    Please refer to the paper: `Real-Time Single Image and Video Super-Resolution
    Using an Efficient Sub-Pixel Convolutional Neural Network <https://arxiv.org/abs/1609.05158v2>`_ .
    by Shi et al. (2016) for more details.

    Parameters:

        x(Variable): 4-D tensor, the data type should be float32 or float64.
        upscale_factor(int): factor to increase spatial resolution.

    Returns:
        Out(Variable): Reshaped tensor according to the new dimension.

    Raises:
        ValueError: If the square of upscale_factor cannot divide the channels of input.

    Examples:
        .. code-block:: python

            # declarative mode
            import paddle.fluid as fluid
            import numpy as np
            input = fluid.data(name="input", shape=[2,9,4,4])
            output = fluid.layers.pixel_shuffle(x=input, upscale_factor=3)
            place = fluid.CPUPlace()
            exe = fluid.Executor(place)
            exe.run(fluid.default_startup_program())

            input_data = np.random.rand(2,9,4,4).astype("float32")
            output_data = exe.run(fluid.default_main_program(),
                feed={"input":input_data},
                fetch_list=[output],
                return_numpy=True)

            # print(output.shape)
            # (2L, 1L, 12L, 12L)
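
        The rearrangement itself is a reshape-transpose-reshape on the channel
        and spatial axes; a numpy sketch (illustrative only):

        .. code-block:: python

            import numpy as np

            n, c, h, w, r = 2, 9, 4, 4, 3
            x = np.random.rand(n, c, h, w).astype("float32")
            out = (x.reshape(n, c // (r * r), r, r, h, w)
                    .transpose(0, 1, 4, 2, 5, 3)
                    .reshape(n, c // (r * r), h * r, w * r))
            # out.shape == (2, 1, 12, 12)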

    """

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
    helper = LayerHelper("pixel_shuffle", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    if not isinstance(upscale_factor, int):
        raise TypeError("upscale factor must be int type")

    helper.append_op(
        type="pixel_shuffle",
        inputs={"X": x},
        outputs={"Out": out},
        attrs={"upscale_factor": upscale_factor})
    return out


def fsp_matrix(x, y):
    """

    **FSP matrix op**

    This op is used to calculate the flow of solution procedure (FSP) matrix of two 4-D Tensor feature maps.
    Given feature map x with shape [x_channel, h, w] and feature map y with shape
    [y_channel, h, w], we can get the fsp matrix of x and y in two steps:

    1. reshape x into matrix with shape [x_channel, h * w] and reshape and
       transpose y into matrix with shape [h * w, y_channel].
    2. multiply x and y to get fsp matrix with shape [x_channel, y_channel].

    The output is a batch of fsp matrices.
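
    A numpy sketch of these two steps (illustrative shapes; the actual op may
    additionally normalize by `h * w`):

    .. code-block:: python

        import numpy as np

        b, cx, cy, h, w = 2, 3, 4, 5, 5
        x = np.random.rand(b, cx, h, w).astype('float32')
        y = np.random.rand(b, cy, h, w).astype('float32')
        xm = x.reshape(b, cx, h * w)                     # step 1: flatten x
        ym = y.reshape(b, cy, h * w).transpose(0, 2, 1)  # step 1: flatten + transpose y
        fsp = np.matmul(xm, ym)                          # step 2: [b, cx, cy]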

    Args:

        x (Variable): A 4-D Tensor feature map with shape [batch_size, x_channel, height, width].
                      A Tensor with type float32, float64.
        y (Variable): A 4-D Tensor feature map with shape [batch_size, y_channel, height, width].
                      The y_channel can be different from the x_channel of Input(X)
                      while the other dimensions must be the same with Input(X)'s. A Tensor with
                      type float32, float64.

    Returns:

        fsp matrix (Variable): The output of FSP op with shape [batch_size, x_channel, y_channel].
        The x_channel is the channel of x and the y_channel is the channel of y. A Tensor with
        type float32, float64.

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            data = fluid.data(name='data', shape=[None, 3, 32, 32])
            feature_map_0 = fluid.layers.conv2d(data, num_filters=2,
                                                filter_size=3)
            feature_map_1 = fluid.layers.conv2d(feature_map_0, num_filters=2,
                                                filter_size=1)
            loss = fluid.layers.fsp_matrix(feature_map_0, feature_map_1)

    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'fsp_matrix')
    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'fsp_matrix')
    helper = LayerHelper('fsp_matrix', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype(
        input_param_name='x'))
    helper.append_op(type='fsp', inputs={'X': x, 'Y': y}, outputs={'Out': out})
    return out


def continuous_value_model(input, cvm, use_cvm=True):
    r"""

    **continuous_value_model layers**

    Now, this OP is used in CTR project to remove or dispose show and click value in :attr:`input`.

    :attr:`input` is an embedding vector including show and click value, whose shape is :math:`[N, D]` (N is batch size. D is `2 + embedding dim` ).
    Show and click occupy the first two dims of the embedding vector D.
    If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and :math:`log(click)` , and output shape is :math:`[N, D]` .
    If :attr:`use_cvm` is False, it will remove show and click from :attr:`input` , and output shape is :math:`[N, D - 2]` .
    :attr:`cvm` is show_click info, whose shape is :math:`[N, 2]` .

    Args:
        input (Variable): The input variable. A 2-D LoDTensor with shape :math:`[N, D]` , where N is the batch size, D is `2 + the embedding dim` . `lod level = 1` .
        A Tensor with type float32, float64.
        cvm (Variable): Show and click variable. A 2-D Tensor with shape :math:`[N, 2]` , where N is the batch size, 2 is show and click.
        A Tensor with type float32, float64.
        use_cvm  (bool):  Use show_click or not. if use, the output dim is the same as input.
                          if not use, the output dim is `input dim - 2` (remove show and click)

    Returns:

        Variable: A 2-D LodTensor with shape :math:`[N, M]` . If :attr:`use_cvm` = True, M is equal to input dim D; if False, M is equal to `D - 2`. \
        A Tensor with same type as input.

    Examples:

        .. code-block:: python

          import paddle.fluid as fluid
          input = fluid.data(name="input", shape=[64, 1], dtype="int64")
          label = fluid.data(name="label", shape=[64, 1], dtype="int64")
          embed = fluid.layers.embedding(
                            input=input,
                            size=[100, 11],
                            dtype='float32')
          ones = fluid.layers.fill_constant_batch_size_like(input=label, shape=[-1, 1], dtype="int64", value=1)
          show_clk = fluid.layers.cast(fluid.layers.concat([ones, label], axis=1), dtype='float32')
          show_clk.stop_gradient = True
          input_with_cvm = fluid.layers.continuous_value_model(embed, show_clk, True)

    """
    helper = LayerHelper('cvm', **locals())
    out = helper.create_variable(dtype=input.dtype)
    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
                             'cvm')
    helper.append_op(
        type='cvm',
        inputs={'X': [input],
                'CVM': [cvm]},
        outputs={'Y': [out]},
        attrs={"use_cvm": use_cvm})
    return out


def where(condition):
    """
    Return an int64 tensor with rank 2, specifying the coordinates of the true elements in `condition`.

    Args:
        condition(Variable): A bool tensor with rank at least 1, the data type is bool.

    Returns:
        Variable: A 2-D Tensor with data type int64, storing the coordinates of all true elements in `condition`.

    Examples:
        .. code-block:: python

             import paddle.fluid as fluid
             import paddle.fluid.layers as layers
             import numpy as np

             # condition is a tensor [True, False, True]
             condition = layers.assign(np.array([1, 0, 1], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0], [2]]

             # condition is a tensor [[True, False], [False, True]]
             condition = layers.assign(np.array([[1, 0], [0, 1]], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[0, 0], [1, 1]]

             # condition is a tensor [False, False, False]
             condition = layers.assign(np.array([0, 0, 0], dtype='int32'))
             condition = layers.cast(condition, 'bool')
             out = layers.where(condition) # [[]]
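
        The result is the same as numpy's argwhere on the condition array
        (a cross-check, not part of the API):

        .. code-block:: python

             import numpy as np
             np.argwhere(np.array([[True, False], [False, True]]))  # [[0, 0], [1, 1]]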

    """
    if in_dygraph_mode():
        return _C_ops.where_index(condition)

    helper = LayerHelper("where_index", **locals())

    out = helper.create_variable_for_type_inference(
        dtype=core.VarDesc.VarType.INT64)

    helper.append_op(
14250 14251 14252
        type='where_index',
        inputs={'Condition': condition},
        outputs={'Out': [out]})
    return out


@deprecated(since="2.0.0", update_to="paddle.sign")
def sign(x):
    r"""
    This OP returns sign of every element in `x`: 1 for positive, -1 for negative and 0 for zero.

    Args:
        x(Variable|numpy.ndarray): The input variable could be N-D tensor or N-D numpy array, \
            the input data type is float32 or float64.

    Returns:
        Variable: The output sign tensor with identical shape to input :attr:`x`. The output data type is the same as the input data type.

    Examples:
        .. code-block:: python

          import paddle.fluid as fluid
          import numpy as np

          # [1.0, 0.0, -1.0]
          data = fluid.layers.sign(np.array([3.0, 0.0, -2.0], dtype='float32'))
    """

    helper = LayerHelper("sign", **locals())
    check_type(x, 'x', (Variable, np.ndarray), 'sign')
    if isinstance(x, np.ndarray):
        x = assign(x)
    check_dtype(x.dtype, 'x', ['float16', 'float32', 'float64'], 'sign')
    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(type='sign', inputs={'X': [x]}, outputs={'Out': [out]})

    return out


def unique(x, dtype='int32'):
    r"""
    Return a unique tensor for `x` and an index tensor pointing to this unique tensor.

    Args:
        x(Tensor): A 1-D input tensor, it's data type should be float32, float64, int32, int64.
        dtype(np.dtype|str, optional): The type of index tensor: int32, int64. Default: int32.

    Returns:
        tuple: (out, index). `out` is the unique tensor for `x`, with identical dtype to `x`, and \
            `index` is an index tensor pointing to `out`, by which user can recover the original `x` tensor.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index = fluid.layers.unique(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
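
             # the original x can be recovered by indexing out with index:
             # np.array([2, 3, 1, 5])[[0, 1, 1, 2, 3, 1]] -> [2, 3, 3, 1, 5, 3]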
    """

    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique")
    helper = LayerHelper("unique", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    index = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='unique',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [out],
                 'Index': [index]})

    return out, index


def unique_with_counts(x, dtype='int32'):
    r"""
    This OP returns a unique tensor for `x` , a count tensor giving the number of occurrences of each unique element in the raw input,
    and an index tensor pointing to this unique tensor.

    **NOTICE**: This op supports the variable type of Tensor only.

    Args:
        x(Variable): A 1-D input tensor with input shape of :math:`[N]` , the input data type is float32, float64, int32, int64.
        dtype(np.dtype|core.VarDesc.VarType|str): The type of count and index tensor, it could be int32, int64. Default value is int32.

    Returns:
        tuple: (out, index, count), where each element is a Tensor. The data type of :attr:`out` is the same as the input :attr:`x`, \
        and the data type of :attr:`index` and :attr:`count` is int32 or int64. :attr:`out` is the unique tensor for the input :attr:`x`, \
        with shape :math:`[K]`, where `K` may differ from `N` in the shape of :attr:`x`. :attr:`index` is an index tensor pointing \
        to :attr:`out`, with shape :math:`[N]`, the same as the input :attr:`x`. :attr:`count` is the count of each unique element in \
        :attr:`x`, with shape :math:`[K]`, the same as the output :attr:`out`.

    Examples:
        .. code-block:: python

             import numpy as np
             import paddle.fluid as fluid
             x = fluid.layers.assign(np.array([2, 3, 3, 1, 5, 3], dtype='int32'))
             out, index, count = fluid.layers.unique_with_counts(x) # out is [2, 3, 1, 5]; index is [0, 1, 1, 2, 3, 1]
                                                        # count is [1, 3, 1, 1]
             # x.shape=(6,), out.shape=(4,), index.shape=(6,), count.shape=(4,)
    """
    check_variable_and_dtype(x, "x", ['float32', 'float64', 'int32', 'int64'],
                             "unique_with_counts")
    if not (dtype == 'int32' or dtype == 'int64'):
        raise TypeError(
            "Op unique_with_counts, index dtype must be int32 or int64")

    if x is None or len(x.shape) != 1:
        raise ValueError(
            "Op unique_with_counts, x must not be null and size of dim must be 1"
        )

    helper = LayerHelper("unique_with_counts", **locals())

    out = helper.create_variable_for_type_inference(dtype=x.dtype)

    index = helper.create_variable_for_type_inference(dtype)

    count = helper.create_variable_for_type_inference(dtype)

    helper.append_op(
        type='unique_with_counts',
        inputs={'X': x},
        attrs={'dtype': convert_np_dtype_to_dtype_(dtype)},
        outputs={'Out': [out],
                 'Index': [index],
                 'Count': [count]})

    return out, index, count


def deformable_conv(input,
                    offset,
                    mask,
                    num_filters,
                    filter_size,
                    stride=1,
                    padding=0,
                    dilation=1,
                    groups=None,
                    deformable_groups=None,
                    im2col_step=None,
                    param_attr=None,
                    bias_attr=None,
14400
                    modulated=True,
14401
                    name=None):
    r"""
    :api_attr: Static Graph

    **Deformable Convolution op**

    Compute 2-D deformable convolution on 4-D input.
    Given input image x, output feature map y, the deformable convolution operation can be expressed as follows:


    Deformable Convolution v2:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k) * \Delta m_k}

    Deformable Convolution v1:

    .. math::

        y(p) = \sum_{k=1}^{K}{w_k * x(p + p_k + \Delta p_k)}

    Where :math:`\Delta p_k` and :math:`\Delta m_k` are the learnable offset and modulation scalar for the k-th location,
    in which :math:`\Delta m_k` is 1 in deformable convolution v1. Please refer to `Deformable ConvNets v2: More Deformable, Better Results
    <https://arxiv.org/abs/1811.11168v2>`_ and `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_.
14426

    Example:
        - Input:

          Input shape: :math:`(N, C_{in}, H_{in}, W_{in})`

          Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)`

          Offset shape: :math:`(N, 2 * deformable\_groups * H_f * W_f, H_{in}, W_{in})`

          Mask shape: :math:`(N, deformable\_groups * H_f * W_f, H_{in}, W_{in})`

        - Output:

          Output shape: :math:`(N, C_{out}, H_{out}, W_{out})`

        Where

        .. math::

            H_{out}&= \frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\
            W_{out}&= \frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1

    Args:
        input (Variable): The input image with [N, C, H, W] format. A Tensor with type
            float32, float64.
        offset (Variable): The input coordinate offset of deformable convolution layer.
            A Tensor with type float32, float64.
        mask (Variable, Optional): The input mask of deformable convolution layer.
            A Tensor with type float32, float64. It should be None when you use
            deformable convolution v1.
        num_filters(int): The number of filters. It is the same as the number of
            output image channels.
        filter_size (int|tuple): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_H, filter_size_W).
            Otherwise, the filter will be a square.
        stride (int|tuple): The stride size. If stride is a tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride. Default: stride = 1.
        padding (int|tuple): The padding size. If padding is a tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding. Default: padding = 0.
        dilation (int|tuple): The dilation size. If dilation is a tuple, it must
            contain two integers, (dilation_H, dilation_W). Otherwise, the
            dilation_H = dilation_W = dilation. Default: dilation = 1.
        groups (int): The groups number of the deformable conv layer. According to
            grouped convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1.
        deformable_groups (int): The number of deformable group partitions.
            Default: deformable_groups = 1.
        im2col_step (int): Maximum number of images per im2col computation;
            The total batch size should be divisible by this value or smaller
            than this value; if you face an out of memory problem, you can try
            to use a smaller value here.
            Default: im2col_step = 64.
        param_attr (ParamAttr, Optional): The parameter attribute for learnable parameters/weights
            of deformable conv. If it is set to None or one attribute of ParamAttr,
            deformable conv will create ParamAttr as param_attr.
            If the Initializer of the param_attr is not set, the parameter is
            initialized with :math:`Normal(0.0, std)`, and the
            :math:`std` is :math:`(\frac{2.0}{filter\_elem\_num})^{0.5}`. Default: None.
        bias_attr (ParamAttr|bool, Optional): The parameter attribute for the bias of
            deformable conv layer. If it is set to False, no bias will be added
            to the output units. If it is set to None or one attribute of ParamAttr, conv2d
            will create ParamAttr as bias_attr. If the Initializer of the bias_attr
            is not set, the bias is initialized zero. Default: None.
        modulated (bool): Determines which version to use, v1 or v2; v2 is \
            used when True. Default: True.
        name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
                        Generally, no setting is required. Default: None.
    Returns:
        Variable: The tensor variable storing the deformable convolution \
                  result. A Tensor with type float32, float64.
    Raises:
        ValueError: If the shapes of input, filter_size, stride, padding and
                    groups mismatch.
    Examples:
        .. code-block:: python

          #deformable conv v2:

          import paddle.fluid as fluid
          import paddle
          paddle.enable_static()
          
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          mask = fluid.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=mask,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=True)
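          # a quick shape check, assuming the H_out/W_out formulas above:
          # H_out = (32 + 2*1 - 3) // 1 + 1 = 32, so out.shape = (-1, 2, 32, 32)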

          #deformable conv v1:

          import paddle.fluid as fluid
          C_in, H_in, W_in = 3, 32, 32
          filter_size, deformable_groups = 3, 1
          data = fluid.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
          offset = fluid.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
          out = fluid.layers.deformable_conv(input=data, offset=offset, mask=None,
                                             num_filters=2, filter_size=filter_size, padding=1, modulated=False)
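          # by the same formulas, out.shape = (-1, 2, 32, 32) here as well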
    """

    check_variable_and_dtype(input, "input", ['float32', 'float64'],
                             'deformable_conv')
    check_variable_and_dtype(offset, "offset", ['float32', 'float64'],
                             'deformable_conv')
    check_type(mask, 'mask', (Variable, type(None)), 'deformable_conv')

    num_channels = input.shape[1]
    assert param_attr is not False, "param_attr should not be False here."

    helper = LayerHelper('deformable_conv', **locals())
    dtype = helper.input_dtype()

    if not isinstance(input, Variable):
        raise TypeError("Input of deformable_conv must be Variable")
    if not isinstance(offset, Variable):
        raise TypeError("Input Offset of deformable_conv must be Variable")

    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        num_filter_channels = num_channels // groups

    filter_size = utils.convert_to_list(filter_size, 2, 'filter_size')
    stride = utils.convert_to_list(stride, 2, 'stride')
    padding = utils.convert_to_list(padding, 2, 'padding')
    dilation = utils.convert_to_list(dilation, 2, 'dilation')

    input_shape = input.shape
    filter_shape = [num_filters, int(num_filter_channels)] + filter_size

    def _get_default_param_initializer():
        filter_elem_num = filter_size[0] * filter_size[1] * num_channels
        if filter_elem_num <= 0:
            raise ValueError(
                "Invalid filter number, expected number is larger than 0, but"
                " received {}, please check the input shape and "
                "filter size.".format(filter_elem_num))
        std = (2.0 / filter_elem_num)**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_variable_for_type_inference(dtype)

    if modulated:
        helper.append_op(
            type='deformable_conv',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
                'Mask': mask,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })

    else:
        helper.append_op(
            type='deformable_conv_v1',
            inputs={
                'Input': input,
                'Filter': filter_param,
                'Offset': offset,
            },
            outputs={"Output": pre_bias},
            attrs={
                'strides': stride,
                'paddings': padding,
                'dilations': dilation,
                'groups': groups,
                'deformable_groups': deformable_groups,
                'im2col_step': im2col_step,
            })

    output = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)
    return output


def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None):
    r"""

    This op returns a col buffer of sliding local blocks of input x, also known
    as im2col for batched 2D image tensors. For each block under the convolution filter,
    all elements will be rearranged into a column. As the convolution filter slides over
    the input feature map, a series of such columns will be formed.

    For each input :math:`x` with shape [N, C, H, W], the output shape [N, Cout, Lout]
    can be calculated as follows.

    .. math::

        dkernel[0] &= dilations[0] \times (kernel\_sizes[0] - 1) + 1

        dkernel[1] &= dilations[1] \times (kernel\_sizes[1] - 1) + 1

        hout &= \frac{H + paddings[0] + paddings[2] - dkernel[0]}{strides[0]} + 1

        wout &= \frac{W + paddings[1] + paddings[3] - dkernel[1]}{strides[1]} + 1

        Cout &= C \times kernel\_sizes[0] \times kernel\_sizes[1]

        Lout &= hout \times wout


    Parameters:
        x(Tensor):              4-D Tensor, input tensor of format [N, C, H, W],
                                  data type can be float32 or float64
        kernel_sizes(int|list):   The size of convolution kernel, should be [k_h, k_w]
                                  or an integer k treated as [k, k].
        strides(int|list):        The strides, should be [stride_h, stride_w]
                                  or an integer stride treated as [stride, stride].
                                  By default, strides will be [1, 1].
        paddings(int|list):       The paddings of each dimension, should be
                                  [padding_top, padding_left, padding_bottom, padding_right]
                                  or [padding_h, padding_w] or an integer padding.
                                  If [padding_h, padding_w] was given, it will be expanded to
                                  [padding_h, padding_w, padding_h, padding_w]. If an integer
                                  padding was given, [padding, padding, padding, padding] will
                                  be used. By default, paddings will be [0, 0, 0, 0]
        dilations(int|list):      the dilations of convolution kernel, should be
                                  [dilation_h, dilation_w], or an integer dilation treated as
                                  [dilation, dilation]. For default, it will be [1, 1].
        name(str, optional): The default value is None.
                             Normally there is no need for user to set this property.
                             For more information, please refer to :ref:`api_guide_Name`
    Returns:
        The tensor corresponding to the sliding local blocks.
        The output shape is [N, Cout, Lout] as described above.
        Cout is the  total number of values within each block,
        and Lout is the total number of such blocks.
        The data type of output is the same as the input :math:`x`

    Return Type:
        Tensor

    Examples:

        .. code-block:: python

            import paddle
            import paddle.nn.functional as F

            x = paddle.randn((100,3,224,224))
            y = F.unfold(x, [3, 3], 1, 1, 1)
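            # a quick shape check, assuming the formulas above:
            # Cout = 3*3*3 = 27, hout = wout = (224 + 1 + 1 - 3) // 1 + 1 = 224,
            # so y.shape = [100, 27, 50176]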
    """

    helper = LayerHelper("unfold", **locals())

    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'unfold')

    assert len(x.shape) == 4, \
            "input should be the format of [N, C, H, W]"

    if isinstance(kernel_sizes, int):
        kernel_sizes = [kernel_sizes, kernel_sizes]
    else:
        assert isinstance(kernel_sizes, list) and (len(kernel_sizes) == 2), \
            "kernel_sizes should either be an integer or a list of two integers"

    if isinstance(strides, int):
        strides = [strides, strides]
    else:
        assert isinstance(strides, list) and (len(strides) == 2), \
            "strides should either be an integer or a list of two integers"

    if isinstance(dilations, int):
        dilations = [dilations, dilations]
    else:
        assert isinstance(dilations, list) and (len(dilations) == 2), \
            "dilations should either be an integer or a list of two integers"

    if isinstance(paddings, int):
        paddings = [paddings] * 4
    elif isinstance(paddings, list):
        if len(paddings) == 2:
            paddings = paddings * 2
        elif len(paddings) == 4:
            pass
        else:
            raise ValueError(
                "paddings should either be an integer or a list of 2 or 4 integers"
            )
    else:
        raise ValueError(
            "Unexpected type of paddings, it should be either an integer or a list"
            "of 2 or 4 integers")

    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type="unfold",
        inputs={"X": x},
        outputs={"Y": out},
        attrs={
            "kernel_sizes": kernel_sizes,
            "strides": strides,
            "paddings": paddings,
            "dilations": dilations
        })
    return out


def deformable_roi_pooling(input,
                           rois,
                           trans,
                           no_trans=False,
                           spatial_scale=1.0,
                           group_size=[1, 1],
                           pooled_height=1,
                           pooled_width=1,
                           part_size=None,
                           sample_per_part=1,
                           trans_std=0.1,
                           position_sensitive=False,
                           name=None):
    r"""

    Deformable ROI Pooling Layer

    Performs deformable region-of-interest pooling on inputs. As described
    in `Deformable Convolutional Networks <https://arxiv.org/abs/1703.06211>`_, it obtains an offset for each bin after
    roi pooling so that pooling happens at the correct region. The batch size will change to the number of region bounding boxes after deformable_roi_pooling.

    The operation has three steps:

    1. Divide each region proposal into equal-sized sections according to pooled_width and pooled_height.

    2. Add the offset to each pixel in the ROI to get the new location, whose value is computed directly through
       bilinear interpolation from the four nearest pixels.

    3. Sample several points in each bin to get average values as output.


    Args:
        input (Variable): The input of deformable roi pooling and it is tensor which value type is float32. The shape of input is
                         [N, C, H, W]. Where N is batch size, C is number of input channels,
                         H is height of the feature, and W is the width of the feature.
        rois (Variable): ROIs (Regions of Interest) with type float32 to pool over. It should be
                         a 2-D LoDTensor of shape (num_rois, 4), and the lod level
                         is 1. Given as [[x1, y1, x2, y2], ...], (x1, y1) is
                         the top left coordinates, and (x2, y2) is the bottom
                         right coordinates, which value type is float32.
        trans (Variable): Offset of features on ROIs while pooling which value type is float32. The format is [N, C, H, W], where
                          N is number of ROIs, C is number of channels, which indicate the offset distance
                          in the x and y directions, H is pooled height, and W is pooled width.
        no_trans (bool): Whether to skip adding the offset to get a new value while roi pooling, of type bool (True or False).
                         If True, no offset will be added in the operation. Default: False.
        spatial_scale (float): Ratio of input feature map height (or width) to raw image height (or width), which value type is float32.
                         Equals the reciprocal of total stride in convolutional layers, Default: 1.0.
        group_size (list|tuple): The number of groups which input channels are divided into, given as a list or tuple of int32. (e.g. the number of input channels
                          is k1 * k2 * (C + 1), where k1 and k2 are the group height and width and C+1 is the number of output
                          channels.) e.g. (4, 6), where 4 is the height of the group and 6 is the width of the group. Default: [1, 1].
        pooled_height (int): The pooled output height, of type int32. Default: 1.
        pooled_width (int): The pooled output width, of type int32. Default: 1.
        part_size (list|tuple): The height and width of offset, given as a list or tuple of int32, e.g. (4, 6), where the height is 4 and the width is 6; the values always equal pooled_height \
                         and pooled_width. Default: if None, the default value is [pooled_height, pooled_width].
        sample_per_part (int): The number of samples in each bin, of type int32. A larger value costs more computation. Default: 1.
        trans_std (float): Coefficient of the offset, of type float32. It controls the weight of the offset. Default: 0.1.
        position_sensitive (bool): Whether to choose the deformable psroi pooling mode, of type bool (True or False). If False, the input dimension equals the output dimension. \
                                   If True, the input dimension should be output dimension * pooled_height * pooled_width. Default: False.
        name (str|None): Name of layer. Default: None.
    Returns:
        Variable: Output of deformable roi pooling. If position_sensitive is False, the output dimension equals the input dimension. If position_sensitive is True,\
                  the output dimension equals the input dimension divided by pooled_height and pooled_width.

    Examples:
      .. code-block:: python

        # position_sensitive=True
        import paddle.fluid as fluid
        input = fluid.data(name="input",
                           shape=[2, 192, 64, 64],
                           dtype='float32')
        rois = fluid.data(name="rois",
                          shape=[-1, 4],
                          dtype='float32',
                          lod_level=1)
        trans = fluid.data(name="trans",
                           shape=[2, 384, 64, 64],
                           dtype='float32')
        x = fluid.layers.deformable_roi_pooling(input=input,
                                                rois=rois,
                                                trans=trans,
                                                no_trans=False,
                                                spatial_scale=1.0,
                                                group_size=(1, 1),
                                                pooled_height=8,
                                                pooled_width=8,
                                                part_size=(8, 8),
                                                sample_per_part=4,
                                                trans_std=0.1,
                                                position_sensitive=True)
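        # a quick shape check, assuming the channel rule described above:
        # output channels = 192 / (8 * 8) = 3, so x.shape = [num_rois, 3, 8, 8]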

        # position_sensitive=False
        import paddle.fluid as fluid
        input = fluid.data(name="input",
                           shape=[2, 192, 64, 64],
                           dtype='float32')
        rois = fluid.data(name="rois",
                          shape=[-1, 4],
                          dtype='float32',
                          lod_level=1)
        trans = fluid.data(name="trans",
                           shape=[2, 384, 64, 64],
                           dtype='float32')
        x = fluid.layers.deformable_roi_pooling(input=input,
                                                rois=rois,
                                                trans=trans,
                                                no_trans=False,
                                                spatial_scale=1.0,
                                                group_size=(1, 1),
                                                pooled_height=8,
                                                pooled_width=8,
                                                part_size=(8, 8),
                                                sample_per_part=4,
                                                trans_std=0.1,
                                                position_sensitive=False)
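        # position_sensitive=False keeps the channel count,
        # so x.shape = [num_rois, 192, 8, 8]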
    """

    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(rois, 'rois', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_variable_and_dtype(trans, 'trans', ['float32', 'float64'],
                             'deformable_roi_pooling')
    check_type(group_size, 'group_size', (list, tuple),
               'deformable_roi_pooling')
    if part_size is not None:
        check_type(part_size, 'part_size', (list, tuple),
                   'deformable_roi_pooling')

    input_channels = input.shape[1]
    if not position_sensitive:
        output_channels = input_channels
    else:
        # use integer division so the output_dim attribute stays an integer
        output_channels = input_channels // pooled_height // pooled_width

    if part_size is None:
        part_height = pooled_height
        part_width = pooled_width
        part_size = [part_height, part_width]
    part_size = utils.convert_to_list(part_size, 2, 'part_size')
    group_size = utils.convert_to_list(group_size, 2, 'group_size')
    helper = LayerHelper('deformable_psroi_pooling', **locals())
    dtype = helper.input_dtype()
    output = helper.create_variable_for_type_inference(dtype)
    top_count = helper.create_variable_for_type_inference(dtype='int32')
    helper.append_op(
        type="deformable_psroi_pooling",
        inputs={"Input": input,
                "ROIs": rois,
                "Trans": trans},
        outputs={"Output": output,
                 "TopCount": top_count},
        attrs={
            "no_trans": no_trans,
            "spatial_scale": spatial_scale,
            "output_dim": output_channels,
            "group_size": group_size,
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "part_size": part_size,
            "sample_per_part": sample_per_part,
            "trans_std": trans_std
        })
    return output


@deprecated(since="2.0.0", update_to="paddle.shard_index")
def shard_index(input, index_num, nshards, shard_id, ignore_value=-1):
    """
    Reset the values of `input` according to the shard they belong to.
    Every value in `input` must be a non-negative integer, and
    the parameter `index_num` represents the integer above the maximum
    value of `input`. Thus, all values in `input` must be in the range
    [0, index_num) and each value can be regarded as the offset to the beginning
    of the range. The range is further split into multiple shards. Specifically,
    we first compute the `shard_size` according to the following formula,
    which represents the number of integers each shard can hold. So for the
    i'th shard, it can hold values in the range [i*shard_size, (i+1)*shard_size).
    ::

        shard_size = (index_num + nshards - 1) // nshards

    For each value `v` in `input`, we reset it to a new value according to the
    following formula:
    ::
   
        v = v - shard_id * shard_size if shard_id * shard_size <= v < (shard_id+1) * shard_size else ignore_value

    That is, the value `v` is set to the new offset within the range represented by the shard `shard_id`
    if it is in the range. Otherwise, we reset it to `ignore_value`.
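
    For example, with index_num=20, nshards=2 and ignore_value=-1, shard_size
    is (20 + 2 - 1) // 2 = 10. On shard 0, the value 16 falls outside
    [0, 10) and is reset to -1 while the value 1 stays 1; on shard 1, the
    value 16 would be reset to 16 - 10 = 6.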

    Args:
        input (Tensor): Input tensor with data type int64 or int32. Its last dimension must be 1.
        index_num (int): An integer representing the integer above the maximum value of `input`.
        nshards (int): The number of shards.
        shard_id (int): The index of the current shard.
        ignore_value (int): An integer value out of sharded index range.

    Returns:
        Tensor: The sharded index with the same shape and data type as `input`.

    Examples:
        .. code-block:: python

            import paddle
            label = paddle.to_tensor([[16], [1]], "int64")
            shard_label = paddle.shard_index(input=label,
                                             index_num=20,
                                             nshards=2,
                                             shard_id=0)
            print(shard_label)
            # [[-1], [1]]
    """
    check_variable_and_dtype(input, 'input', ['int64', 'int32'], 'shard_index')
    op_type = 'shard_index'
    helper = LayerHelper(op_type, **locals())
    if shard_id < 0 or shard_id >= nshards:
        raise ValueError('The shard_id(%d) should be in [0, %d)' %
                         (shard_id, nshards))

    out = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type=op_type,
        inputs={'X': [input]},
        outputs={'Out': out},
        attrs={
            'index_num': index_num,
            'nshards': nshards,
            'shard_id': shard_id,
            'ignore_value': ignore_value
        },
        stop_gradient=True)
    return out


@templatedoc()
def hard_swish(x, threshold=6.0, scale=6.0, offset=3.0, name=None):
    r"""
    This operator implements the hard_swish activation function.
    Hard_swish is proposed in MobileNetV3, and performs better than swish in computational stability and efficiency.
    For more details please refer to: https://arxiv.org/pdf/1905.02244.pdf

    The formula is as follows:

    .. math::

        out = \frac{x * (min(max(0, x+offset), threshold))}{scale}

    In the above equation:

    ``threshold`` and ``scale`` should be positive, ``offset`` can be positive or negative. It is recommended to use default parameters.

    Args:
        x (Variable): Input feature, multi-dimensional Tensor. The data type should be float32 or float64.
        threshold (float, optional): The threshold of the hard_swish activation. Default: 6.0
        scale (float, optional): The scale factor. Default: 6.0
        offset (float, optional): The offset factor. Default: 3.0
        name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`

    Returns:
        Variable: The output tensor with the same shape and data type as input.


    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import paddle
        import numpy as np
        paddle.enable_static()

        DATATYPE='float32'

        x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)

        x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
        y = fluid.layers.hard_swish(x)

        place = fluid.CPUPlace()
        #place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
        print(out)  # [[0.66666667, 1.66666667, 3., 4.]]
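        # as a check against the formula above:
        # hard_swish(1) = 1 * min(max(0, 1+3), 6) / 6 = 4/6 ≈ 0.66666667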
    """
    if in_dygraph_mode():
        return _C_ops.hard_swish(x, 'threshold', threshold, 'scale', scale,
                                 'offset', offset)

    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                             'hard_swish')

    helper = LayerHelper('hard_swish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='hard_swish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold,
               'scale': scale,
               'offset': offset})
    return out


@templatedoc()
def mish(x, threshold=20, name=None):
    r"""
    This operator implements the mish activation function.
    Refer to `Mish: A Self Regularized Non-Monotonic Neural
    Activation Function <https://arxiv.org/abs/1908.08681>`_


    The formula is as follows if :attr:`threshold` is :code:`None` or negative:

    .. math::

        out = x * \tanh(\ln(1 + e^{x}))

    The formula is as follows if :attr:`threshold` is set as a positive value:

    .. math::

        out = \begin{cases}
                  x \ast \tanh(x), \text{if } x > \text{threshold} \\
                  x \ast \tanh(e^{x}), \text{if } x < -\text{threshold} \\
                  x \ast \tanh(\ln(1 + e^{x})), \text{otherwise}
              \end{cases}

    Args:
        x (Variable): Input feature, multi-dimensional Tensor. The data type
                      should be float16, float32 or float64.
        threshold (float|None): threshold for softplus in Mish operator.
                The approximate value of softplus will be used if the absolute
                value of the input is greater than :attr:`threshold` and
                :attr:`threshold` is set as a positive value. For a None or
                negative threshold the approximate value is not used; note
                that this API requires a positive threshold. Default 20.
        name (str, optional): The default value is None. Normally there is no
                need for user to set this property. For more information, please
                refer to :ref:`api_guide_Name`

    Returns:
        Variable: The output tensor with the same shape and data type as input.


    Examples:

    .. code-block:: python

        import paddle.fluid as fluid
        import numpy as np

        DATATYPE='float32'

        x_data = np.array([i for i in range(1,5)]).reshape([1,1,4]).astype(DATATYPE)

        x = fluid.data(name="x", shape=[None,1,4], dtype=DATATYPE)
        y = fluid.layers.mish(x)

        place = fluid.CPUPlace()
        # place = fluid.CUDAPlace(0)
        exe = fluid.Executor(place)
        out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
        print(out)  # approximately [[0.8651, 1.944, 2.9865, 3.9974]]
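        # as a check against the formula above:
        # mish(1) = 1 * tanh(ln(1 + e^1)) = tanh(1.3133) ≈ 0.8651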
    """
    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
    check_type(threshold, 'threshold', (float, int), 'mish')
    assert threshold > 0, "threshold of mish should be greater than 0, " \
                          "but got {}".format(threshold)

    helper = LayerHelper('mish', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='mish',
        inputs={'X': x},
        outputs={'Out': out},
        attrs={'threshold': threshold or -1})
    return out


def gather_tree(ids, parents):
    r"""
    To be used after beam search. After beam search, we get selected ids at
    each time step and the corresponding parents in the search tree. Both ids
    and parents have the layout :attr:`[max_time, batch_size, beam_size]`. Then
    :attr:`gather_tree` is used to backtrace from the last time step and
    generate the full sequences by collecting selected ids.

    Here is an example:

    .. code-block:: text

            Given:
                ids = [[[2 2]
                        [6 1]]
                       [[3 9]
                        [6 1]]
                       [[0 1]
                        [9 0]]]
                parents = [[[0 0]
                            [1 1]]
                           [[1 0]
                            [1 0]]
                           [[0 0]
                            [0 1]]]

            Then:
                gather_tree(ids, parents)
                         = [[[2 2]
                             [1 6]]
                            [[3 3]
                             [6 1]]
                            [[0 1]
                             [9 0]]]
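
    For instance, the first output sequence [2, 3, 0] above is recovered by
    starting from the last-step id ids[2][0][0]=0 and following parents
    backwards: parents[2][0][0]=0 selects ids[1][0][0]=3, whose parent
    parents[1][0][0]=1 selects ids[0][0][1]=2.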

    Args:
        ids(Tensor): A Tensor with shape :attr:`[length, batch_size, beam_size]`
            and data type :attr:`int32` or :attr:`int64`. It contains the selected
            ids of all time steps.
        parents(Tensor): A Tensor with the same shape and data type as :attr:`ids`,
            It contains the parents corresponding to selected ids when searching
            among beams.

    Returns:
            A Tensor with the same shape and data type as :attr:`ids`. \
            It contains the full sequences. The sequences are collected from \
            :attr:`ids` by backtracing according to :attr:`parents`.

    Examples:
        .. code-block:: python

            import paddle

            ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])

            parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [1, 0]], [[0, 0], [0, 1]]])

            final_sequences = paddle.nn.functional.gather_tree(ids, parents)
            # [[[2, 2], [1, 6]], [[3, 3], [6, 1]], [[0, 1], [9, 0]]]

    """
    if in_dygraph_mode():
        return _C_ops.gather_tree(ids, parents)
    else:
        helper = LayerHelper('gather_tree', **locals())
        check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
        check_variable_and_dtype(parents, 'parents', ['int32', 'int64'],
                                 'gather_tree')
        out = helper.create_variable_for_type_inference(dtype=ids.dtype)
        helper.append_op(
            type="gather_tree",
            inputs={"Ids": ids,
                    "Parents": parents},
            outputs={"Out": out})
        return out


@deprecated(since="2.0.0", update_to="paddle.uniform")
@templatedoc()
def uniform_random(shape, dtype='float32', min=-1.0, max=1.0, seed=0,
                   name=None):
    """
    This OP returns a Tensor filled with random values sampled from a uniform
    distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.

    Examples:
    ::

        Input:
          shape = [1, 2]

        Output:
          result=[[0.8505902, 0.8397286]]

    Args:
        shape(list|tuple|Tensor): The shape of the output Tensor. If ``shape``
            is a list or tuple, the elements of it should be integers or Tensors
            (with the shape [1], and the data type int32 or int64). If ``shape``
            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
            int64).
        dtype(str|np.dtype|core.VarDesc.VarType, optional): The data type of
            the output Tensor. Supported data types: float32, float64.
            Default is float32.
        min(float|int, optional): The lower bound on the range of random values
            to generate, ``min`` is included in the range. Default is -1.0.
        max(float|int, optional): The upper bound on the range of random values
            to generate, ``max`` is excluded in the range. Default is 1.0.
        seed(int, optional): Random seed used for generating samples. 0 means
            use a seed generated by the system. Note that if seed is not 0,
            this operator will always generate the same random numbers every
            time. Default is 0.
        name(str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor filled with random values sampled from a uniform
        distribution in the range [``min``, ``max``), with ``shape`` and ``dtype``.

    Raises:
        TypeError: If ``shape`` is not list, tuple, Tensor.
        TypeError: If ``dtype`` is not float32, float64.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.fluid as fluid
            paddle.enable_static()

            # example 1:
            # attr shape is a list which doesn't contain Tensor.
            result_1 = fluid.layers.uniform_random(shape=[3, 4])
            # [[ 0.84524226,  0.6921872,   0.56528175,  0.71690357],
            #  [-0.34646994, -0.45116323, -0.09902662, -0.11397249],
            #  [ 0.433519,    0.39483607, -0.8660099,   0.83664286]]

            # example 2:
            # attr shape is a list which contains Tensor.
            dim_1 = fluid.layers.fill_constant([1], "int64", 2)
            dim_2 = fluid.layers.fill_constant([1], "int32", 3)
            result_2 = fluid.layers.uniform_random(shape=[dim_1, dim_2])
            # [[-0.9951253,   0.30757582, 0.9899647 ],
            #  [ 0.5864527,   0.6607096,  -0.8886161 ]]

            # example 3:
            # attr shape is a Tensor, the data type must be int64 or int32.
            var_shape = fluid.data(name='var_shape', shape=[2], dtype="int64")
            result_3 = fluid.layers.uniform_random(var_shape)
            # if var_shape's value is [2, 3]
            # result_3 is:
            # [[-0.8517412,  -0.4006908,   0.2551912 ],
            #  [ 0.3364414,   0.36278176, -0.16085452]]

    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.uniform_random('shape', shape, 'min',
                                     float(min), 'max',
                                     float(max), 'seed', seed, 'dtype', dtype)

    check_type(shape, 'shape', (list, tuple, Variable), 'uniform_random/rand')
    check_dtype(dtype, 'dtype', ('float32', 'float64', 'uint16'),
                'uniform_random/rand')

    inputs = dict()
    attrs = {'seed': seed, 'min': min, 'max': max, 'dtype': dtype}
    utils.get_shape_tensor_inputs(
        inputs=inputs, attrs=attrs, shape=shape, op_type='uniform_random/rand')

    helper = LayerHelper("uniform_random", **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="uniform_random", inputs=inputs, attrs=attrs,
        outputs={"Out": out})
    utils.try_set_static_shape_tensor(out, shape)
    return out


def unbind(input, axis=0):
    """
    Removes a tensor dimension, then splits the input tensor into multiple sub-Tensors.
    Args:
        input (Variable): The input variable which is an N-D Tensor, data type being float32, float64, int32 or int64.
       
        axis (int32|int64, optional): A scalar with type ``int32|int64`` shape [1]. The dimension along which to unbind. If :math:`axis < 0`, the
            dimension to unbind along is :math:`rank(input) + axis`. Default is 0.
    Returns:
        list(Variable): The list of segmented Tensor variables.

    Example:
        .. code-block:: python

            import paddle
            # input is a variable which shape is [3, 4, 5]
            input = paddle.fluid.data(
                 name="input", shape=[3, 4, 5], dtype="float32")
            [x0, x1, x2] = paddle.tensor.unbind(input, axis=0)
            # x0.shape [4, 5]
            # x1.shape [4, 5]
            # x2.shape [4, 5]
            [x0, x1, x2, x3] = paddle.tensor.unbind(input, axis=1)
            # x0.shape [3, 5]
            # x1.shape [3, 5]
            # x2.shape [3, 5]
            # x3.shape [3, 5]

    """
    helper = LayerHelper("unbind", **locals())
    check_type(input, 'input', (Variable), 'unbind')
    dtype = helper.input_dtype()
    check_dtype(dtype, 'unbind', ['float32', 'float64', 'int32', 'int64'],
                'unbind')
    if not isinstance(axis, (int)):
        raise TypeError("The type of 'axis'  must be int, but received %s." %
                        (type(axis)))
    if isinstance(axis, np.generic):
        # np.asscalar is deprecated; item() extracts the Python scalar
        axis = axis.item()
    input_shape = input.shape
    axis_ = axis if axis >= 0 else len(input_shape) + axis
    num = input_shape[axis_]
    outs = [
        helper.create_variable_for_type_inference(dtype=helper.input_dtype())
        for i in range(num)
    ]

    helper.append_op(
        type="unbind",
        inputs={"X": input},
        outputs={"Out": outs},
        attrs={"axis": axis})
    return outs