"""
All layers just related to the neural network.
"""

from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant
from ..framework import Variable
from ..param_attr import ParamAttr
from tensor import concat

__all__ = [
    'fc', 'embedding', 'dynamic_lstm', 'gru_unit', 'linear_chain_crf',
    'crf_decoding', 'cos_sim', 'cross_entropy', 'square_error_cost', 'accuracy',
    'chunk_eval', 'sequence_conv', 'conv2d', 'sequence_pool', 'pool2d',
    'batch_norm', 'beam_search_decode', 'conv2d_transpose', 'sequence_expand',
    'lstm_unit', 'reduce_sum'
]


def fc(input,
       size,
       num_flatten_dims=1,
       param_attr=None,
       bias_attr=None,
       act=None,
       name=None):
    """
    Fully Connected Layer.

    Args:
       input: The input tensor(s) to the fully connected layer.
       size: The number of output units in the fully connected layer.
       num_flatten_dims: The fc layer can accept an input tensor with more than
                         two dimensions. If this happens, the multidimensional
                         tensor will first be flattened into a 2-dimensional
                         matrix. The parameter `num_flatten_dims` determines
                         how the input tensor is flattened: the first
                         `num_flatten_dims` dimensions will be flattened to form
                         the first dimension of the final matrix (height of the
                         matrix), and the rest `rank(X) - num_flatten_dims`
                         dimensions are flattened to form the second dimension
                         of the final matrix (width of the matrix). For example,
                         suppose `X` is a 5-dimensional tensor with a shape
                         [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. Then, the
                         flattened matrix will have a shape [2 x 3 x 4, 5 x 6]
                         = [24, 30]. By default, `num_flatten_dims` is set to 1.
       param_attr: The parameter attribute for the learnable weights of the
                   fully connected layer.
       bias_attr: The parameter attribute for the bias parameter of this layer.
                  If set None, no bias will be added to the output units.
       act: Activation to be applied to the output of the fully connected layer.
       name: Name/alias of the fully connected layer.

    The fully connected layer can take multiple tensors as its inputs. It
    creates a weight variable for each input tensor, which represents a fully
    connected weight matrix from each input unit to each output unit. The
    fully connected layer multiplies each input tensor with its corresponding
    weight to produce an output tensor. If multiple input tensors are given,
    the results of the multiplications will be summed up. If bias_attr is not
    None, a bias variable will be created and added to the output. Finally,
    if activation is not None, it will be applied to the output as well.

    This process can be formulated as follows:

    .. math::
        Y = \sigma({\sum_{i=0}^{N-1}W_iX_i + b})

    where :math:`N` is the number of input tensors, :math:`X_i` is the i-th
    input tensor, :math:`W_i` is the corresponding weight created by this
    layer, :math:`b` is the bias created by this layer (if needed), and
    :math:`\sigma` is the activation function.
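
    Examples:
        A minimal usage sketch (the ``data`` layer below and its shape are
        illustrative placeholders):

        .. code-block:: python

            data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
            fc = fluid.layers.fc(input=data, size=1000, act="tanh")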

    """
    helper = LayerHelper("fc", **locals())

    dtype = helper.input_dtype()

    mul_results = []
    for input_var, param_attr in helper.iter_inputs_and_params():
        input_shape = input_var.shape
        param_shape = [
            reduce(lambda a, b: a * b, input_shape[num_flatten_dims:], 1)
        ] + [size]
        w = helper.create_parameter(
            attr=param_attr, shape=param_shape, dtype=dtype, is_bias=False)
        tmp = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="mul",
            inputs={
                "X": input_var,
                "Y": w,
            },
            outputs={"Out": tmp},
            attrs={"x_num_col_dims": num_flatten_dims,
                   "y_num_col_dims": 1})
        mul_results.append(tmp)

    # sum
    if len(mul_results) == 1:
        pre_bias = mul_results[0]
    else:
        pre_bias = helper.create_tmp_variable(dtype)
        helper.append_op(
            type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias})
    # add bias
    pre_activation = helper.append_bias_op(pre_bias)
    # add activation
    return helper.append_activation(pre_activation)


def embedding(input, size, is_sparse=False, param_attr=None, dtype='float32'):
    """
    Embedding Layer.

    Args:
       input: The input to the function, a vector of IDs
       size: The shape of the lookup table parameter: [vocab_size, embedding_dim]
       is_sparse: A flag indicating whether to use a sparse update for the
                  lookup table parameter
       param_attr: Parameters for this layer
       dtype: The data type: float32, float16, int, etc.

    This function takes the input (a vector of IDs) and performs a lookup in
    the lookup table using these IDs, producing the embedding of each ID in
    the input.

    All the input variables of this function are passed in as local variables
    to the LayerHelper constructor.
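
    Examples:
        A minimal usage sketch (the ``ids`` data layer, vocabulary size and
        embedding width below are illustrative placeholders):

        .. code-block:: python

            ids = fluid.layers.data(name='ids', shape=[1], dtype='int64')
            emb = fluid.layers.embedding(input=ids, size=[10000, 32],
                                         is_sparse=True)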

    """

    helper = LayerHelper('embedding', **locals())
    w = helper.create_parameter(
        attr=helper.param_attr, shape=size, dtype=dtype, is_bias=False)
    tmp = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='lookup_table',
        inputs={'Ids': input,
                'W': w},
        outputs={'Out': tmp},
        attrs={'is_sparse': is_sparse})
    return tmp


# TODO(qijun): expose H0 and C0
def dynamic_lstm(input,
                 size,
                 param_attr=None,
                 bias_attr=None,
                 use_peepholes=True,
                 is_reverse=False,
                 gate_activation='sigmoid',
                 cell_activation='tanh',
                 candidate_activation='tanh',
                 dtype='float32'):
    helper = LayerHelper('lstm', **locals())
    size = size / 4
    weight = helper.create_parameter(
        attr=helper.param_attr, shape=[size, 4 * size], dtype=dtype)
    bias_size = [1, 7 * size]
    if not use_peepholes:
        bias_size[1] = 4 * size
    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)

    hidden = helper.create_tmp_variable(dtype)
    cell = helper.create_tmp_variable(dtype)
    batch_gate = helper.create_tmp_variable(dtype)
    batch_cell_pre_act = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='lstm',
        inputs={'Input': input,
                'Weight': weight,
                'Bias': bias},
        outputs={
            'Hidden': hidden,
            'Cell': cell,
            'BatchGate': batch_gate,
            'BatchCellPreAct': batch_cell_pre_act
        },
        attrs={
            'use_peepholes': use_peepholes,
            'is_reverse': is_reverse,
            'gate_activation': gate_activation,
            'cell_activation': cell_activation,
            'candidate_activation': candidate_activation
        })
    return hidden, cell


def gru_unit(input,
             hidden,
             size,
             weight=None,
             bias=None,
             activation='tanh',
             gate_activation='sigmoid'):
    """
    GRUUnit Operator implements partial calculations of the GRU unit as follows:

    $$
    update \ gate: u_t = actGate(xu_t + W_u * h_{t-1} + b_u) \\
    reset \ gate: r_t = actGate(xr_t + W_r * h_{t-1} + b_r)  \\
    output \ candidate: {h}_t = actNode(xc_t + W_c * dot(r_t, h_{t-1}) + b_c) \\
    output: h_t = dot((1 - u_t), h_{t-1}) + dot(u_t, {h}_t)
    $$

    which is the same as one time step of the GRU operator.

    Note: To implement the complete GRU unit, a fully-connected operator must
    be applied beforehand to produce xu, xr and xc as the input of the
    GRUUnit operator.

    TODO(ChunweiYan) add more document here
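
    Examples:
        A minimal usage sketch (``step_input`` and ``pre_hidden`` below are
        illustrative placeholders; the fc layer produces the projected inputs
        xu, xr and xc, so its size is three times the hidden size):

        .. code-block:: python

            hidden_dim = 256
            x = fluid.layers.fc(input=step_input, size=hidden_dim * 3)
            hidden, reset_hidden_pre, gate = fluid.layers.gru_unit(
                input=x, hidden=pre_hidden, size=hidden_dim * 3)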
    """
    activation_dict = dict(
        identity=0,
        sigmoid=1,
        tanh=2,
        relu=3, )
    activation = activation_dict[activation]
    gate_activation = activation_dict[gate_activation]

    helper = LayerHelper('gru_unit', **locals())
    dtype = helper.input_dtype()
    size = size / 3

    # create weight
    if weight is None:
        weight = helper.create_parameter(
            attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype)

    # create bias
    if bias is None:
        bias_size = [1, 3 * size]
        bias = helper.create_parameter(
            attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True)

    gate = helper.create_tmp_variable(dtype)
    reset_hidden_pre = helper.create_tmp_variable(dtype)
    updated_hidden = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='gru_unit',
        inputs={'Input': input,
                'HiddenPrev': hidden,
                'Weight': weight},
        outputs={
            'Gate': gate,
            'ResetHiddenPrev': reset_hidden_pre,
            'Hidden': updated_hidden,
        },
        attrs={
            'activation': activation,
            'gate_activation': gate_activation,
        })

    return updated_hidden, reset_hidden_pre, gate


def linear_chain_crf(input, label, param_attr=None):
    helper = LayerHelper('linear_chain_crf', **locals())
    size = input.shape[1]
    transition = helper.create_parameter(
        attr=helper.param_attr,
        shape=[size + 2, size],
        dtype=helper.input_dtype())
    alpha = helper.create_tmp_variable(dtype=helper.input_dtype())
    emission_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    transition_exps = helper.create_tmp_variable(dtype=helper.input_dtype())
    log_likelihood = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='linear_chain_crf',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={
            "Alpha": [alpha],
            "EmissionExps": [emission_exps],
            "TransitionExps": transition_exps,
            "LogLikelihood": log_likelihood
        })

    return log_likelihood


def crf_decoding(input, param_attr, label=None):
    helper = LayerHelper('crf_decoding', **locals())
    transition = helper.get_parameter(param_attr.name)
    viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='crf_decoding',
        inputs={"Emission": [input],
                "Transition": transition,
                "Label": label},
        outputs={"ViterbiPath": [viterbi_path]})

    return viterbi_path


def cos_sim(X, Y, **kwargs):
    """
    This function computes the cosine similarity between two tensors
    X and Y and returns the result as the output.
    """
    helper = LayerHelper('cos_sim', **kwargs)
    out = helper.create_tmp_variable(dtype=X.dtype)
    xnorm = helper.create_tmp_variable(dtype=X.dtype)
    ynorm = helper.create_tmp_variable(dtype=X.dtype)
    helper.append_op(
        type='cos_sim',
        inputs={'X': [X],
                'Y': [Y]},
        outputs={'Out': [out],
                 'XNorm': [xnorm],
                 'YNorm': [ynorm]})
    return out


def cross_entropy(input, label, **kwargs):
    """
    This function computes cross_entropy using the input and label.
    """
    helper = LayerHelper('cross_entropy', **kwargs)
    out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='cross_entropy',
        inputs={'X': [input],
                'Label': [label]},
        outputs={'Y': [out]},
        attrs=kwargs)
    return out


def square_error_cost(input, label, **kwargs):
    """
    This function returns the squared error cost using the input and label.
    The cost is computed element-wise as :math:`cost = (input - label)^2` by
    appending the corresponding ops.
    """
    helper = LayerHelper('square_error_cost', **kwargs)
    minus_out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='elementwise_sub',
        inputs={'X': [input],
                'Y': [label]},
        outputs={'Out': [minus_out]})

    square_out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='square', inputs={'X': [minus_out]}, outputs={'Y': [square_out]})
    return square_out


def accuracy(input, label, k=1, correct=None, total=None, **kwargs):
    """
    This function computes the accuracy using the input and label.
    It takes the top-k predictions of the input, compares their indices
    with the label, and returns the resulting accuracy.
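
    Examples:
        A minimal sketch (``feature`` and ``label`` below are illustrative
        layers from an assumed classification network):

        .. code-block:: python

            predict = fluid.layers.fc(input=feature, size=10, act='softmax')
            acc = fluid.layers.accuracy(input=predict, label=label, k=1)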
    """
    helper = LayerHelper("accuracy", **kwargs)
    topk_out = helper.create_tmp_variable(dtype=input.dtype)
    topk_indices = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="top_k",
        inputs={"X": [input]},
        outputs={"Out": [topk_out],
                 "Indices": [topk_indices]},
        attrs={"k": k})
    acc_out = helper.create_tmp_variable(dtype="float32")
    if correct is None:
        correct = helper.create_tmp_variable(dtype="int64")
    if total is None:
        total = helper.create_tmp_variable(dtype="int64")
    helper.append_op(
        type="accuracy",
        inputs={
            "Out": [topk_out],
            "Indices": [topk_indices],
            "Label": [label]
        },
        outputs={
            "Accuracy": [acc_out],
            "Correct": [correct],
            "Total": [total],
        })
    return acc_out


def chunk_eval(input,
               label,
               chunk_scheme,
               num_chunk_types,
               excluded_chunk_types=None,
               **kwargs):
    """
    This function computes and outputs the precision, recall and
    F1-score of chunk detection.
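
    Examples:
        A minimal sketch (``predictions`` and ``labels`` below are illustrative
        integer-tag sequences; the IOB scheme with 3 chunk types is assumed):

        .. code-block:: python

            precision, recall, f1, _, _, _ = fluid.layers.chunk_eval(
                input=predictions, label=labels,
                chunk_scheme='IOB', num_chunk_types=3)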
    """
    helper = LayerHelper("chunk_eval", **kwargs)

    # prepare output
    precision = helper.create_tmp_variable(dtype="float32")
    recall = helper.create_tmp_variable(dtype="float32")
    f1_score = helper.create_tmp_variable(dtype="float32")
    num_infer_chunks = helper.create_tmp_variable(dtype="int64")
    num_label_chunks = helper.create_tmp_variable(dtype="int64")
    num_correct_chunks = helper.create_tmp_variable(dtype="int64")

    helper.append_op(
        type="chunk_eval",
        inputs={"Inference": [input],
                "Label": [label]},
        outputs={
            "Precision": [precision],
            "Recall": [recall],
            "F1-Score": [f1_score],
            "NumInferChunks": [num_infer_chunks],
            "NumLabelChunks": [num_label_chunks],
            "NumCorrectChunks": [num_correct_chunks]
        },
        attrs={
            "num_chunk_types": num_chunk_types,
            "chunk_scheme": chunk_scheme,
            "excluded_chunk_types": excluded_chunk_types or []
        })
    return precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks


def sequence_conv(input,
                  num_filters,
                  filter_size=3,
                  filter_stride=1,
                  padding=None,
                  bias_attr=None,
                  param_attr=None,
                  act=None):
    """
    This function creates the op for sequence_conv, using the input and the
    filter and stride configurations given in the input parameters of the
    function.
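
    Examples:
        A minimal sketch (``seq`` below is an illustrative sequence data layer
        with one LoD level):

        .. code-block:: python

            seq = fluid.layers.data(name='seq', shape=[128], dtype='float32',
                                    lod_level=1)
            conv = fluid.layers.sequence_conv(input=seq, num_filters=64,
                                              filter_size=3, act='relu')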
    """

    # FIXME(dzh) : want to unify the argument of python layer
    # function. So we ignore some unnecessary attributes.
    # such as, padding_trainable, context_start.

    helper = LayerHelper('sequence_conv', **locals())
    dtype = helper.input_dtype()
    filter_shape = [filter_size * input.shape[1], num_filters]
    filter_param = helper.create_parameter(
        attr=helper.param_attr, shape=filter_shape, dtype=dtype)
    pre_bias = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='sequence_conv',
        inputs={
            'X': [input],
            'Filter': [filter_param],
        },
        outputs={"Out": pre_bias},
        attrs={
            'contextStride': filter_stride,
            'contextStart': -int(filter_size / 2),
            'contextLength': filter_size
        })
    pre_act = helper.append_bias_op(pre_bias)
    return helper.append_activation(pre_act)


def conv2d(input,
           num_filters,
           filter_size,
           stride=None,
           padding=None,
           groups=None,
           param_attr=None,
           bias_attr=None,
           act=None,
           name=None):
    """
    This function creates the op for a 2-dimensional convolution.
    It is performed using the filter parameters (size, dimensionality, etc.),
    stride and other configurations for a convolution operation.
    This function can also append an activation on top of the
    conv2d output, if one is specified in the input parameters.
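
    Examples:
        A minimal usage sketch (the ``data`` layer and its NCHW shape below
        are illustrative placeholders):

        .. code-block:: python

            data = fluid.layers.data(name='data', shape=[3, 32, 32],
                                     dtype='float32')
            conv = fluid.layers.conv2d(input=data, num_filters=2,
                                       filter_size=3, act='relu')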
    """

    if stride is None:
        stride = [1, 1]
    helper = LayerHelper('conv2d', **locals())
    dtype = helper.input_dtype()

    num_channels = input.shape[1]
    if groups is None:
        num_filter_channels = num_channels
    else:
        if num_channels % groups != 0:
            raise ValueError("num_channels must be divisible by groups.")
        num_filter_channels = num_channels / groups

    if isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]
    if isinstance(stride, int):
        stride = [stride, stride]
    if isinstance(padding, int):
        padding = [padding, padding]

    input_shape = input.shape
    filter_shape = [num_filters, num_filter_channels] + filter_size

    def _get_default_param_initializer():
        std = (2.0 / (filter_size[0]**2 * num_channels))**0.5
        return Normal(0.0, std, 0)

    filter_param = helper.create_parameter(
        attr=helper.param_attr,
        shape=filter_shape,
        dtype=dtype,
        default_initializer=_get_default_param_initializer())

    pre_bias = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='conv2d_cudnn',
        inputs={
            'Input': input,
            'Filter': filter_param,
        },
        outputs={"Output": pre_bias},
        attrs={'strides': stride,
               'paddings': padding,
               'groups': groups})

    pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2)

    return helper.append_activation(pre_act)


def sequence_pool(input, pool_type, **kwargs):
    """
    This function adds the operator for sequence pooling.
    It is applied on top of the input using the pool_type specified
    in the parameters.
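
    Examples:
        A minimal sketch (``seq`` below is an illustrative sequence data layer
        with one LoD level):

        .. code-block:: python

            seq = fluid.layers.data(name='seq', shape=[7], dtype='float32',
                                    lod_level=1)
            avg_pool = fluid.layers.sequence_pool(input=seq, pool_type='average')
            max_pool = fluid.layers.sequence_pool(input=seq, pool_type='max')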
    """
    helper = LayerHelper('sequence_pool', input=input, **kwargs)
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)
    max_index = helper.create_tmp_variable(dtype)

    helper.append_op(
        type="sequence_pool",
        inputs={"X": input},
        outputs={"Out": pool_out,
                 "MaxIndex": max_index},
        attrs={"pooltype": pool_type.upper()})

    return pool_out


def pool2d(input,
           pool_size,
           pool_type,
           pool_stride=None,
           pool_padding=None,
           global_pooling=False):
    """
    This function adds the operator for pooling in 2 dimensions, using the
    pooling configurations mentioned in input parameters.
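
    Examples:
        A minimal sketch (``conv`` below stands for an assumed preceding
        feature map in NCHW layout):

        .. code-block:: python

            pool = fluid.layers.pool2d(input=conv, pool_size=2,
                                       pool_type='max', pool_stride=2)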
    """
    if pool_padding is None:
        pool_padding = [0, 0]
    if pool_stride is None:
        pool_stride = [1, 1]
    if pool_type not in ["max", "avg"]:
        raise ValueError(
            "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.",
            str(pool_type))
    if isinstance(pool_size, int):
        pool_size = [pool_size, pool_size]
    if isinstance(pool_stride, int):
        pool_stride = [pool_stride, pool_stride]
    if isinstance(pool_padding, int):
        pool_padding = [pool_padding, pool_padding]

    helper = LayerHelper('pool2d', **locals())
    dtype = helper.input_dtype()
    pool_out = helper.create_tmp_variable(dtype)

    helper.append_op(
        type="pool2d",
        inputs={"X": input},
        outputs={"Out": pool_out},
        attrs={
            "pooling_type": pool_type,
            "ksize": pool_size,
            "global_pooling": global_pooling,
            "strides": pool_stride,
            "paddings": pool_padding
        })

    return pool_out


def batch_norm(input,
               act=None,
               is_test=False,
               momentum=0.9,
               epsilon=1e-05,
               param_attr=None,
               bias_attr=None,
               data_layout='NCHW'):
    """
    This function helps create an operator to implement
    the BatchNorm layer using the configurations from the input parameters.
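
    Examples:
        A minimal sketch (``conv`` below stands for an assumed preceding
        convolution output in NCHW layout):

        .. code-block:: python

            hidden = fluid.layers.batch_norm(input=conv, act='relu')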
    """
    helper = LayerHelper('batch_norm', **locals())
    dtype = helper.input_dtype()

    input_shape = input.shape
    if data_layout == 'NCHW':
        channel_num = input_shape[1]
    else:
        if data_layout == 'NHWC':
            channel_num = input_shape[-1]
        else:
            raise ValueError("unsupported data layout:" + data_layout)

    param_shape = [channel_num]

    # create parameter
    scale = helper.create_parameter(
        attr=helper.param_attr,
        shape=param_shape,
        dtype=dtype,
        default_initializer=Constant(1.0))

    bias = helper.create_parameter(
        attr=helper.bias_attr, shape=param_shape, dtype=dtype, is_bias=True)

    mean = helper.create_global_variable(
        dtype=input.dtype, shape=param_shape, persistable=True)
    helper.set_variable_initializer(var=mean, initializer=Constant(0.0))

    variance = helper.create_global_variable(
        dtype=input.dtype, shape=param_shape, persistable=True)
    helper.set_variable_initializer(var=variance, initializer=Constant(1.0))

    # create output
    # mean and mean_out share the same memory
    mean_out = mean
    # variance and variance out share the same memory
    variance_out = variance
    saved_mean = helper.create_tmp_variable(dtype)
    saved_variance = helper.create_tmp_variable(dtype)

    batch_norm_out = helper.create_tmp_variable(dtype)

    helper.append_op(
        type="batch_norm",
        inputs={
            "X": input,
            "Scale": scale,
            "Bias": bias,
            "Mean": mean,
            "Variance": variance
        },
        outputs={
            "Y": batch_norm_out,
            "MeanOut": mean_out,
            "VarianceOut": variance_out,
            "SavedMean": saved_mean,
            "SavedVariance": saved_variance
        },
        attrs={"momentum": momentum,
               "epsilon": epsilon,
               "is_test": is_test})

    return helper.append_activation(batch_norm_out)


def beam_search_decode(ids, scores):
    helper = LayerHelper('beam_search_decode', **locals())
    sentence_ids = helper.create_tmp_variable(dtype=ids.dtype)
    sentence_scores = helper.create_tmp_variable(dtype=ids.dtype)

    helper.append_op(
        type="beam_search_decode",
        inputs={"Ids": ids,
                "Scores": scores},
        outputs={
            "SentenceIds": sentence_ids,
            "SentenceScores": sentence_scores
        })

    return sentence_ids, sentence_scores


def conv2d_transpose(input,
                     num_filters,
                     output_size=None,
                     filter_size=None,
                     padding=None,
                     stride=None,
                     param_attr=None):
    """
    The transpose of conv2d layer.

    This layer is also known as deconvolution layer.

    Args:
        input(Variable): The input image with [N, C, H, W] format.
        num_filters(int): The number of filters. It is the same as the number
            of output image channels.
        output_size(int|tuple|None): The output image size. If output size is a
            tuple, it must contain two integers, (image_H, image_W). This
            parameter only works when filter_size is None.
        filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
            it must contain two integers, (filter_size_H, filter_size_W).
            Otherwise, the filter will be a square. If None, the filter size is
            computed from output_size.
        padding(int|tuple): The padding size. If padding is a tuple, it must
            contain two integers, (padding_H, padding_W). Otherwise, the
            padding_H = padding_W = padding.
        stride(int|tuple): The stride size. If stride is a tuple, it must
            contain two integers, (stride_H, stride_W). Otherwise, the
            stride_H = stride_W = stride.
        param_attr: Parameter Attribute.

    Returns:
        Variable: Output image.
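
    Examples:
        A minimal usage sketch (the ``data`` layer and its shape below are
        illustrative placeholders):

        .. code-block:: python

            data = fluid.layers.data(name='data', shape=[3, 32, 32],
                                     dtype='float32')
            conv_t = fluid.layers.conv2d_transpose(input=data, num_filters=16,
                                                   filter_size=4, stride=2,
                                                   padding=1)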
    """
    helper = LayerHelper("conv2d_transpose", **locals())
    if not isinstance(input, Variable):
        raise TypeError("Input of conv2d_transpose must be Variable")
    input_channel = input.shape[1]

    op_attr = dict()

    if isinstance(padding, int):
        op_attr['paddings'] = [padding, padding]
    elif padding is not None:
        op_attr['paddings'] = padding

    if isinstance(stride, int):
        # an int stride applies to both spatial dimensions
        op_attr['strides'] = [stride, stride]
    elif stride is not None:
        op_attr['strides'] = stride

    if filter_size is None:
        if output_size is None:
            raise ValueError("output_size must be set when filter_size is None")
        if isinstance(output_size, int):
            output_size = [output_size, output_size]

        padding = op_attr.get('paddings', [0, 0])
        stride = op_attr.get('strides', [1, 1])

        h_in = input.shape[2]
        w_in = input.shape[3]
        filter_size_h = output_size[0] - \
                        (h_in - 1) * stride[0] + 2 * padding[0]
        filter_size_w = output_size[1] - \
                        (w_in - 1) * stride[1] + 2 * padding[1]
        filter_size = [filter_size_h, filter_size_w]
    elif isinstance(filter_size, int):
        filter_size = [filter_size, filter_size]

    filter_shape = [input_channel, num_filters] + filter_size
    img_filter = helper.create_parameter(
        dtype=input.dtype, shape=filter_shape, attr=helper.param_attr)

    out = helper.create_tmp_variable(dtype=input.dtype)
    helper.append_op(
        type='conv2d_transpose',
        inputs={'Input': [input],
                'Filter': [img_filter]},
        outputs={'Output': out},
        attrs=op_attr)

    return out


def sequence_expand(x, y):
    """Sequence Expand Layer. This layer will expand the input variable **x**
    according to LoD information of **y**. And the following examples will
Y
yangyaming 已提交
792
    explain how sequence_expand works:

    .. code-block:: text

        * Case 1
            x is a LoDTensor:
                x.lod = [[0,       2, 3],
                         [0, 1,    3, 4]]
                x.data = [a, b, c, d]
                x.dims = [4, 1]

            y is a LoDTensor:
                y.lod = [[0,    2,    4],
                         [0, 3, 6, 7, 8]]

            with condition len(y.lod[-1]) - 1 == x.dims[0]

            then output is a 2-level LoDTensor:
                out.lod = [[0,                2,    4],
                           [0,       3,       6, 7, 8]]
                out.data = [a, a, a, b, b, b, c, d]
                out.dims = [8, 1]

        * Case 2
            x is a Tensor:
                x.data = [a, b, c]
                x.dims = [3, 1]

            y is a LoDTensor:
                y.lod = [[0, 2, 3, 6]]

            with condition len(y.lod[-1]) - 1 == x.dims[0]

            then output is a 1-level LoDTensor:
                out.lod = [[0,    2, 3,      6]]
                out.data = [a, a, b, c, c, c]
                out.dims = [6, 1]

    Args:
        x (Variable): The input variable which is a Tensor or LoDTensor.
        y (Variable): The input variable which is a LoDTensor.

    Returns:
        Variable: The expanded variable which is a LoDTensor.

    Examples:
        .. code-block:: python

            x = fluid.layers.data(name='x', shape=[10], dtype='float32')
            y = fluid.layers.data(name='y', shape=[10, 20],
                             dtype='float32', lod_level=1)
            out = layers.sequence_expand(x=x, y=y)
    """
    helper = LayerHelper('sequence_expand', input=x, **locals())
    dtype = helper.input_dtype()
    tmp = helper.create_tmp_variable(dtype)
    helper.append_op(
        type='sequence_expand', inputs={'X': x,
                                        'Y': y}, outputs={'Out': tmp})
    return tmp


def lstm_unit(x_t,
              hidden_t_prev,
              cell_t_prev,
              forget_bias=0.0,
              param_attr=None,
              bias_attr=None):
    """Lstm unit layer. The equation of a lstm step is:

        .. math::

            i_t & = \sigma(W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i)

            f_t & = \sigma(W_{x_f}x_{t} + W_{h_f}h_{t-1} + W_{c_f}c_{t-1} + b_f)

            c_t & = f_t c_{t-1} + i_t \tanh (W_{x_c}x_{t} + W_{h_c}h_{t-1} + b_c)

            o_t & = \sigma(W_{x_o}x_{t} + W_{h_o}h_{t-1} + W_{c_o}c_t + b_o)

            h_t & = o_t \tanh(c_t)

    The inputs of lstm unit includes :math:`x_t`, :math:`h_{t-1}` and
    :math:`c_{t-1}`. The implementation separates the linear transformation
    and non-linear transformation apart. Here, we take :math:`i_t` as an
    example. The linear transformation is applied by calling a `fc` layer and
    the equation is:

        .. math::

            L_{i_t} = W_{x_i}x_{t} + W_{h_i}h_{t-1} + W_{c_i}c_{t-1} + b_i

    The non-linear transformation is applied by calling `lstm_unit_op` and the
    equation is:

        .. math::

            i_t = \sigma(L_{i_t})

    This layer has two outputs including :math:`h_t` and :math:`c_t`.

    Args:
        x_t (Variable): The input value of current step.
        hidden_t_prev (Variable): The hidden value of lstm unit.
        cell_t_prev (Variable): The cell value of lstm unit.
        forget_bias (float): The forget bias of lstm unit.
        param_attr (ParamAttr): The attributes of parameter weights, used to set
            initializer, name etc.
        bias_attr (ParamAttr): The attributes of bias weights, if not False,
            bias weights will be created and be set to default value.

    Returns:
        tuple: The hidden value and cell value of lstm unit.

    Raises:
        ValueError: The ranks of **x_t**, **hidden_t_prev** and **cell_t_prev**\
                are not 2, or the 1st dimensions of **x_t**, **hidden_t_prev**\
                and **cell_t_prev** are not the same.

    Examples:

        .. code-block:: python

             x_t = fluid.layers.fc(input=x_t_data, size=10)
             prev_hidden = fluid.layers.fc(input=prev_hidden_data, size=20)
             prev_cell = fluid.layers.fc(input=prev_cell_data, size=30)
             hidden_value, cell_value = fluid.layers.lstm_unit(x_t=x_t,
                                                    hidden_t_prev=prev_hidden,
                                                    cell_t_prev=prev_cell)
    """
    helper = LayerHelper('lstm_unit', **locals())

    if len(x_t.shape) != 2:
        raise ValueError("Rank of x_t must be 2.")

    if len(hidden_t_prev.shape) != 2:
        raise ValueError("Rank of hidden_t_prev must be 2.")

    if len(cell_t_prev.shape) != 2:
        raise ValueError("Rank of cell_t_prev must be 2.")

    if x_t.shape[0] != hidden_t_prev.shape[0] or x_t.shape[
            0] != cell_t_prev.shape[0]:
        raise ValueError("The 1s dimension of x_t, hidden_t_prev and "
                         "cell_t_prev must be the same.")

    if bias_attr is None:
        bias_attr = ParamAttr()

    size = cell_t_prev.shape[1]
    concat_out = concat(input=[x_t, hidden_t_prev], axis=1)
    fc_out = fc(input=concat_out,
                size=4 * size,
                param_attr=param_attr,
                bias_attr=bias_attr)
    dtype = x_t.dtype
    c = helper.create_tmp_variable(dtype)
    h = helper.create_tmp_variable(dtype)

    helper.append_op(
        type='lstm_unit',
        inputs={"X": fc_out,
                "C_prev": cell_t_prev},
        outputs={"C": c,
                 "H": h},
        attrs={"forget_bias": forget_bias})

    return h, c


def reduce_sum(input, dim=None, keep_dim=False):
    """
    Computes the sum of tensor elements over the given dimension. 

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
        dim (int|None): The dimension along which the sum is performed. If 
            :attr:`None`, sum all elements of :attr:`input` and return a 
            Tensor variable with a single element, otherwise must be in the 
            range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`, 
            the dimension to reduce is :math:`rank + dim`.
        keep_dim (bool): Whether to reserve the reduced dimension in the 
            output Tensor. The result tensor will have one fewer dimension 
            than the :attr:`input` unless :attr:`keep_dim` is true.

    Returns:
        Variable: The reduced Tensor variable.
    
    Examples:
        .. code-block:: python

            # x is a Tensor variable with following elements:
            #    [[0.2, 0.3, 0.5, 0.9]
            #     [0.1, 0.2, 0.6, 0.7]]
            # Each example is followed by the corresponding output tensor.
            fluid.layers.reduce_sum(x)  # [3.5]
            fluid.layers.reduce_sum(x, dim=0)  # [0.3, 0.5, 1.1, 1.6]
            fluid.layers.reduce_sum(x, dim=-1)  # [1.9, 1.6]
            fluid.layers.reduce_sum(x, dim=1, keep_dim=True)  # [[1.9], [1.6]]
    """
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
            'dim': dim if dim is not None else 0,
            'keep_dim': keep_dim,
            'reduce_all': dim is None
        })
    return out