#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

from functools import reduce  # `reduce` is not a builtin on Python 3

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from ..imperative import base as imperative_base
from .layer_function_generator import templatedoc
import numpy

__all__ = [
    'create_tensor', 'create_parameter', 'create_global_var', 'cast',
    'tensor_array_to_tensor', 'concat', 'sums', 'assign',
    'fill_constant_batch_size_like', 'fill_constant', 'argmin', 'argmax',
    'argsort', 'ones', 'zeros', 'reverse', 'has_inf', 'has_nan', 'isfinite'
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable, which will hold a LoDTensor with data type dtype.

    Args:
        dtype(string): 'float32'|'int32'|..., the data type of the
            created tensor.
        name(string): The name of the created tensor, if not set,
            the name will be a random unique one.
        persistable(bool): Set the persistable flag of the created tensor.

    Returns:
        Variable: The tensor variable storing the created tensor.

    Examples:
        .. code-block:: python

          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    Create a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    NOTE: this is a very low-level API. It is useful only when you create
    operators by yourself, instead of using layers.

    Args:
        shape(list[int]): shape of the parameter
        dtype(string): element type of the parameter
        attr(ParamAttr): attributes of the parameter
        is_bias(bool): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer(Initializer): initializer for the parameter

    Returns:
        the created parameter.

    Examples:
        >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
        >>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
        >>> hidden = fluid.layers.matmul(x=data, y=W)
    """
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape, dtype, is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    Create a new tensor variable with value in the global block(block 0).

    Args:
        shape(list[int]): shape of the variable
        value(float): the value of the variable. The newly created
                      variable will be filled with it.
        dtype(string): data type of the variable
        persistable(bool): if this variable is persistable.
                           Default: False
        force_cpu(bool): force this variable to be on CPU.
                         Default: False
        name(str|None): The name of the variable. If set to None the variable
                        name will be generated automatically.
                        Default: None

    Returns:
        Variable: the created Variable

    Examples:
        .. code-block:: python

            var = fluid.layers.create_global_var(shape=[2,3], value=1.0,
                                                 dtype='float32', persistable=True,
                                                 force_cpu=True, name='new_var')
    """
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))

    return var


def cast(x, dtype):
    """
    This layer takes in the Variable :attr:`x` with :attr:`x.dtype` and casts
    it to the output with :attr:`dtype`.

    Args:
        x (Variable): The input Variable for casting.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable.

    Returns:
        Variable: The output Variable after casting.

    Examples:
        .. code-block:: python

            data = fluid.layers.data(name='x', shape=[13], dtype='float32')
            result = fluid.layers.cast(x=data, dtype='float64')
    """
    helper = LayerHelper('cast', **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    **Concat**

    This function concatenates the input tensors along the given axis
    and returns the result as the output.

    Args:
        input(list): List of tensors to be concatenated
        axis(int): Integer axis along which the tensors will be concatenated
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.

    Returns:
        Variable: Output variable of the concatenation

    Examples:
        .. code-block:: python

           # Efirst, Esecond, Ethird and Efourth are Variables with the
           # same shape except possibly along the concatenation axis
           out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
    """
    helper = LayerHelper('concat', **locals())
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def tensor_array_to_tensor(input, axis=1, name=None):
    """
    This function concatenates the input LoDTensorArray along the axis
    mentioned and returns that as the output.

    A simple example is as follows:

    .. code-block:: text

        Given:

        input.data = {[[0.6, 0.1, 0.3],
                       [0.5, 0.3, 0.2]],
                      [[1.3],
                       [1.8]],
                      [[2.3, 2.1],
                       [2.5, 2.4]]}

        axis = 1

        Then:

        output.data = [[0.6, 0.1, 0.3, 1.3, 2.3, 2.1],
                       [0.5, 0.3, 0.2, 1.8, 2.5, 2.4]]

        output_index.data = [3, 1, 2]

    Args:
        input(list): Input LoDTensorArray
        axis(int): Integer axis along which the tensors will be concatenated
        name(str|None): A name for this layer(optional). If set None, the layer
                       will be named automatically.

    Returns:
        Variable: Output variable of the concatenation
        Variable: The input LoDTensorArray items' dims along the axis

    Examples:
        .. code-block:: python
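
           # an illustrative sketch: `tensor_array` is assumed to be a
           # LoDTensorArray; create_array/array_write (from the control
           # flow layers) are one way to build it
           x0 = fluid.layers.fill_constant(shape=[2, 3], dtype='float32', value=1)
           i0 = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
           tensor_array = fluid.layers.create_array(dtype='float32')
           fluid.layers.array_write(x0, i0, array=tensor_array)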

           output, output_index = fluid.layers.tensor_array_to_tensor(input=tensor_array)
    """
L
li099 已提交
244
    helper = LayerHelper('tensor_array_to_tensor', **locals())
L
li099 已提交
245 246 247
    out = helper.create_variable_for_type_inference(dtype=helper.input_dtype())
    out_index = helper.create_variable_for_type_inference(dtype="int32")
    helper.append_op(
L
li099 已提交
248
        type='tensor_array_to_tensor',
L
li099 已提交
249 250 251 252 253 254 255
        inputs={'X': input},
        outputs={'Out': [out],
                 'OutIndex': [out_index]},
        attrs={'axis': axis})
    return out, out_index


def sums(input, out=None):
    """
    This function performs the sum operation on the input and returns the
    result as the output.

    Args:
        input (Variable|list): The input tensor that has the elements
                               that need to be summed up.
        out (Variable|None): Output parameter. The sum result.
                             Default: None

    Returns:
        Variable: the sum of input. The same as the argument 'out'

    Examples:
        .. code-block:: python

          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
          a0 = fluid.layers.array_read(array=tmp, i=i)
          i = fluid.layers.increment(x=i)
          a1 = fluid.layers.array_read(array=tmp, i=i)
          mean_a0 = fluid.layers.mean(a0)
          mean_a1 = fluid.layers.mean(a1)
          a_sum = fluid.layers.sums(input=[mean_a0, mean_a1])
    """
    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(
            dtype=helper.input_dtype())
    helper.append_op(
        type='sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={'use_mkldnn': False})
    return out


def assign(input, output=None):
    """
    **Assign**

    This function copies the *input* Variable to the *output* Variable.

    Args:
        input(Variable|numpy.ndarray): The source variable
        output(Variable|None): The destination variable

    Returns:
        Variable: The destination variable that was supplied as the *output*.

    Examples:
        .. code-block:: python

          out = fluid.layers.create_tensor(dtype='float32')
          hidden = fluid.layers.fc(input=data, size=10)
          fluid.layers.assign(hidden, out)
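
          # a sketch: `assign` can also copy a numpy array directly;
          # as the implementation below shows, only float32 and int32
          # arrays are supported
          import numpy
          fluid.layers.assign(numpy.ones((3,), dtype='float32'), out)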
    """
Y
Yu Yang 已提交
314
    helper = LayerHelper('assign', **locals())
F
fengjiayi 已提交
315
    if output is None:
X
Xin Pan 已提交
316
        output = helper.create_variable_for_type_inference(dtype=input.dtype)
X
xuwei06 已提交
317 318
    if isinstance(input, Variable):
        helper.append_op(
R
robot 已提交
319
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
X
xuwei06 已提交
320 321
    elif isinstance(input, numpy.ndarray):
        dtype = convert_np_dtype_to_dtype_(input.dtype)
322
        if dtype == VarDesc.VarType.FP32:
X
xuwei06 已提交
323
            value_name = "fp32_values"
324
            values = [float(v) for v in input.flat]
325
        elif dtype == VarDesc.VarType.INT32:
X
xuwei06 已提交
326
            value_name = "int32_values"
327
            values = [int(v) for v in input.flat]
X
xuwei06 已提交
328 329
        else:
            raise ValueError("Unsupported dtype %s", input.dtype)
330 331 332
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")
X
xuwei06 已提交
333 334 335 336 337 338 339

        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
340
                value_name: values
X
xuwei06 已提交
341 342 343 344
            })
    else:
        raise ValueError("Wrong type for assign input: %s" % type(input))

Y
Yu Yang 已提交
345 346 347
    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    **fill_constant**

    This function creates a tensor with specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created tensor is set to True.

    Args:
        shape(tuple|list|None): Shape of the output tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor.
        value(float): The constant value used to initialize the output tensor.
        force_cpu(bool): The output tensor will be kept on CPU if set True.
        out(Variable): The output tensor.

    Returns:
        Variable: The tensor variable storing the output.

    Examples:
        .. code-block:: python

          data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
    """

    helper = LayerHelper("fill_constant", **locals())
    if out is None:
        out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='fill_constant',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value),
            'force_cpu': force_cpu or force_init_on_cpu()
        },
        stop_gradient=True)
    out.stop_gradient = True
    return out


@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0):
    """
    ${comment}

    It also sets *stop_gradient* to True.

    Args:
        input(${input_type}): ${input_comment}.

        shape(${shape_type}): ${shape_comment}.

        dtype(${dtype_type}): ${dtype_comment}.

        value(${value_type}): ${value_comment}.

        input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.

        output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.

    Returns:
        ${out_comment}.

    Examples:

        .. code-block:: python

             data = fluid.layers.fill_constant_batch_size_like(
                         input=like, shape=[1], value=0, dtype='int64')

    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_variable_for_type_inference(dtype=dtype)
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value),
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx
        })
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
    **argmin**

    This function computes the indices of the min elements
    of the input tensor's elements along the provided axis.

    Args:
        x(Variable): The input to compute the indices of
                     the min elements.
        axis(int): Axis to compute indices along.

    Returns:
        Variable: The tensor variable storing the output

    Examples:
        .. code-block:: python

          x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
          out = fluid.layers.argmin(x=x, axis=0)
          out = fluid.layers.argmin(x=x, axis=-1)
    """
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This function computes the indices of the max elements
    of the input tensor's elements along the provided axis.

    Args:
        x(Variable): The input to compute the indices of
                     the max elements.
        axis(int): Axis to compute indices along.

    Returns:
        Variable: The tensor variable storing the output

    Examples:
        .. code-block:: python

          x = fluid.layers.data(name="x", shape=[3, 4], dtype="float32")
          out = fluid.layers.argmax(x=x, axis=0)
          out = fluid.layers.argmax(x=x, axis=-1)
    """
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def argsort(input, axis=-1, name=None):
    """
    Performs sorting on the input Variable along the given axis, and outputs
    a sorted data Variable and its corresponding index Variable with the same
    shape as :attr:`input`.

    .. code-block:: text

        For example, the given axis is -1 and the input Variable

            input = [[0.15849551, 0.45865775, 0.8563702 ],
                     [0.12070083, 0.28766365, 0.18776911]],

        after argsort, the sorted Variable becomes

            out = [[0.15849551, 0.45865775, 0.8563702 ],
                   [0.12070083, 0.18776911, 0.28766365]],

        and the sorted indices along the given axis turn out to be

            indices = [[0, 1, 2],
                       [0, 2, 1]]

    Args:
        input(Variable): The input Variable for sorting.
        axis(int): The axis along which to sort the input Variable. When
                   :attr:`axis` < 0, the actual axis will be :attr:`axis` +
                   rank(:attr:`input`). Default -1, the last dimension.
        name(str|None): (optional) A name for this layer. If set None, the
                   layer will be named automatically.

    Returns:
        tuple: A tuple of sorted data Variable and the sorted indices.

    Examples:
        .. code-block:: python

            input = fluid.layers.data(name="input", shape=[2, 3])
            out, indices = fluid.layers.argsort(input, axis=0)
    """
    helper = LayerHelper("argsort", **locals())
    out = helper.create_variable_for_type_inference(
        dtype=input.dtype, stop_gradient=True)
    ids = helper.create_variable_for_type_inference(
        VarDesc.VarType.INT64, stop_gradient=True)
    helper.append_op(
        type='argsort',
        inputs={'X': input},
        outputs={'Out': out,
                 'Indices': ids},
        attrs={'axis': axis})
    return out, ids


def ones(shape, dtype, force_cpu=False):
    """
    **ones**

    This function creates a tensor of specified *shape* and
    *dtype*, and initializes it with 1.

    It also sets *stop_gradient* to True.

    Args:
        shape(tuple|list): Shape of output tensor
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor

    Returns:
        Variable: The tensor variable storing the output

    Examples:
        .. code-block:: python

          data = fluid.layers.ones(shape=[1], dtype='int64')
    """
    assert isinstance(shape, (list, tuple)), \
        "The shape's type should be list or tuple."
    assert reduce(lambda x, y: x * y,
                  shape) > 0, "The shape is invalid: %s." % (str(shape))
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False):
    """
    **zeros**

    This function creates a tensor of specified *shape* and
    *dtype*, and initializes it with 0.

    It also sets *stop_gradient* to True.

    Args:
        shape(tuple|list|None): Shape of output tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
        force_cpu(bool, default False): Whether to make output stay on CPU.

    Returns:
        Variable: The tensor variable storing the output.

    Examples:
        .. code-block:: python

          data = fluid.layers.zeros(shape=[1], dtype='int64')
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    **reverse**

    This function reverses the input 'x' along the given axes.

    Args:
        x(Variable): the input to be reversed.
        axis(int|tuple|list): Axis along which the order of elements
                    is reversed. If it is a tuple or a list, reversing
                    will be applied to each axis in the tuple or list.

    Returns:
        Variable: The reversed tensor.

    Examples:
        .. code-block:: python

          x = fluid.layers.data(name="x", shape=[4, 8], dtype="float32")
          out = fluid.layers.reverse(x=x, axis=0)
          # or:
          out = fluid.layers.reverse(x=x, axis=[0,1])
    """
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it
            already exists. If set to False and the file exists, a runtime
            error will be raised.
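
    Examples:
        .. code-block:: python

          # an illustrative sketch: `var` stands in for any Variable
          # produced earlier in the program; the path is a placeholder
          var = fluid.layers.create_tensor(dtype='float32')
          fluid.layers.save(var, file_path="./saved_var", overwrite=True)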
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor variables to be saved together in
                 a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether or not to overwrite the given file if it
            already exists. If set to False and the file exists, a runtime
            error will be raised.

    Returns:
        There is no return value.

    Examples:

        .. code-block:: python

            v1 = fluid.layers.data(name="data_1",
                                   shape=(4, 6),
                                   dtype="float32")
            v2 = fluid.layers.data(name="data_2",
                                   shape=(6, 8, 4),
                                   dtype="float32")
            fluid.layers.save_combine([v1, v2], file_path="output")
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
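
    Examples:
        .. code-block:: python

          # an illustrative sketch: load two variables that were written
          # with save_combine into pre-created destination variables
          v1 = fluid.layers.create_tensor(dtype='float32', name='v1')
          v2 = fluid.layers.create_tensor(dtype='float32', name='v2')
          fluid.layers.load_combine(out=[v1, v2], file_path="output")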
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        output={"Out": out},
        args={"file_path": file_path})


def has_inf(x):
    """
    Test if x contains any infinity value.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, only a bool value.
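
    Examples:
        .. code-block:: python

          # an illustrative sketch: `data` is any float32 Variable,
          # e.g. a data layer (name and shape here are placeholders)
          data = fluid.layers.data(name="input", shape=[32, 784])
          res = fluid.layers.has_inf(data)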
    """
    helper = LayerHelper("isinf", **locals())
X
Xin Pan 已提交
730
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
731 732 733 734 735 736 737 738 739 740 741 742 743 744 745
    helper.append_op(type="isinf", inputs={"X": x}, outputs={"Out": out})
    return out


def has_nan(x):
    """
    Test if x contains any NaN value.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, only a bool value.
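
    Examples:
        .. code-block:: python

          # an illustrative sketch: `data` is any float32 Variable,
          # e.g. a data layer (name and shape here are placeholders)
          data = fluid.layers.data(name="input", shape=[32, 784])
          res = fluid.layers.has_nan(data)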
    """
    helper = LayerHelper("isnan", **locals())
X
Xin Pan 已提交
746
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762
    helper.append_op(type="isnan", inputs={"X": x}, outputs={"Out": out})
    return out


def isfinite(x):
    """
    Test if x contains any infinity/NaN value. Returns true if all the
    elements are finite, false otherwise.

    Args:
       x(variable): The Tensor/LoDTensor to be checked.

    Returns:
        Variable: The tensor variable storing the output, contains a bool value.
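
    Examples:
        .. code-block:: python

          # an illustrative sketch: verify that a float32 input is free
          # of Inf/NaN values (name and shape here are placeholders)
          data = fluid.layers.data(name="input", shape=[32, 784])
          res = fluid.layers.isfinite(data)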
    """
    helper = LayerHelper("isfinite", **locals())
X
Xin Pan 已提交
763
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
764 765
    helper.append_op(type="isfinite", inputs={"X": x}, outputs={"Out": out})
    return out