#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
from .layer_function_generator import templatedoc
import numpy

__all__ = [
    'create_tensor',
    'create_parameter',
    'create_global_var',
    'cast',
    'concat',
    'sums',
    'assign',
    'fill_constant_batch_size_like',
    'fill_constant',
    'argmin',
    'argmax',
    'ones',
    'zeros',
]


def create_tensor(dtype, name=None, persistable=False):
    """
    Create a variable that will hold a LoDTensor with the given data type `dtype`.

    Args:
        dtype(string): 'float32'|'int32'|..., the data type of the
            created tensor.
        name(string): The name of the created tensor, if not set,
            the name will be a random unique one.
        persistable(bool): Set the persistable flag of the created tensor.

    Returns:
        Variable: The tensor variable storing the created tensor.

    Examples:
        .. code-block:: python

          tensor = fluid.layers.create_tensor(dtype='float32')
    """
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable)


def create_parameter(shape,
                     dtype,
                     name=None,
                     attr=None,
                     is_bias=False,
                     default_initializer=None):
    """
    Create a parameter. The parameter is a learnable variable, which can have
    a gradient and can be optimized.

    NOTE: this is a very low-level API. It is useful when you create an
    operator by yourself, instead of using layers.

    Args:
        shape(list[int]): shape of the parameter
        dtype(string): element type of the parameter
        name(str|None): name of the parameter; if None, a unique
                        name will be generated automatically
        attr(ParamAttr): attributes of the parameter
        is_bias(bool): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer(Initializer): initializer for the parameter

    Returns:
        the created parameter.

    Examples:
        >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
        >>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
        >>> hidden = fluid.layers.matmul(x=data, y=W)
    """
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        attr = ParamAttr(name=name)
    return helper.create_parameter(attr, shape, dtype, is_bias,
                                   default_initializer)


def create_global_var(shape,
                      value,
                      dtype,
                      persistable=False,
                      force_cpu=False,
                      name=None):
    """
    Create a global variable, such as a global step counter.

    Args:
        shape(list[int]): shape of the variable
        value(float): the value of the variable
        dtype(string): element type of the parameter
        persistable(bool): if this variable is persistable
        force_cpu(bool): force this variable to be on CPU
        name(str|None): name of the variable; if None, a unique
                        name will be generated automatically

    Returns:
        Variable: the created Variable
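
    Examples:
        .. code-block:: python

          global_step = fluid.layers.create_global_var(shape=[1], value=0,
              dtype='int64', persistable=True, name='global_step')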
    """
    helper = LayerHelper("global_var", **locals())
    var = helper.create_global_variable(
        dtype=dtype, shape=shape, persistable=persistable, name=name)
    helper.set_variable_initializer(
        var, initializer=Constant(
            value=float(value), force_cpu=force_cpu))
    return var


def cast(x, dtype):
    """
    This layer takes in the Variable `x` and casts it to a new Variable
    with the data type given by `dtype`.
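
    Args:
        x(Variable): The input Variable to be cast.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable.

    Returns:
        Variable: The output Variable holding the cast data.

    Examples:
        .. code-block:: python

          data = fluid.layers.data(name='x', shape=[13], dtype='float32')
          result = fluid.layers.cast(x=data, dtype='float64')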
    """
    helper = LayerHelper('cast', **locals())
    out = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='cast',
        inputs={'X': [x]},
        outputs={'Out': [out]},
        attrs={'in_dtype': x.dtype,
               'out_dtype': out.dtype})
    return out


def concat(input, axis=0, name=None):
    """
    **Concat**

    This function concatenates the input tensors along the given axis
    and returns the result as the output.

    Args:
        input(list): List of tensors to be concatenated
        axis(int): Integer axis along which the tensors will be concatenated
        name(str|None): A name for this layer (optional). If set to None,
                        the layer will be named automatically.

    Returns:
        Variable: Output variable of the concatenation

    Examples:
        .. code-block:: python

          out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth])
    """
    helper = LayerHelper('concat', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def sums(input, out=None):
    """This function computes the elementwise sum of all the input Variables
    and returns the result as the output.

    Args:
        input (Variable|list): The input tensor that has the elements
                               that need to be summed up.
        out (Variable|None): Output variable to store the sum result.
                             If None, a new variable will be created.

    Returns:
        Variable: The tensor type variable that has the sum of input
                  written to it.

    Examples:
        .. code-block:: python

          tmp = fluid.layers.zeros(shape=[10], dtype='int32')
          i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
          a0 = fluid.layers.array_read(array=tmp, i=i)
          i = fluid.layers.increment(x=i)
          a1 = fluid.layers.array_read(array=tmp, i=i)
          mean_a0 = fluid.layers.mean(a0)
          mean_a1 = fluid.layers.mean(a1)
          a_sum = fluid.layers.sums(input=[mean_a0, mean_a1])
    """
    helper = LayerHelper('sum', **locals())
    if out is None:
        out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out})
    return out


def assign(input, output):
    """
    **Assign**

    This function copies the *input* Variable to the *output* Variable.

    Args:
        input(Variable|numpy.ndarray): The source variable
        output(Variable): The destination variable

    Returns:
        Variable: The destination variable that was supplied as the *output*.

    Examples:
        .. code-block:: python

          out = fluid.layers.create_tensor(dtype='float32')
          hidden = fluid.layers.fc(input=data, size=10)
          fluid.layers.assign(hidden, out)
    """
    helper = LayerHelper('assign', **locals())
    if isinstance(input, Variable):
        helper.append_op(
            type='assign', inputs={'X': [input]}, outputs={'Out': [output]})
    elif isinstance(input, numpy.ndarray):
        dtype = convert_np_dtype_to_dtype_(input.dtype)
        if dtype == VarDesc.VarType.FP32:
            value_name = "fp32_values"
            values = [float(v) for v in input.flat]
        elif dtype == VarDesc.VarType.INT32:
            value_name = "int32_values"
            values = [int(v) for v in input.flat]
        else:
            raise ValueError("Unsupported dtype %s" % input.dtype)
        if input.size > 1024 * 1024:
            raise ValueError("The size of input is too big. Please consider "
                             "saving it to file and 'load_op' to load it")

        helper.append_op(
            type='assign_value',
            outputs={'Out': [output]},
            attrs={
                'dtype': dtype,
                'shape': list(input.shape),
                value_name: values
            })
    else:
        raise ValueError("Wrong type for assign input: %s" % type(input))

    return output


def fill_constant(shape, dtype, value, force_cpu=False, out=None):
    """
    **fill_constant**

    This function creates a tensor with the specified `shape` and `dtype`, and
    initializes it with a constant specified by `value`.

    The attribute `stop_gradient` of the created tensor is set to True.

    Args:
        shape(tuple|list|None): Shape of the output tensor.
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output tensor.
        value(float): The constant value used to initialize the output tensor.
        out(Variable): The output tensor.
        force_cpu(bool): Whether the output tensor should be placed on CPU
            when set to True.

    Returns:
        Variable: The tensor variable storing the output.

    Examples:
        .. code-block:: python

          data = fluid.layers.fill_constant(shape=[1], value=0, dtype='int64')
    """

    helper = LayerHelper("fill_constant", **locals())
    if out is None:
        out = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='fill_constant',
        inputs={},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value),
            'force_cpu': force_cpu or force_init_on_cpu()
        })
    out.stop_gradient = True
    return out


@templatedoc()
def fill_constant_batch_size_like(input,
                                  shape,
                                  dtype,
                                  value,
                                  input_dim_idx=0,
                                  output_dim_idx=0):
    """
    ${comment}

    It also sets *stop_gradient* to True.

    >>> data = fluid.layers.fill_constant_batch_size_like(
    >>>             input=like, shape=[1], value=0, dtype='int64')

    Args:
        input(${input_type}): ${input_comment}.

        shape(${shape_type}): ${shape_comment}.

        dtype(${dtype_type}): ${dtype_comment}.

        value(${value_type}): ${value_comment}.

        input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}.

        output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}.

    Returns:
        ${out_comment}.
    """
    helper = LayerHelper("fill_constant_batch_size_like", **locals())
    out = helper.create_tmp_variable(dtype=dtype)
    helper.append_op(
        type='fill_constant_batch_size_like',
        inputs={'Input': input},
        outputs={'Out': [out]},
        attrs={
            'shape': shape,
            'dtype': out.dtype,
            'value': float(value),
            'input_dim_idx': input_dim_idx,
            'output_dim_idx': output_dim_idx
        })
    out.stop_gradient = True
    return out


def argmin(x, axis=0):
    """
    **argmin**

    This function computes the indices of the minimum elements
    of the input tensor along the provided axis.

    Args:
        x(Variable): The input to compute the indices of
                     the min elements.
        axis(int): Axis to compute indices along.
    
    Returns:
        Variable: The tensor variable storing the output
    
    Examples:
        .. code-block:: python
          
          out = fluid.layers.argmin(x=in_tensor, axis=0)
          out = fluid.layers.argmin(x=in_tensor, axis=-1)
    """
    helper = LayerHelper("arg_min", **locals())
    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_min',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def argmax(x, axis=0):
    """
    **argmax**

    This function computes the indices of the maximum elements
    of the input tensor along the provided axis.

    Args:
        x(Variable): The input to compute the indices of
                     the max elements.
        axis(int): Axis to compute indices along.
    
    Returns:
        Variable: The tensor variable storing the output
    
    Examples:
        .. code-block:: python
          
          out = fluid.layers.argmax(x=in_tensor, axis=0)
          out = fluid.layers.argmax(x=in_tensor, axis=-1)
    """
    helper = LayerHelper("arg_max", **locals())
    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
    helper.append_op(
        type='arg_max',
        inputs={'X': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def ones(shape, dtype, force_cpu=False):
    """
    **ones**

    This function creates a tensor of specified *shape* and
    *dtype*, and initializes this with 1.

    It also sets *stop_gradient* to True.

    Args:
        shape(tuple|list|None): Shape of output tensor
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
        force_cpu(bool): Whether to place the output tensor on CPU

    Returns:
        Variable: The tensor variable storing the output

    Examples:
        .. code-block:: python

          data = fluid.layers.ones(shape=[1], dtype='int64')
    """
    return fill_constant(value=1.0, **locals())


def zeros(shape, dtype, force_cpu=False):
    """
    **zeros**

    This function creates a tensor of specified *shape* and
    *dtype*, and initializes this with 0.

    It also sets *stop_gradient* to True.

    Args:
        shape(tuple|list|None): Shape of output tensor
        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
        force_cpu(bool): Whether to place the output tensor on CPU

    Returns:
        Variable: The tensor variable storing the output

    Examples:
        .. code-block:: python

          data = fluid.layers.zeros(shape=[1], dtype='int64')
    """
    return fill_constant(value=0.0, **locals())


def reverse(x, axis):
    """
    **reverse**

    This function reverses the input 'x' along the given axes.

    Args:
        x(Variable): the input to be reversed.
        axis(int|tuple|list): Axis along which the order of elements
                    is reversed. If it is a tuple or a list, reversing
                    will be applied on each axis in the tuple or list.

    Returns:
        Variable: The reversed tensor.

    Examples:
        .. code-block:: python

          out = fluid.layers.reverse(x=in_tensor, axis=0)
          # or:
          out = fluid.layers.reverse(x=in_tensor, axis=[0,1])
    """
    if isinstance(axis, int):
        axis = [axis]
    helper = LayerHelper("reverse", **locals())
    out = helper.create_tmp_variable(dtype=x.dtype)
    helper.append_op(
        type='reverse',
        inputs={'Input': x},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out


def save(x, file_path, overwrite=True):
    """
    Saves a variable as a file.

    Args:
        x(variable): The Tensor/LoDTensor to be saved.
        file_path(str): The file path where the variable will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime
            error will be thrown.
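
    Examples:
        .. code-block:: python

          # a minimal, illustrative usage; the file path is a placeholder
          var = fluid.layers.fill_constant(shape=[10], dtype='float32', value=1.0)
          fluid.layers.save(x=var, file_path='/tmp/var')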
    """
    helper = LayerHelper("save", **locals())
    helper.append_op(
        type="save",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def save_combine(x, file_path, overwrite=True):
    """
    Saves a list of variables into a single file.

    Args:
        x(list): A list of Tensor/LoDTensor to be saved together in a single file.
        file_path(str): The file path where variables will be saved.
        overwrite(bool): Whether to overwrite the given file if it already
            exists. If set to False and the file exists, a runtime
            error will be thrown.
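
    Examples:
        .. code-block:: python

          # a minimal, illustrative usage; variables and path are placeholders
          v1 = fluid.layers.fill_constant(shape=[10], dtype='float32', value=1.0)
          v2 = fluid.layers.fill_constant(shape=[10], dtype='float32', value=2.0)
          fluid.layers.save_combine(x=[v1, v2], file_path='/tmp/vars')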
    """
    helper = LayerHelper("save_combine", **locals())
    helper.append_op(
        type="save_combine",
        inputs={"input": x},
        outputs={},
        attrs={"file_path": file_path,
               "overwrite": overwrite})


def load_combine(out, file_path):
    """
    Loads a list of variables from a single file.

    Args:
        out(list): The list of variables to be read from the disk file.
        file_path(str): The path of the disk file.
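
    Examples:
        .. code-block:: python

          # a minimal, illustrative usage; assumes the file was written by
          # save_combine and the path is a placeholder
          v1 = fluid.layers.create_tensor(dtype='float32', name='v1')
          v2 = fluid.layers.create_tensor(dtype='float32', name='v2')
          fluid.layers.load_combine(out=[v1, v2], file_path='/tmp/vars')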
    """
    helper = LayerHelper("load_combine", **locals())
    helper.append_op(
        type="load_combine",
        inputs={},
        outputs={"Out": out},
        attrs={"file_path": file_path})