Unverified commit 7836f477 authored by Huihuang Zheng, committed by GitHub

Update API of paddle.fluid.data (#20024)

Set the output type to LoDTensor only

After code experiments, I found that `data` doesn't support other types
Parent: afc40a59
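Before the diff itself, here is a minimal sketch of what the change means at the call site. It assumes the PaddlePaddle Python API at the state of this commit; the `.type` check on the returned Variable is an assumption noted in the comment.

import paddle.fluid as fluid
from paddle.fluid import core

# Before this commit, an explicit output type could be passed:
#   x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32',
#                  type=core.VarDesc.VarType.LOD_TENSOR)
# After this commit, the `type` argument is gone and the created
# variable is always a LoDTensor.
x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')

# Assumption: the Variable exposes its VarDesc type via `.type`.
print(x.type == core.VarDesc.VarType.LOD_TENSOR)  # expected: True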
......
@@ -556,7 +556,7 @@ paddle.fluid.contrib.BasicLSTMUnit.sublayers (ArgSpec(args=['self', 'include_sub
paddle.fluid.contrib.BasicLSTMUnit.train (ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.contrib.basic_lstm (ArgSpec(args=['input', 'init_hidden', 'init_cell', 'hidden_size', 'num_layers', 'sequence_length', 'dropout_prob', 'bidirectional', 'batch_first', 'param_attr', 'bias_attr', 'gate_activation', 'activation', 'forget_bias', 'dtype', 'name'], varargs=None, keywords=None, defaults=(1, None, 0.0, False, True, None, None, None, None, 1.0, 'float32', 'basic_lstm')), ('document', 'fe4d0c3c55a162b8cfe10b05fabb7ce4'))
paddle.fluid.contrib.ctr_metric_bundle (ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None), ('document', 'b68d12366896c41065fc3738393da2aa'))
paddle.fluid.data (ArgSpec(args=['name', 'shape', 'dtype', 'type'], varargs=None, keywords=None, defaults=('float32', VarType.LOD_TENSOR)), ('document', '4e96c3d52ab30b07157f7588ba61d3d1'))
paddle.fluid.data (ArgSpec(args=['name', 'shape', 'dtype'], varargs=None, keywords=None, defaults=('float32',)), ('document', '4b62e32530615a11b00e2ee52a53488a'))
paddle.fluid.dygraph.Layer ('paddle.fluid.dygraph.layers.Layer', ('document', 'a889d5affd734ede273e94d4257163ab'))
paddle.fluid.dygraph.Layer.__init__ (ArgSpec(args=['self', 'name_scope', 'dtype'], varargs=None, keywords=None, defaults=(VarType.FP32,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.dygraph.Layer.add_parameter (ArgSpec(args=['self', 'name', 'parameter'], varargs=None, keywords=None, defaults=None), ('document', 'f35ab374c7d5165c3daf3bd64a5a2ec1'))
......
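To confirm locally that the spec entry above matches the installed function, one could inspect it directly. This is only a sketch; it assumes the exported `fluid.data` is the plain Python function shown in the diff below, with no signature-altering decorator.

import inspect

import paddle.fluid as fluid

# The updated spec entry lists args=['name', 'shape', 'dtype'] with
# defaults=('float32',); the removed 'type' argument should be gone.
spec = inspect.getfullargspec(fluid.data)
print(spec.args)      # expected: ['name', 'shape', 'dtype']
print(spec.defaults)  # expected: ('float32',)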
......
@@ -20,28 +20,30 @@ from .layer_helper import LayerHelper
__all__ = ['data']
def data(name, shape, dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR):
def data(name, shape, dtype='float32'):
"""
**Data Layer**
This function creates a variable on the global scope. The global variables
can be accessed by all the following operators in the graph.
This function creates a variable on the global block. The global variable
can be accessed by all the operators that follow it in the graph. The
variable is a placeholder that can be fed with input data; for example,
an Executor can feed input data into it.
Note:
`paddle.fluid.layers.data` is deprecated. It will be removed in a future
version. Please use `paddle.fluid.data` instead.
The `paddle.fluid.layers.data` set shape at compile time but does NOT
check the shape of feeded data, this `paddle.fluid.data` checks the
shape of data feeded by Executor/ParallelExecutor during run time.
`paddle.fluid.layers.data` sets the shape and dtype at compile time but
does NOT check the shape or the dtype of the fed data, while this
`paddle.fluid.data` checks the shape and the dtype of the data fed by
Executor or ParallelExecutor during run time.
Args:
name (str): The name/alias of the variable
name (str): The name/alias of the variable, see :ref:`api_guide_Name`
for more details.
shape (list|tuple): List|Tuple of integers declaring the shape.
dtype (np.dtype|VarType|str): The type of the data. Supported dtype:
float16, float32, float64, int8, int16, int32, int64, uint8, bool.
type (VarType): The output type. Supported type: VarType.LOD_TENSOR,
VarType.SELECTED_ROWS, VarType.NCCL_ID. Default: VarType.LOD_TENSOR.
bool, float16, float32, float64, int8, int16, int32, int64, uint8.
Returns:
Variable: The global variable that gives access to the data.
......
@@ -50,15 +52,33 @@ def data(name, shape, dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR):
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Creates a variable with fixed size [1, 2, 3]
# Creates a variable with fixed size [3, 2, 1]
# User can only feed data of the same shape to x
x = fluid.data(name='x', shape=[1, 2, 3], dtype='int64')
x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
# Creates a variable with changeable batch size -1.
# Users can feed data of any batch size into y,
# but size of each data sample has to be [3, 224, 224]
y = fluid.data(name='y', shape=[-1, 3, 224, 224], dtype='float32')
# but size of each data sample has to be [2, 1]
y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')
z = x + y
# In this example, we feed x and y with a NumPy ndarray of ones
# and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
exe = fluid.Executor(fluid.CPUPlace())
out = exe.run(fluid.default_main_program(),
feed={
'x': feed_data,
'y': feed_data
},
fetch_list=[z.name])
# Prints a NumPy ndarray of shape [3, 2, 1], dtype float32, whose elements are all 2
print(out)
"""
helper = LayerHelper('data', **locals())
......
@@ -66,7 +86,7 @@ def data(name, shape, dtype='float32', type=core.VarDesc.VarType.LOD_TENSOR):
name=name,
shape=shape,
dtype=dtype,
type=type,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True,
lod_level=0,
is_data=True,
......
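The Note in the new docstring above says that `fluid.data` checks the shape and dtype of fed data at run time, unlike `fluid.layers.data`. The sketch below tries to trigger that check by feeding a wrongly shaped array; the exact exception type raised at feed time is an assumption, so it is caught broadly.

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name='x', shape=[3, 2, 1], dtype='float32')
y = fluid.data(name='y', shape=[-1, 2, 1], dtype='float32')
z = x + y

exe = fluid.Executor(fluid.CPUPlace())

# x was declared with the fixed shape [3, 2, 1]; feeding a [4, 2, 1] array
# should be rejected by the run-time shape check the docstring describes.
good = np.ones(shape=[3, 2, 1], dtype=np.float32)
bad = np.ones(shape=[4, 2, 1], dtype=np.float32)
try:
    exe.run(fluid.default_main_program(),
            feed={'x': bad, 'y': good},
            fetch_list=[z.name])
except Exception as e:  # the exact exception type is an assumption
    print("mismatched feed rejected:", type(e).__name__)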