Unverified · Commit 885171e3 authored by Yilingyelu, committed by GitHub

Fix paddle.t doc en and the annotation display on 4 en docs (#41699)

* gradients; test=document_fix

* fix VarType; test=document_fix

* fix vartype; test=document_fix

* cumsum; test=document_fix

* t; test=document_fix
Parent 65a5492a
@@ -2054,7 +2054,7 @@ def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
             y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
             y = F.relu(y)
             z = paddle.static.gradients([y], x)
-            print(z) # [var x@GRAD : fluid.VarType.LOD_TENSOR.shape(-1L, 2L, 8L, 8L).astype(VarType.FP32)]
+            print(z) # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)]
     """
     check_type(targets, 'targets', (framework.Variable, list, tuple),
                'paddle.static.gradients')
...
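For reference, the corrected comment matches the repr an end-to-end static-graph run produces. A minimal self-contained sketch, assuming Paddle 2.x (the input shape [None, 2, 8, 8] follows the docstring's own example):

    import paddle
    import paddle.nn.functional as F

    paddle.enable_static()

    x = paddle.static.data(name='x', shape=[None, 2, 8, 8], dtype='float32')
    x.stop_gradient = False
    y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
    y = F.relu(y)
    # Append gradient ops for y w.r.t. x and return the gradient variables.
    z = paddle.static.gradients([y], x)
    print(z)  # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)]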
@@ -6085,8 +6085,8 @@ class Program(object):
                 for var in prog.list_vars():
                     print(var)
-                # var img : paddle.VarType.LOD_TENSOR.shape(-1, 1, 28, 28).astype(VarType.FP32)
-                # var label : paddle.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)
+                # var img : LOD_TENSOR.shape(-1, 1, 28, 28).dtype(float32).stop_gradient(True)
+                # var label : LOD_TENSOR.shape(-1, 1).dtype(int64).stop_gradient(True)
         """
         for each_block in self.blocks:
             for each_var in list(each_block.vars.values()):
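The updated comments show the current Variable repr, which reports dtype and stop_gradient. A runnable sketch of the list_vars example, assuming Paddle 2.x static mode:

    import paddle

    paddle.enable_static()

    prog = paddle.static.default_main_program()
    img = paddle.static.data(name='img', shape=[None, 1, 28, 28], dtype='float32')
    label = paddle.static.data(name='label', shape=[None, 1], dtype='int64')
    # Iterate over every variable registered in the program's blocks.
    for var in prog.list_vars():
        print(var)
    # var img : LOD_TENSOR.shape(-1, 1, 28, 28).dtype(float32).stop_gradient(True)
    # var label : LOD_TENSOR.shape(-1, 1).dtype(int64).stop_gradient(True)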
@@ -6119,8 +6119,8 @@ class Program(object):
                 # Here will print all parameters in current program, in this example,
                 # the result is like:
                 #
-                # persist trainable param fc_0.w_0 : paddle.VarType.LOD_TENSOR.shape(13, 10).astype(VarType.FP32)
-                # persist trainable param fc_0.b_0 : paddle.VarType.LOD_TENSOR.shape(10,).astype(VarType.FP32)
+                # persist trainable param fc_0.w_0 : LOD_TENSOR.shape(13, 10).dtype(float32).stop_gradient(False)
+                # persist trainable param fc_0.b_0 : LOD_TENSOR.shape(10,).dtype(float32).stop_gradient(False)
                 #
                 # Here print(param) will print out all the properties of a parameter,
                 # including name, type and persistable, you can access to specific
...
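Likewise for all_parameters; a sketch assuming Paddle 2.x, with an fc layer so the program owns the fc_0.w_0 and fc_0.b_0 parameters named in the comments:

    import paddle

    paddle.enable_static()

    program = paddle.static.default_main_program()
    data = paddle.static.data(name='x', shape=[None, 13], dtype='float32')
    hidden = paddle.static.nn.fc(x=data, size=10)
    loss = paddle.mean(hidden)
    # all_parameters() returns every Parameter created in this program.
    for param in program.all_parameters():
        print(param)
    # persist trainable param fc_0.w_0 : LOD_TENSOR.shape(13, 10).dtype(float32).stop_gradient(False)
    # persist trainable param fc_0.b_0 : LOD_TENSOR.shape(10,).dtype(float32).stop_gradient(False)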
@@ -147,8 +147,8 @@ class InputSpec(object):
            input = InputSpec([None, 784], 'float32', 'x')
            label = InputSpec([None, 1], 'int64', 'label')
-           print(input) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x)
-           print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label)
+           print(input) # InputSpec(shape=(-1, 784), dtype=paddle.float32, name=x)
+           print(label) # InputSpec(shape=(-1, 1), dtype=paddle.int64, name=label)
    """

    def __init__(self, shape, dtype='float32', name=None):
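Beyond printing, an InputSpec is typically passed to paddle.jit.to_static to declare a layer's input signature before tracing. A minimal sketch; the Linear layer and shapes here are illustrative, not from the patch:

    import paddle
    from paddle.static import InputSpec

    net = paddle.nn.Linear(784, 10)
    # Declare the accepted signature; the batch dim is left dynamic as None.
    net = paddle.jit.to_static(net, input_spec=[InputSpec([None, 784], 'float32', 'x')])
    out = net(paddle.randn([4, 784]))
    print(out.shape)  # [4, 10]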
@@ -190,7 +190,7 @@ class InputSpec(object):
                x = paddle.to_tensor(np.ones([2, 2], np.float32))
                x_spec = InputSpec.from_tensor(x, name='x')
-               print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
+               print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)
        """
        if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
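The from_tensor example, made self-contained (assuming Paddle 2.x eager mode):

    import numpy as np
    import paddle
    from paddle.static import InputSpec

    x = paddle.to_tensor(np.ones([2, 2], np.float32))
    # Derive shape, dtype and (optionally) name from an existing Tensor.
    x_spec = InputSpec.from_tensor(x, name='x')
    print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)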
@@ -219,7 +219,7 @@ class InputSpec(object):
                x = np.ones([2, 2], np.float32)
                x_spec = InputSpec.from_numpy(x, name='x')
-               print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
+               print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)
        """
        return cls(ndarray.shape, ndarray.dtype, name)
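And the from_numpy counterpart, which reads shape and dtype straight from an ndarray:

    import numpy as np
    from paddle.static import InputSpec

    x = np.ones([2, 2], np.float32)
    x_spec = InputSpec.from_numpy(x, name='x')
    print(x_spec)  # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)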
@@ -241,7 +241,7 @@ class InputSpec(object):
                x_spec = InputSpec(shape=[64], dtype='float32', name='x')
                x_spec.batch(4)
-               print(x_spec) # InputSpec(shape=(4, 64), dtype=VarType.FP32, name=x)
+               print(x_spec) # InputSpec(shape=(4, 64), dtype=paddle.float32, name=x)
        """
        if isinstance(batch_size, (list, tuple)):
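batch() mutates the spec in place by prepending a batch dimension:

    from paddle.static import InputSpec

    x_spec = InputSpec(shape=[64], dtype='float32', name='x')
    x_spec.batch(4)  # shape becomes (4, 64)
    print(x_spec)  # InputSpec(shape=(4, 64), dtype=paddle.float32, name=x)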
@@ -273,7 +273,7 @@ class InputSpec(object):
                x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x')
                x_spec.unbatch()
-               print(x_spec) # InputSpec(shape=(64,), dtype=VarType.FP32, name=x)
+               print(x_spec) # InputSpec(shape=(64,), dtype=paddle.float32, name=x)
        """
        if len(self.shape) == 0:
...
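unbatch() is the inverse, dropping the leading (batch) dimension:

    from paddle.static import InputSpec

    x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x')
    x_spec.unbatch()  # shape becomes (64,)
    print(x_spec)  # InputSpec(shape=(64,), dtype=paddle.float32, name=x)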
@@ -1166,40 +1166,37 @@ def t(input, name=None):
     the paddle.transpose function which perm dimensions set 0 and 1.

     Args:
-        input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.
+        input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float32, float64, int32, int64.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`

     Returns:
         Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.

-    For Example:
-        .. code-block:: text
+    Examples:
+        .. code-block:: python
+           :name: code-example
+             import paddle
+
             # Example 1 (0-D tensor)
-             x = tensor([0.79])
-             paddle.t(x) = tensor([0.79])
+             x = paddle.to_tensor([0.79])
+             paddle.t(x) # [0.79]

             # Example 2 (1-D tensor)
-             x = tensor([0.79, 0.84, 0.32])
-             paddle.t(x) = tensor([0.79, 0.84, 0.32])
+             x = paddle.to_tensor([0.79, 0.84, 0.32])
+             paddle.t(x) # [0.79000002, 0.83999997, 0.31999999]
+             paddle.t(x).shape # [3]

             # Example 3 (2-D tensor)
-             x = tensor([0.79, 0.84, 0.32],
-                        [0.64, 0.14, 0.57])
-             paddle.t(x) = tensor([0.79, 0.64],
-                                  [0.84, 0.14],
-                                  [0.32, 0.57])
+             x = paddle.to_tensor([[0.79, 0.84, 0.32],
+                                  [0.64, 0.14, 0.57]])
+             x.shape # [2, 3]
+             paddle.t(x)
+             # [[0.79000002, 0.63999999],
+             #  [0.83999997, 0.14000000],
+             #  [0.31999999, 0.56999999]]
+             paddle.t(x).shape # [3, 2]
-
-    Examples:
-        .. code-block:: python
-
-            import paddle
-            x = paddle.ones(shape=[2, 3], dtype='int32')
-            x_transposed = paddle.t(x)
-            print(x_transposed.shape)
-            # [3, 2]
     """
     if len(input.shape) > 2:
         raise ValueError(
...
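The rewritten docstring folds the text examples and the runnable Examples block into one. Equivalently, a sketch showing that for an N-D (N<=2) tensor paddle.t matches paddle.transpose with perm=[1, 0]:

    import paddle

    x = paddle.to_tensor([[0.79, 0.84, 0.32],
                          [0.64, 0.14, 0.57]])
    y = paddle.t(x)  # swaps dims 0 and 1
    print(y.shape)   # [3, 2]
    print(bool(paddle.equal_all(y, paddle.transpose(x, perm=[1, 0]))))  # True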
@@ -2967,7 +2967,7 @@ def cumsum(x, axis=None, dtype=None, name=None):
            y = paddle.cumsum(data, dtype='float64')
            print(y.dtype)
-           # VarType.FP64
+           # paddle.float64
    """
    if axis is None:
        flatten = True
...
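Here the dtype argument casts the input before accumulating, and the result's dtype now prints in the paddle.* style. A sketch matching the docstring's setup:

    import paddle

    data = paddle.arange(12, dtype='float32').reshape([3, 4])
    y = paddle.cumsum(data, dtype='float64')  # cast to float64, then cumulative-sum
    print(y.dtype)  # paddle.float64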