diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py
index ba7692b442f82c2ae6fefa159854e3b88907ceb8..adce8051959607f6968112639c857983e14f6916 100755
--- a/python/paddle/fluid/backward.py
+++ b/python/paddle/fluid/backward.py
@@ -2054,7 +2054,7 @@ def gradients(targets, inputs, target_gradients=None, no_grad_set=None):
             y = paddle.static.nn.conv2d(x, 4, 1, bias_attr=False)
             y = F.relu(y)
             z = paddle.static.gradients([y], x)
-            print(z) # [var x@GRAD : fluid.VarType.LOD_TENSOR.shape(-1L, 2L, 8L, 8L).astype(VarType.FP32)]
+            print(z) # [var x@GRAD : LOD_TENSOR.shape(-1, 2, 8, 8).dtype(float32).stop_gradient(False)]
     """
     check_type(targets, 'targets', (framework.Variable, list, tuple),
                'paddle.static.gradients')
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 5dab39a35d4787a091e0d144e3aa79e053e8f9f7..314a502a3cbef07768ddac68adbe346c26a41739 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -6085,8 +6085,8 @@ class Program(object):
                 for var in prog.list_vars():
                     print(var)

-                # var img : paddle.VarType.LOD_TENSOR.shape(-1, 1, 28, 28).astype(VarType.FP32)
-                # var label : paddle.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)
+                # var img : LOD_TENSOR.shape(-1, 1, 28, 28).dtype(float32).stop_gradient(True)
+                # var label : LOD_TENSOR.shape(-1, 1).dtype(int64).stop_gradient(True)
         """
         for each_block in self.blocks:
             for each_var in list(each_block.vars.values()):
@@ -6119,8 +6119,8 @@ class Program(object):
                 # Here will print all parameters in current program, in this example,
                 # the result is like:
                 #
-                # persist trainable param fc_0.w_0 : paddle.VarType.LOD_TENSOR.shape(13, 10).astype(VarType.FP32)
-                # persist trainable param fc_0.b_0 : paddle.VarType.LOD_TENSOR.shape(10,).astype(VarType.FP32)
+                # persist trainable param fc_0.w_0 : LOD_TENSOR.shape(13, 10).dtype(float32).stop_gradient(False)
+                # persist trainable param fc_0.b_0 : LOD_TENSOR.shape(10,).dtype(float32).stop_gradient(False)
                 #
                 # Here print(param) will print out all the properties of a parameter,
                 # including name, type and persistable, you can access to specific
diff --git a/python/paddle/static/input.py b/python/paddle/static/input.py
index 7c0c71951aa1d7a566cabf73ecb9d26e03b8dab6..f58c06c9b51b6e5509533d73bff2f41f4a0cb731 100644
--- a/python/paddle/static/input.py
+++ b/python/paddle/static/input.py
@@ -147,8 +147,8 @@ class InputSpec(object):
             input = InputSpec([None, 784], 'float32', 'x')
             label = InputSpec([None, 1], 'int64', 'label')

-            print(input) # InputSpec(shape=(-1, 784), dtype=VarType.FP32, name=x)
-            print(label) # InputSpec(shape=(-1, 1), dtype=VarType.INT64, name=label)
+            print(input) # InputSpec(shape=(-1, 784), dtype=paddle.float32, name=x)
+            print(label) # InputSpec(shape=(-1, 1), dtype=paddle.int64, name=label)
     """

     def __init__(self, shape, dtype='float32', name=None):
@@ -190,7 +190,7 @@ class InputSpec(object):

                 x = paddle.to_tensor(np.ones([2, 2], np.float32))
                 x_spec = InputSpec.from_tensor(x, name='x')
-                print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
+                print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

         """
         if isinstance(tensor, (Variable, core.VarBase, core.eager.Tensor)):
@@ -219,7 +219,7 @@ class InputSpec(object):

                 x = np.ones([2, 2], np.float32)
                 x_spec = InputSpec.from_numpy(x, name='x')
-                print(x_spec) # InputSpec(shape=(2, 2), dtype=VarType.FP32, name=x)
+                print(x_spec) # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

         """
         return cls(ndarray.shape, ndarray.dtype, name)
@@ -241,7 +241,7 @@ class InputSpec(object):

                 x_spec = InputSpec(shape=[64], dtype='float32', name='x')
                 x_spec.batch(4)
-                print(x_spec) # InputSpec(shape=(4, 64), dtype=VarType.FP32, name=x)
+                print(x_spec) # InputSpec(shape=(4, 64), dtype=paddle.float32, name=x)

         """
         if isinstance(batch_size, (list, tuple)):
@@ -273,7 +273,7 @@ class InputSpec(object):

                 x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x')
                 x_spec.unbatch()
-                print(x_spec) # InputSpec(shape=(64,), dtype=VarType.FP32, name=x)
+                print(x_spec) # InputSpec(shape=(64,), dtype=paddle.float32, name=x)

         """
         if len(self.shape) == 0:
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 098f17e6759c223c0d03de88d6b164c305833b1b..6006a9dec0cbbf2719d2e203c52694d3f4fac196 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -1166,40 +1166,37 @@ def t(input, name=None):
     the paddle.transpose function which perm dimensions set 0 and 1.

     Args:
-        input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.
+        input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float32, float64, int32, int64.
         name(str, optional): The default value is None. Normally there is no need for
             user to set this property. For more information, please refer to :ref:`api_guide_Name`
     Returns:
         Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.

-    For Example:
-
-        .. code-block:: text
+    Examples:

+        .. code-block:: python
+           :name: code-example
+             import paddle
+
             # Example 1 (0-D tensor)
-             x = tensor([0.79])
-             paddle.t(x) = tensor([0.79])
-
+             x = paddle.to_tensor([0.79])
+             paddle.t(x) # [0.79]
+
             # Example 2 (1-D tensor)
-             x = tensor([0.79, 0.84, 0.32])
-             paddle.t(x) = tensor([0.79, 0.84, 0.32])
+             x = paddle.to_tensor([0.79, 0.84, 0.32])
+             paddle.t(x) # [0.79000002, 0.83999997, 0.31999999]
+             paddle.t(x).shape # [3]

             # Example 3 (2-D tensor)
-             x = tensor([0.79, 0.84, 0.32],
-                        [0.64, 0.14, 0.57])
-             paddle.t(x) = tensor([0.79, 0.64],
-                                  [0.84, 0.14],
-                                  [0.32, 0.57])
+             x = paddle.to_tensor([[0.79, 0.84, 0.32],
+                                   [0.64, 0.14, 0.57]])
+             x.shape # [2, 3]
+             paddle.t(x)
+             # [[0.79000002, 0.63999999],
+             #  [0.83999997, 0.14000000],
+             #  [0.31999999, 0.56999999]]
+             paddle.t(x).shape # [3, 2]

-    Examples:
-
-        .. code-block:: python
-
-            import paddle
-            x = paddle.ones(shape=[2, 3], dtype='int32')
-            x_transposed = paddle.t(x)
-            print(x_transposed.shape)
-            # [3, 2]
     """
     if len(input.shape) > 2:
         raise ValueError(
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 6bbeb4e77be204efc0e335898a4da232540e90d8..ad65a22dfae92e99de4273357e9dc47b56ffb8a8 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -2967,7 +2967,7 @@ def cumsum(x, axis=None, dtype=None, name=None):

             y = paddle.cumsum(data, dtype='float64')
             print(y.dtype)
-            # VarType.FP64
+            # paddle.float64
     """
     if axis is None:
         flatten = True
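
Quick check (not part of the patch): a minimal Python sketch, assuming a Paddle build that already prints dtypes as paddle.float32 / paddle.float64, to reproduce the repr strings the updated docstring comments quote.

    import numpy as np
    import paddle
    from paddle.static import InputSpec

    # InputSpec repr now shows plain Paddle dtypes instead of VarType.* names.
    x_spec = InputSpec.from_numpy(np.ones([2, 2], np.float32), name='x')
    print(x_spec)   # InputSpec(shape=(2, 2), dtype=paddle.float32, name=x)

    # Tensor.dtype prints as paddle.float64 rather than VarType.FP64.
    y = paddle.cumsum(paddle.arange(4, dtype='float32'), dtype='float64')
    print(y.dtype)  # paddle.float64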