Unverified commit 72efd830, authored by liuyuhui, committed by GitHub

[API 2.0: doc] fix doc of linspace cast assign addcmul (#27897)

* update assign/cast/linspace/addcmul op doc for 2.0 api,test=document_fix

* fix bug about cast doc for 2.0 api,test=document_fix
Parent 772a01d8
@@ -199,49 +199,27 @@ def create_global_var(shape,

 def cast(x, dtype):
     """
-    :alias_main: paddle.cast
-    :alias: paddle.cast,paddle.tensor.cast,paddle.tensor.manipulation.cast
-    :old_api: paddle.fluid.layers.cast
-
     This OP takes in the Variable :attr:`x` with :attr:`x.dtype` and casts it
     to the output with :attr:`dtype`. It's meaningless if the output dtype
     equals the input dtype, but it's fine if you do so.

     Args:
-        x(Variable): An input N-D Tensor with data type bool, float16,
+        x(Tensor): An input N-D Tensor with data type bool, float16,
             float32, float64, int32, int64, uint8.
         dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output:
             bool, float16, float32, float64, int8, int32, int64, uint8.

     Returns:
-        Variable: A Tensor with the same shape as input's.
+        Tensor: A Tensor with the same shape as input's.

     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
-            import numpy as np
-
-            place = fluid.core.CPUPlace()
-
-            x_lod = fluid.data(name="x", shape=[2,2], lod_level=0)
-            cast_res1 = fluid.layers.cast(x=x_lod, dtype="uint8")
-            cast_res2 = fluid.layers.cast(x=x_lod, dtype=np.int32)
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-            x_i_lod = fluid.core.LoDTensor()
-            x_i_lod.set(np.array([[1.3,-2.4],[0,4]]).astype("float32"), place)
-            x_i_lod.set_recursive_sequence_lengths([[0,2]])
-            res1 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res1], return_numpy=False)
-            res2 = exe.run(fluid.default_main_program(), feed={'x':x_i_lod}, fetch_list=[cast_res2], return_numpy=False)
-            print(np.array(res1[0]), np.array(res1[0]).dtype)
-            # [[  1 254]
-            #  [  0   4]] uint8
-            print(np.array(res2[0]), np.array(res2[0]).dtype)
-            # [[ 1 -2]
-            #  [ 0  4]] int32
+            import paddle
+
+            x = paddle.to_tensor([2, 3, 4], 'float64')
+            y = paddle.cast(x, 'uint8')
     """
     check_variable_and_dtype(
         x, 'x',
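For context on the migration, a minimal imperative-mode sketch of the updated `paddle.cast` call, reusing the float32 inputs from the removed static-graph example (the commented outputs simply restate the values that example printed), might look like:

    .. code-block:: python

        import paddle
        import numpy as np

        # Hypothetical inputs borrowed from the removed static-graph example.
        x = paddle.to_tensor(np.array([[1.3, -2.4], [0, 4]]).astype("float32"))
        res_uint8 = paddle.cast(x, "uint8")   # negative values wrap around for unsigned types
        res_int32 = paddle.cast(x, "int32")   # fractional parts are truncated
        print(res_uint8.numpy())  # e.g. [[  1 254] [  0   4]]
        print(res_int32.numpy())  # e.g. [[ 1 -2] [ 0  4]]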
@@ -550,9 +528,6 @@ def sums(input, out=None):

 def assign(input, output=None):
     """
-    :alias_main: paddle.nn.functional.assign
-    :alias: paddle.nn.functional.assign,paddle.nn.functional.common.assign
-    :old_api: paddle.fluid.layers.assign
-
     The OP copies the :attr:`input` to the :attr:`output`.
@@ -568,13 +543,16 @@ def assign(input, output=None):

     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
+            import paddle
             import numpy as np
-            data = fluid.layers.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
-            result1 = fluid.layers.create_tensor(dtype='float64')
-            fluid.layers.assign(data, result1) # result1 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
-            result2 = fluid.layers.assign(data) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
-            result3 = fluid.layers.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
+            data = paddle.fill_constant(shape=[3, 2], value=2.5, dtype='float64') # [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
+            array = np.array([[1, 1],
+                              [3, 4],
+                              [1, 3]]).astype(np.int64)
+            result1 = paddle.zeros(shape=[3, 3], dtype='float32')
+            paddle.nn.functional.assign(array, result1) # result1 = [[1, 1], [3 4], [1, 3]]
+            result2 = paddle.nn.functional.assign(data) # result2 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
+            result3 = paddle.nn.functional.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32')) # result3 = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
     """
     helper = LayerHelper('assign', **locals())
     check_type(input, 'input', (Variable, numpy.ndarray), 'assign')
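As a rough companion to the new example, the two calling styles of `paddle.nn.functional.assign` (returning a fresh copy versus writing into an existing tensor) can be sketched as follows; this is only an illustrative sketch that keeps the dtypes of input and output consistent:

    .. code-block:: python

        import paddle

        data = paddle.fill_constant(shape=[3, 2], value=2.5, dtype='float64')

        # Style 1: no output argument; a new tensor holding a copy of the values is returned.
        copy1 = paddle.nn.functional.assign(data)

        # Style 2: copy the values into a pre-allocated tensor passed as the output argument.
        buffer = paddle.zeros(shape=[3, 2], dtype='float64')
        paddle.nn.functional.assign(data, buffer)

        print(copy1.numpy())   # every entry is 2.5
        print(buffer.numpy())  # every entry is 2.5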
@@ -1438,9 +1416,9 @@ def linspace(start, stop, num, dtype=None, name=None):

     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
-            data = fluid.layers.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]
-            data = fluid.layers.linspace(0, 10, 1, 'float32') # [0.0]
+            import paddle
+            data = paddle.linspace(0, 10, 5, 'float32') # [0.0, 2.5, 5.0, 7.5, 10.0]
+            data = paddle.linspace(0, 10, 1, 'float32') # [0.0]
     """
     if dtype is None:
...
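Since the signature shows `dtype=None` and the implementation branches on `if dtype is None:`, the dtype argument can also be omitted; a minimal sketch (the default dtype is whatever that `dtype is None` branch selects) is:

    .. code-block:: python

        import paddle

        pts = paddle.linspace(0, 10, 5, 'float32')    # [0.0, 2.5, 5.0, 7.5, 10.0]
        single = paddle.linspace(0, 10, 1, 'float32') # [0.0]
        auto = paddle.linspace(0, 10, 5)              # dtype omitted; resolved by the `dtype is None` branch
        print(pts.numpy(), single.numpy(), auto.numpy())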
@@ -1324,32 +1324,34 @@ def log1p(x, name=None):

 def addcmul(input, tensor1, tensor2, value=1.0, name=None):
     """
-    :alias_main: paddle.addcmul
-    :alias: paddle.addcmul,paddle.tensor.addcmul,paddle.tensor.math.addcmul
-
     Calculate the element-wise multiplication of tensor1 and tensor2,
     then multiply the result by value, and add it to input. The shape of input,
     tensor1, tensor2 should be broadcastable.

     The equation is:

     ..  math::

         out = input + value * tensor1 * tensor2

     Args:
-        input(Variable): The input to be added. A Tensor with type float32, float64, int32, int64.
-        tensor1(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
-        tensor2(Variable): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
+        input(Tensor): The input to be added. A Tensor with type float32, float64, int32, int64.
+        tensor1(Tensor): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
+        tensor2(Tensor): The tensor to be multiplied. A Tensor with type float32, float64, int32, int64.
         value(int|float): The multiplier for tensor1*tensor2. For float32 and float64 type input, value must be float, otherwise an integer.
         name(str, Optional): For details, please refer to :ref:`api_guide_Name`.
             Generally, no setting is required. Default: None.

     Returns:
-        out(Variable): The output result. A Tensor with the same data type as input's.
+        out(Tensor): The output result. A Tensor with the same data type as input's.

     Examples:
         .. code-block:: python

             import paddle
-            import paddle.fluid as fluid
-            input = fluid.data(name='input', dtype='float32', shape=[3, 4])
-            tensor1 = fluid.data(name='tenosr1', dtype='float32', shape=[1, 4])
-            tensor2 = fluid.data(name='tensor2', dtype='float32', shape=[3, 4])
-            data = paddle.addcmul(input, tensor1, tensor2, value=1.0)
+            input = paddle.ones([2,2])
+            tensor1 = paddle.ones([2,2])
+            tensor2 = paddle.ones([2,2])
+            out = paddle.addcmul(input, tensor1, tensor2, value=0.5)
+            print(out.numpy())
+            # [[1.5 1.5]
+            #  [1.5 1.5]]
     """
     check_variable_and_dtype(input, 'input', ['float32', 'float64', 'int32', 'int64'], 'addcmul')
...
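The docstring notes that input, tensor1, and tensor2 only need to be broadcastable; a minimal sketch reusing the [3, 4] / [1, 4] / [3, 4] shapes from the removed example (illustrative only) is:

    .. code-block:: python

        import paddle

        input = paddle.ones([3, 4])
        tensor1 = paddle.ones([1, 4])   # broadcast along the first dimension
        tensor2 = paddle.ones([3, 4])
        out = paddle.addcmul(input, tensor1, tensor2, value=0.5)
        print(out.numpy())  # every entry is 1 + 0.5 * 1 * 1 = 1.5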