Unverified commit 241505c2, authored by wangchaochaohu, committed by GitHub

refine the doc for API2.0 (#28386)

Parent 21596467
@@ -82,7 +82,7 @@ def get_cudnn_version():
import paddle
cudnn_version = get_cudnn_version()
cudnn_version = paddle.get_cudnn_version()
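The hunk above only shows the renamed call. A minimal runnable sketch of the updated usage, assuming Paddle 2.0's default imperative mode; the printed value depends on the local cuDNN installation and is shown purely as an illustration:

.. code-block:: python

    import paddle

    # Returns an int such as 7605 for cuDNN 7.6.5, or None if cuDNN is not available.
    cudnn_version = paddle.get_cudnn_version()
    print(cudnn_version)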
@@ -117,7 +117,7 @@ def set_device(device):
.. code-block:: python
import paddle
paddle.disable_static()
paddle.set_device("cpu")
x1 = paddle.ones(name='x1', shape=[1, 2], dtype='int32')
x2 = paddle.zeros(name='x2', shape=[1, 2], dtype='int32')
@@ -179,7 +179,6 @@ def get_device():
.. code-block:: python
import paddle
paddle.disable_static()
device = paddle.get_device()
"""
@@ -224,7 +224,6 @@ def full_like(x, fill_value, dtype=None, name=None):
import paddle
import numpy as np
paddle.disable_static() # Now we are in imperative mode
input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')
output = paddle.full_like(input, 2.0)
# [[2. 2. 2.]
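The full_like example above is cut off after the first output row. A complete sketch of the same snippet; the expected values follow directly from ``fill_value=2.0``:

.. code-block:: python

    import paddle

    # A 2x3 float32 tensor filled with 0.0.
    input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')

    # full_like keeps the shape and dtype of `input` and fills it with 2.0.
    output = paddle.full_like(input, 2.0)
    # [[2. 2. 2.]
    #  [2. 2. 2.]]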
@@ -277,7 +276,6 @@ def ones(shape, dtype=None, name=None):
.. code-block:: python
import paddle
paddle.disable_static()
# default dtype for ones OP
data1 = paddle.ones(shape=[3, 2])
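The ones example stops at the call that relies on the default dtype. A short sketch contrasting the default (float32) with an explicit dtype; the second call is an illustration and not part of the original docstring:

.. code-block:: python

    import paddle

    # Default dtype is float32.
    data1 = paddle.ones(shape=[3, 2])
    # [[1. 1.]
    #  [1. 1.]
    #  [1. 1.]]

    # The dtype can also be set explicitly.
    data2 = paddle.ones(shape=[3, 2], dtype='int32')
    # [[1 1]
    #  [1 1]
    #  [1 1]]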
@@ -361,7 +359,6 @@ def zeros(shape, dtype=None, name=None):
import paddle
paddle.disable_static() # Now we are in imperative mode
data = paddle.zeros(shape=[3, 2], dtype='float32')
# [[0. 0.]
# [0. 0.]
@@ -446,7 +443,6 @@ def eye(num_rows, num_columns=None, dtype=None, name=None):
import paddle
paddle.disable_static() # Now we are in imperative mode
data = paddle.eye(3, dtype='int32')
# [[1 0 0]
# [0 1 0]
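The eye example is cut off after the second output row. A short sketch that also shows the optional ``num_columns`` argument; the non-square call is illustrative and not taken from the original docstring:

.. code-block:: python

    import paddle

    data = paddle.eye(3, dtype='int32')
    # [[1 0 0]
    #  [0 1 0]
    #  [0 0 1]]

    # num_columns may differ from num_rows.
    data = paddle.eye(2, num_columns=3, dtype='int32')
    # [[1 0 0]
    #  [0 1 0]]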
@@ -493,7 +489,6 @@ def full(shape, fill_value, dtype=None, name=None):
import paddle
paddle.disable_static() # Now we are in imperative mode
data1 = paddle.full(shape=[2,1], fill_value=0, dtype='int64')
#[[0]
# [0]]
@@ -67,8 +67,6 @@ __all__ = [
def concat(x, axis=0, name=None):
"""
:alias_main: paddle.concat
:alias: paddle.tensor.concat, paddle.tensor.manipulation.concat
This OP concatenates the input along the axis.
@@ -91,7 +89,6 @@ def concat(x, axis=0, name=None):
import paddle
paddle.disable_static() # Now we are in imperative mode
x1 = paddle.to_tensor([[1, 2, 3],
[4, 5, 6]])
x2 = paddle.to_tensor([[11, 12, 13],
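The concat example above is truncated while building ``x2``. A complete, runnable sketch of the 2.0-style usage; the values of ``x2`` beyond the first row are illustrative, and the output follows from concatenating along ``axis=0``:

.. code-block:: python

    import paddle

    x1 = paddle.to_tensor([[1, 2, 3],
                           [4, 5, 6]])
    x2 = paddle.to_tensor([[11, 12, 13],
                           [14, 15, 16]])

    # Concatenate along the first dimension; the result has shape [4, 3].
    out = paddle.concat([x1, x2], axis=0)
    # [[ 1  2  3]
    #  [ 4  5  6]
    #  [11 12 13]
    #  [14 15 16]]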
@@ -465,7 +462,6 @@ def split(x, num_or_sections, axis=0, name=None):
import numpy as np
import paddle
paddle.disable_static()
# x is a Tensor which shape is [3, 9, 5]
x_np = np.random.random([3, 9, 5]).astype("int32")
x = paddle.to_tensor(x_np)
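The split example stops right after creating ``x``. A minimal sketch of how ``paddle.split`` divides the [3, 9, 5] tensor along ``axis=1``; the section count of 3 is chosen here for illustration:

.. code-block:: python

    import numpy as np
    import paddle

    # x is a Tensor with shape [3, 9, 5].
    x_np = np.random.random([3, 9, 5]).astype("int32")
    x = paddle.to_tensor(x_np)

    # Split into three equal sections along axis 1.
    out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
    print(out0.shape)  # [3, 3, 5]
    print(out1.shape)  # [3, 3, 5]
    print(out2.shape)  # [3, 3, 5]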
@@ -608,7 +604,6 @@ def unique(x,
import paddle
paddle.disable_static()
x = paddle.to_tensor([2, 3, 3, 1, 5, 3])
unique = paddle.unique(x)
np_unique = unique.numpy() # [1 2 3 5]
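A short sketch extending the unique example with the optional index and count outputs; the ``return_index`` and ``return_counts`` flags are assumed from the 2.0 signature of ``paddle.unique``, and the printed values follow from the input ``[2, 3, 3, 1, 5, 3]``:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([2, 3, 3, 1, 5, 3])

    # Sorted unique values.
    unique = paddle.unique(x)
    # [1 2 3 5]

    # Also return the first-occurrence indices and the counts.
    unique, indices, counts = paddle.unique(x, return_index=True, return_counts=True)
    # indices: [3 0 1 4]
    # counts:  [1 1 3 1]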
@@ -744,9 +739,6 @@ def unsqueeze(x, axis, name=None):
def gather(x, index, axis=None, name=None):
"""
**Gather Layer**
Output is obtained by gathering entries of ``axis``
of ``x`` indexed by ``index`` and concatenating them together.
@@ -765,7 +757,8 @@ def gather(x, index, axis=None, name=None):
Then:
out = [[3, 4],
[5, 6]]
[5, 6]]
Args:
x (Tensor): The source input tensor with rank>=1. Supported data type is
int32, int64, float32, float64 and uint8 (only for CPU),
@@ -784,7 +777,6 @@ def gather(x, index, axis=None, name=None):
import paddle
paddle.disable_static()
input = paddle.to_tensor([[1,2],[3,4],[5,6]])
index = paddle.to_tensor([0,1])
output = paddle.gather(input, index, axis=0)
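To complement the ``axis=0`` call in the hunk, a sketch that also gathers along ``axis=1``; the index values are illustrative and the expected outputs follow directly from them:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1, 2], [3, 4], [5, 6]])

    # Gather rows 1 and 2 along axis 0, matching the Given/Then illustration above.
    out0 = paddle.gather(x, paddle.to_tensor([1, 2]), axis=0)
    # [[3 4]
    #  [5 6]]

    # Gather column 1 along axis 1.
    out1 = paddle.gather(x, paddle.to_tensor([1]), axis=1)
    # [[2]
    #  [4]
    #  [6]]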
@@ -1059,7 +1051,6 @@ def chunk(x, chunks, axis=0, name=None):
import numpy as np
import paddle
paddle.disable_static()
# x is a Tensor which shape is [3, 9, 5]
x_np = np.random.random([3, 9, 5]).astype("int32")
x = paddle.to_tensor(x_np)
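As with split above, the chunk example ends right after building ``x``. A minimal sketch; the chunk count is chosen here purely for illustration:

.. code-block:: python

    import numpy as np
    import paddle

    # x is a Tensor with shape [3, 9, 5].
    x_np = np.random.random([3, 9, 5]).astype("int32")
    x = paddle.to_tensor(x_np)

    # Split x into 3 equally sized pieces along axis 1.
    out0, out1, out2 = paddle.chunk(x, chunks=3, axis=1)
    print(out0.shape)  # [3, 3, 5]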
@@ -1452,7 +1443,6 @@ def gather_nd(x, index, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10], [11, 12]]])
index = paddle.to_tensor([[0, 1]])
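The gather_nd example stops after building ``index``. A runnable sketch with the expected result: each row of ``index`` addresses a slice of ``x``, so ``[0, 1]`` selects ``x[0, 1]``:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[[1, 2], [3, 4], [5, 6]],
                          [[7, 8], [9, 10], [11, 12]]])
    index = paddle.to_tensor([[0, 1]])

    # [0, 1] picks x[0, 1]; the result keeps one row per index row.
    output = paddle.gather_nd(x, index)
    # [[3 4]]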
@@ -1780,44 +1780,31 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None):
import paddle
paddle.disable_static()
# the axis is an int element
x = paddle.to_tensor([[0.2, 0.3, 0.5, 0.9],
[0.1, 0.2, 0.6, 0.7]])
out1 = paddle.prod(x)
print(out1.numpy())
# [0.0002268]
out2 = paddle.prod(x, -1)
print(out2.numpy())
# [0.027 0.0084]
out3 = paddle.prod(x, 0)
print(out3.numpy())
# [0.02 0.06 0.3 0.63]
print(out3.numpy().dtype)
# float32
out4 = paddle.prod(x, 0, keepdim=True)
print(out4.numpy())
# [[0.02 0.06 0.3 0.63]]
out5 = paddle.prod(x, 0, dtype='int64')
print(out5.numpy())
# [0 0 0 0]
print(out5.numpy().dtype)
# int64
# the axis is a list
y = paddle.to_tensor([[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]]])
out6 = paddle.prod(y, [0, 1])
print(out6.numpy())
# [105. 384.]
out7 = paddle.prod(y, (1, 2))
print(out7.numpy())
# [ 24. 1680.]
"""
@@ -280,8 +280,6 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None):
def index_select(x, index, axis=0, name=None):
"""
:alias_main: paddle.index_select
:alias: paddle.tensor.index_select, paddle.tensor.search.index_select
Returns a new tensor which indexes the ``input`` tensor along dimension ``axis`` using
the entries in ``index`` which is a Tensor. The returned tensor has the same number
@@ -304,7 +302,6 @@ def index_select(x, index, axis=0, name=None):
import paddle
paddle.disable_static() # Now we are in imperative mode
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
[9.0, 10.0, 11.0, 12.0]])
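The index_select example is cut off before the ``index`` tensor and the calls themselves. A complete sketch; the index values are illustrative:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                          [5.0, 6.0, 7.0, 8.0],
                          [9.0, 10.0, 11.0, 12.0]])
    index = paddle.to_tensor([0, 1, 1], dtype='int32')

    # Pick rows 0, 1 and 1 again (default axis=0).
    out_z1 = paddle.index_select(x=x, index=index)
    # [[1. 2. 3. 4.]
    #  [5. 6. 7. 8.]
    #  [5. 6. 7. 8.]]

    # Pick columns 0, 1 and 1 again (axis=1).
    out_z2 = paddle.index_select(x=x, index=index, axis=1)
    # [[1.  2.  2.]
    #  [5.  6.  6.]
    #  [9. 10. 10.]]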
@@ -680,7 +677,6 @@ def masked_select(x, mask, name=None):
import paddle
paddle.disable_static()
x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
[5.0, 6.0, 7.0, 8.0],
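The masked_select example is truncated in the middle of ``x``. A complete sketch; the mask used here is an illustration, and the result is always a 1-D tensor holding the selected elements:

.. code-block:: python

    import paddle

    x = paddle.to_tensor([[1.0, 2.0, 3.0, 4.0],
                          [5.0, 6.0, 7.0, 8.0],
                          [9.0, 10.0, 11.0, 12.0]])

    # Keep only the elements greater than 5.
    mask = x > 5.0
    out = paddle.masked_select(x, mask)
    # [6. 7. 8. 9. 10. 11. 12.]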
@@ -242,7 +242,6 @@ def numel(x, name=None):
import paddle
paddle.disable_static()
x = paddle.full(shape=[4, 5, 7], fill_value=0, dtype='int32')
numel = paddle.numel(x) # 140