Unverified commit b0ed082e, authored by 张春乔, committed by GitHub

[xdoctest] reformat example code with google style in 78 (#55966)

* input.py

* Update python/paddle/nn/functional/input.py

* Update input.py
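
Note on what the reformatting buys: once an example uses the `>>>` prompt with its expected output written on the lines that follow, the example can be executed and checked automatically instead of relying on hand-written comments. Paddle's CI does this with xdoctest, as the PR title indicates; the sketch below is only an illustration using the standard-library `doctest` module and a hypothetical `one_hot_row` helper, not code from this PR.

    import doctest

    def one_hot_row(index, num_classes):
        """Return a one-hot row as a plain Python list.

        >>> one_hot_row(1, 4)
        [0.0, 1.0, 0.0, 0.0]
        """
        return [1.0 if i == index else 0.0 for i in range(num_classes)]

    # Executes the ">>>" lines in the docstring and compares the actual repr
    # against the expected output written directly below them.
    doctest.run_docstring_examples(
        one_hot_row, globs={"one_hot_row": one_hot_row}, verbose=True
    )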
Parent 723c6f77
@@ -72,16 +72,20 @@ def one_hot(x, num_classes, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-            # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4].
-            label = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
-            # label.shape = [4]
-            one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
-            # one_hot_label.shape = [4, 4]
-            # one_hot_label = [[0., 1., 0., 0.],
-            #                  [0., 1., 0., 0.],
-            #                  [0., 0., 0., 1.],
-            #                  [1., 0., 0., 0.]]
+            >>> import paddle
+            >>> # Correspond to the first example above, where label.shape is 4 and one_hot_label.shape is [4, 4].
+            >>> label = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
+            >>> print(label.shape)
+            [4]
+            >>> one_hot_label = paddle.nn.functional.one_hot(label, num_classes=4)
+            >>> print(one_hot_label.shape)
+            [4, 4]
+            >>> print(one_hot_label)
+            Tensor(shape=[4, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
+                   [[0., 1., 0., 0.],
+                    [0., 1., 0., 0.],
+                    [0., 0., 0., 1.],
+                    [1., 0., 0., 0.]])
     """
@@ -166,24 +170,43 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
         .. code-block:: python

-            import paddle
-            import paddle.nn as nn
-
-            x0 = paddle.arange(3, 6).reshape((3, 1)).astype(paddle.int64)
-            w0 = paddle.full(shape=(10, 3), fill_value=2).astype(paddle.float32)
-
-            # x.data = [[3], [4], [5]]
-            # x.shape = [3, 1]
-            x = paddle.to_tensor(x0, stop_gradient=False)
-
-            # w.data = [[2. 2. 2.] ... [2. 2. 2.]]
-            # w.shape = [10, 3]
-            w = paddle.to_tensor(w0, stop_gradient=False)
-
-            # emb.data = [[[2., 2., 2.]], [[2., 2., 2.]], [[2., 2., 2.]]]
-            # emb.shape = [3, 1, 3]
-            emb = nn.functional.embedding(
-                    x=x, weight=w, sparse=True, name="embedding")
+            >>> import paddle
+            >>> import paddle.nn as nn
+            >>> x0 = paddle.arange(3, 6).reshape((3, 1)).astype(paddle.int64)
+            >>> w0 = paddle.full(shape=(10, 3), fill_value=2).astype(paddle.float32)
+            >>> x = paddle.to_tensor(x0, stop_gradient=False)
+            >>> print(x.numpy())
+            [[3]
+             [4]
+             [5]]
+            >>> print(x.shape)
+            [3, 1]
+            >>> w = paddle.to_tensor(w0, stop_gradient=False)
+            >>> print(w.numpy())
+            [[2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]
+             [2. 2. 2.]]
+            >>> print(w.shape)
+            [10, 3]
+            >>> emb = nn.functional.embedding(
+            ...         x=x, weight=w, sparse=True, name="embedding")
+            >>> print(emb.numpy())
+            [[[2. 2. 2.]]
+             [[2. 2. 2.]]
+             [[2. 2. 2.]]]
+            >>> print(emb.shape)
+            [3, 1, 3]
     """
     padding_idx = (
...
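
Functionally, the embedding lookup in the example above is a gather of rows from the weight matrix, indexed by `x`. Assuming a local paddle installation, that equivalence can be checked as below; this is an illustrative sketch, not part of the diff, and `sparse=True` is dropped because it only changes how the gradient of the weight is stored, not the forward result.

    import paddle
    import paddle.nn as nn

    x = paddle.arange(3, 6).reshape((3, 1)).astype(paddle.int64)
    w = paddle.full(shape=(10, 3), fill_value=2).astype(paddle.float32)

    emb = nn.functional.embedding(x=x, weight=w)
    # A row gather followed by a reshape reproduces the [3, 1, 3] result.
    gathered = paddle.index_select(w, x.flatten(), axis=0).reshape((3, 1, 3))
    print(bool((emb == gathered).all()))  # True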