Unverified commit 4eba6478 authored by Kaedeharai, committed by GitHub

[xdoctest] reformat example code with google style in No.31-35 (#56051)

Parent b982af4a
......@@ -52,40 +52,39 @@ class Dirac(Initializer):
Examples:
.. code-block:: python
import paddle
#1. For kernel_size is uneven number:
attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
conv = paddle.nn.Conv1D(3, 2, 3, weight_attr=attr)
conv.weight
# Tensor(shape=[2, 3, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[[0., 1., 0.],
# [0., 0., 0.],
# [0., 0., 0.]],
#
# [[0., 0., 0.],
# [0., 1., 0.],
# [0., 0., 0.]]])
input = paddle.rand([8, 3, 10])
output = conv(input)
output == input[:, 0:2, 1:9]
# output.shape is [8, 2, 8], It means output is almost the same with input, 2 channels are reserved
#2. For kernel_size is even number:
attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
conv = paddle.nn.Conv1D(3, 2, 4, weight_attr=attr)
conv.weight
# Tensor(shape=[2, 3, 4], dtype=float32, place=CPUPlace, stop_gradient=False,
# [[[0., 0., 1., 0.],
# [0., 0., 0., 0.],
# [0., 0., 0., 0.]],
#
# [[0., 0., 0., 0.],
# [0., 0., 1., 0.],
# [0., 0., 0., 0.]]])
>>> import paddle
>>> # 1. For an odd kernel_size:
>>> attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
>>> conv = paddle.nn.Conv1D(3, 2, 3, weight_attr=attr)
>>> print(conv.weight)
Parameter containing:
Tensor(shape=[2, 3, 3], dtype=float32, place=CPUPlace, stop_gradient=False,
[[[0., 1., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]]])
>>> input = paddle.rand([8, 3, 10])
>>> output = conv(input)
>>> output == input[:, 0:2, 1:9]
>>> print(output.shape)
[8, 2, 8]
>>> # The output is almost the same as the input: the first 2 channels are preserved.
>>> # 2. For an even kernel_size:
>>> attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Dirac())
>>> conv = paddle.nn.Conv1D(3, 2, 4, weight_attr=attr)
>>> print(conv.weight)
Parameter containing:
Tensor(shape=[2, 3, 4], dtype=float32, place=CPUPlace, stop_gradient=False,
[[[0., 0., 1., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]],
[[0., 0., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 0.]]])
"""
def __init__(self, groups=1, name=None):
......
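For reference, a minimal NumPy sketch (not Paddle's implementation) of the kernel that Dirac produces in the odd-kernel case above, which shows why the convolution copies the first channels of the input through:

    import numpy as np

    out_channels, in_channels, kernel_size = 2, 3, 3
    weight = np.zeros((out_channels, in_channels, kernel_size), dtype=np.float32)
    center = kernel_size // 2
    for i in range(min(out_channels, in_channels)):
        weight[i, i, center] = 1.0   # a single 1 at the kernel center per channel pair
    # Convolving with this kernel reproduces input[:, 0:2, 1:9] from the example above
    # (the border positions are lost because no padding is used).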
......@@ -81,10 +81,10 @@ class Initializer:
convolutions.
Args:
var: variable for which fan_in and fan_out have to be computed
var: variable for which fan_in and fan_out have to be computed.
Returns:
tuple of two integers (fan_in, fan_out)
tuple of two integers (fan_in, fan_out).
"""
shape = var.shape
if not shape or len(shape) == 0:
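As a hedged illustration of the conventional fan computation this docstring describes (the weight layouts are assumptions for the sketch, not taken from Paddle's source):

    def compute_fans_sketch(shape):
        # Linear weights assumed to be [fan_in, fan_out];
        # conv weights assumed to be [out_channels, in_channels, *kernel_dims].
        if len(shape) == 2:
            return shape[0], shape[1]
        receptive_field = 1
        for dim in shape[2:]:
            receptive_field *= dim
        return shape[1] * receptive_field, shape[0] * receptive_field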
......@@ -124,10 +124,17 @@ def calculate_gain(nonlinearity, param=None):
Examples:
.. code-block:: python
import paddle
gain = paddle.nn.initializer.calculate_gain('tanh') # 5.0 / 3
gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0) # 1.0 = math.sqrt(2.0 / (1+param^2))
initializer = paddle.nn.initializer.Orthogonal(gain)
>>> import paddle
>>> gain = paddle.nn.initializer.calculate_gain('tanh')
>>> print(gain)
1.6666666666666667
>>> # 5.0 / 3
>>> gain = paddle.nn.initializer.calculate_gain('leaky_relu', param=1.0)
>>> print(gain)
1.0
>>> # math.sqrt(2.0 / (1+param^2))
>>> initializer = paddle.nn.initializer.Orthogonal(gain)
"""
if param is None:
......
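The two printed gains follow directly from the formulas noted in the comments; a plain-Python check (illustrative arithmetic, not the API itself):

    import math

    print(5.0 / 3)                            # tanh gain -> 1.6666666666666667
    param = 1.0
    print(math.sqrt(2.0 / (1 + param ** 2)))  # leaky_relu gain -> 1.0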
......@@ -84,7 +84,7 @@ class MSRAInitializer(Initializer):
should be added. Used in static graph only, default None.
Returns:
The initialization op
The initialization op.
"""
block = self._check_block(block)
......@@ -220,14 +220,12 @@ class KaimingNormal(MSRAInitializer):
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
linear = nn.Linear(2,
4,
weight_attr=nn.initializer.KaimingNormal())
data = paddle.rand([30, 10, 2], dtype='float32')
res = linear(data)
>>> linear = nn.Linear(2, 4, weight_attr=nn.initializer.KaimingNormal())
>>> data = paddle.rand([30, 10, 2], dtype='float32')
>>> res = linear(data)
"""
......@@ -268,14 +266,12 @@ class KaimingUniform(MSRAInitializer):
Examples:
.. code-block:: python
import paddle
import paddle.nn as nn
>>> import paddle
>>> import paddle.nn as nn
linear = nn.Linear(2,
4,
weight_attr=nn.initializer.KaimingUniform())
data = paddle.rand([30, 10, 2], dtype='float32')
res = linear(data)
>>> linear = nn.Linear(2, 4, weight_attr=nn.initializer.KaimingUniform())
>>> data = paddle.rand([30, 10, 2], dtype='float32')
>>> res = linear(data)
"""
......
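The uniform variant bounds the weights instead of setting a standard deviation; a sketch under the same assumptions (fan_in mode, ReLU gain):

    import math

    fan_in = 2
    limit = math.sqrt(6.0 / fan_in)  # gain * sqrt(3 / fan_in) with gain = sqrt(2)
    # weights are drawn from U(-limit, limit)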
......@@ -50,7 +50,7 @@ class NormalInitializer(Initializer):
should be added. Used in static graph only, default None.
Returns:
The initialization op
The initialization op.
"""
block = self._check_block(block)
......@@ -112,24 +112,32 @@ class Normal(NormalInitializer):
Examples:
.. code-block:: python
import paddle
data = paddle.ones(shape=[3, 1, 2], dtype='float32')
weight_attr = paddle.framework.ParamAttr(
name="linear_weight",
initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0))
bias_attr = paddle.framework.ParamAttr(
name="linear_bias",
initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0))
linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
# linear.weight: [[ 2.1973135 -2.2697184]
# [-1.9104223 -1.0541488]]
# linear.bias: [ 0.7885926 -0.74719954]
res = linear(data)
# res: [[[ 1.0754838 -4.071067 ]]
# [[ 1.0754838 -4.071067 ]]
# [[ 1.0754838 -4.071067 ]]]
>>> import paddle
>>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
>>> weight_attr = paddle.framework.ParamAttr(
... name="linear_weight",
... initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0))
>>> bias_attr = paddle.framework.ParamAttr(
... name="linear_bias",
... initializer=paddle.nn.initializer.Normal(mean=0.0, std=2.0))
>>> # doctest: +SKIP('name has been used')
>>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
>>> print(linear.weight)
Parameter containing:
Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
[[ 2.1973135 -2.2697184],
[-1.9104223 -1.0541488]])
>>> print(linear.bias)
Parameter containing:
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
[ 0.7885926 -0.74719954])
>>> res = linear(data)
>>> print(res)
Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
[[[ 1.0754838 -4.071067 ]],
[[ 1.0754838 -4.071067 ]],
[[ 1.0754838 -4.071067 ]]])
"""
def __init__(self, mean=0.0, std=1.0, name=None):
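The exact numbers printed above depend on the random state; as a hedged sanity check, paddle.normal draws from the same N(mean, std) distribution, so its sample statistics should be close to the configured values:

    import paddle

    samples = paddle.normal(mean=0.0, std=2.0, shape=[10000])
    print(float(samples.mean()), float(samples.std()))  # roughly 0.0 and 2.0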
......@@ -253,24 +261,32 @@ class TruncatedNormal(TruncatedNormalInitializer):
Examples:
.. code-block:: python
import paddle
data = paddle.ones(shape=[3, 1, 2], dtype='float32')
weight_attr = paddle.framework.ParamAttr(
name="linear_weight",
initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=2.0))
bias_attr = paddle.framework.ParamAttr(
name="linear_bias",
initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=2.0))
linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
# linear.weight: [[-1.0981836 1.4140984]
# [ 3.1390522 -2.8266568]]
# linear.bias: [-2.1546738 -1.6570673]
res = linear(data)
# res: [[[-0.11380529 -3.0696259 ]]
# [[-0.11380529 -3.0696259 ]]
# [[-0.11380529 -3.0696259 ]]
>>> import paddle
>>> data = paddle.ones(shape=[3, 1, 2], dtype='float32')
>>> weight_attr = paddle.framework.ParamAttr(
... name="linear_weight",
... initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=2.0))
>>> bias_attr = paddle.framework.ParamAttr(
... name="linear_bias",
... initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=2.0))
>>> # doctest: +SKIP('name has been used')
>>> linear = paddle.nn.Linear(2, 2, weight_attr=weight_attr, bias_attr=bias_attr)
>>> print(linear.weight)
Parameter containing:
Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
[[-1.0981836 1.4140984],
[ 3.1390522 -2.8266568]])
>>> print(linear.bias)
Parameter containing:
Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=False,
[ -2.1546738 -1.6570673])
>>> res = linear(data)
>>> print(res)
Tensor(shape=[3, 1, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
[[[-0.11380529 -3.0696259 ]],
[[-0.11380529 -3.0696259 ]],
[[-0.11380529 -3.0696259 ]]])
"""
def __init__(self, mean=0.0, std=1.0, name=None):
......
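Truncated normal initialization is conventionally defined by redrawing any sample that falls more than two standard deviations from the mean; an illustrative NumPy sketch of that definition (not Paddle's implementation):

    import numpy as np

    def truncated_normal_sketch(shape, mean=0.0, std=2.0, seed=0):
        rng = np.random.default_rng(seed)
        out = rng.normal(mean, std, size=shape)
        mask = np.abs(out - mean) > 2 * std
        while mask.any():  # resample out-of-range values
            out[mask] = rng.normal(mean, std, size=int(mask.sum()))
            mask = np.abs(out - mean) > 2 * std
        return out.astype(np.float32)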
......@@ -56,14 +56,13 @@ class Orthogonal(Initializer):
Examples:
.. code-block:: python
import paddle
>>> import paddle
weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Orthogonal())
linear = paddle.nn.Linear(10, 15, weight_attr=weight_attr)
# linear.weight: X * X' = I
linear = paddle.nn.Linear(15, 10, weight_attr=weight_attr)
# linear.weight: X' * X = I
>>> weight_attr = paddle.ParamAttr(initializer=paddle.nn.initializer.Orthogonal())
>>> linear = paddle.nn.Linear(10, 15, weight_attr=weight_attr)
>>> # linear.weight: X * X' = I
>>> linear = paddle.nn.Linear(15, 10, weight_attr=weight_attr)
>>> # linear.weight: X' * X = I
"""
def __init__(self, gain=1.0, name=None):
......
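A common recipe for a (semi-)orthogonal weight is the QR decomposition of a Gaussian matrix; a NumPy sketch of that idea for the Linear(10, 15) case above (illustrative, not necessarily Paddle's exact algorithm):

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.standard_normal((10, 15))
    q, _ = np.linalg.qr(a.T)   # q: [15, 10] with orthonormal columns
    w = 1.0 * q.T              # gain * Q^T, shape [10, 15]
    print(np.allclose(w @ w.T, np.eye(10), atol=1e-6))  # True: X * X' = I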