Unverified Commit 3e36271e authored by Ryan, committed by GitHub

[xdoctest][task 125-137] reformat example code with google style in static/* (#56677)

* add docx

* fix conversation

* fix

* fix code style

* fix code style

* Thank SigureMo

* finished?

* add ... and fix line

* add skip doctest
Parent bb078c1d
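For reference, the Google-style doctest convention this commit applies marks each example statement with a `>>> ` prompt and each continuation line with `... `, and puts the expected output on bare lines underneath, so a runner such as xdoctest can execute and verify the examples. A minimal sketch of the convention follows; the `square` function is hypothetical, for illustration only, and is not part of this commit:

    def square(x):
        """Return the square of ``x``.

        Examples:
            .. code-block:: python

                >>> # hypothetical example showing the prompt/output layout
                >>> square(3)
                9
        """
        return x * x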
@@ -152,34 +152,32 @@ def fc(
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-            # When input is a single tensor
-            x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
-            # x: [[[0.1 0.2]
-            #      [0.3 0.4]]]
-            out = paddle.static.nn.fc(
-                x=x,
-                size=1,
-                num_flatten_dims=2,
-                weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
-                bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
-            # out: [[[1.15]
-            #        [1.35]]]
-
-            # When input is multiple tensors
-            x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
-            # x0: [[[0.1 0.2]
-            #       [0.3 0.4]]]
-            x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")
-            # x1: [[[0.1 0.2 0.3]]]
-            out = paddle.static.nn.fc(
-                x=[x0, x1],
-                size=2,
-                weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
-                bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
-            # out: [[1.8 1.8]]
+            >>> import paddle
+            >>> paddle.enable_static()
+            >>> # When input is a single tensor
+            >>> x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
+            >>> out = paddle.static.nn.fc(
+            ...     x=x,
+            ...     size=1,
+            ...     num_flatten_dims=2,
+            ...     weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
+            ...     bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
+            >>> print(out)
+            var fc_0.tmp_1 : LOD_TENSOR.shape(1, 2, 1).dtype(float32).stop_gradient(False)
+
+            >>> # When input is multiple tensors
+            >>> x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
+            >>> x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")
+            >>> out = paddle.static.nn.fc(
+            ...     x=[x0, x1],
+            ...     size=2,
+            ...     weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
+            ...     bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
+            >>> print(out)
+            var fc_1.tmp_3 : LOD_TENSOR.shape(1, 2).dtype(float32).stop_gradient(False)
     """

     def fc_fluid(
@@ -308,11 +306,11 @@ def instance_norm(
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-            hidden1 = paddle.static.nn.fc(x, size=200)
-            hidden2 = paddle.static.nn.instance_norm(hidden1)
+            >>> import paddle
+            >>> paddle.enable_static()
+            >>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+            >>> hidden1 = paddle.static.nn.fc(x, size=200)
+            >>> hidden2 = paddle.static.nn.instance_norm(hidden1)
     """
     check_variable_and_dtype(
         input,
@@ -412,18 +410,18 @@ def continuous_value_model(input, cvm, use_cvm=True):
         A Tensor with same type as input.

     Examples:
         .. code-block:: python

-            import paddle.fluid as fluid
-            import paddle
-            input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
-            label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
-            w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
-            embed = paddle.nn.functional.embedding(
-                input,
-                w0)
-            ones = paddle.full_like(label, 1, dtype="int64")
-            show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
-            show_clk.stop_gradient = True
-            input_with_cvm = paddle.static.nn.continuous_value_model(embed, show_clk, True)
+            >>> import paddle
+
+            >>> paddle.enable_static()
+            >>> input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
+            >>> label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
+            >>> w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
+            >>> embed = paddle.nn.functional.embedding(input, w0)
+            >>> ones = paddle.full_like(label, 1, dtype="int64")
+            >>> show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
+            >>> show_clk.stop_gradient = True
+            >>> input_with_cvm = paddle.static.nn.continuous_value_model(embed[:, 0], show_clk, True)
     """
     helper = LayerHelper('cvm', **locals())
     out = helper.create_variable(dtype=input.dtype)
@@ -515,11 +513,11 @@ def data_norm(
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            x = paddle.randn(shape=[32,100])
-            hidden2 = paddle.static.nn.data_norm(input=x)
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.randn(shape=[32, 100])
+            >>> hidden2 = paddle.static.nn.data_norm(input=x)
     """
     helper = LayerHelper('data_norm', **locals())
     dtype = helper.input_dtype()
@@ -694,12 +692,13 @@ def group_norm(
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
-            x = paddle.static.nn.group_norm(input=data, groups=4)
-            print(x.shape) # [2, 8, 32, 32]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
+            >>> x = paddle.static.nn.group_norm(input=data, groups=4)
+            >>> print(x.shape)
+            (2, 8, 32, 32)
     """
     helper = LayerHelper('group_norm', **locals())
     dtype = helper.input_dtype()
@@ -888,12 +887,13 @@ def conv2d(
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
-            conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
-            print(conv2d.shape) # [-1, 2, 30, 30]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
+            >>> conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
+            >>> print(conv2d.shape)
+            (-1, 2, 30, 30)
     """
     check_variable_and_dtype(
@@ -1193,19 +1193,22 @@ def conv3d(
     Examples:
         .. code-block:: python

-            import paddle
-            import numpy as np
-            paddle.enable_static()
-            data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
-            param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
-            res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
-            place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(paddle.static.default_startup_program())
-            x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
-            output = exe.run(feed={"data": x}, fetch_list=[res])
-            print(output)
+            >>> import paddle
+            >>> import numpy as np
+            >>> np.random.seed(1107)
+            >>> paddle.seed(1107)
+            >>> paddle.enable_static()
+            >>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
+            >>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
+            >>> res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
+            >>> place = paddle.CPUPlace()
+            >>> exe = paddle.static.Executor(place)
+            >>> exe.run(paddle.static.default_startup_program())
+            >>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
+            >>> output, = exe.run(feed={"data": x}, fetch_list=[res])
+            >>> print(output.shape)
+            (1, 2, 10, 30, 30)
     """

     l_type = 'conv3d'
@@ -1536,14 +1539,15 @@ def conv2d_transpose(
         ShapeError: If the size of `output_size` is not equal to that of `stride`.

     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
-            conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
-            print(conv2d_transpose.shape) # [-1, 2, 34, 34]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
+            >>> conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
+            >>> print(conv2d_transpose.shape)
+            (-1, 2, 34, 34)
     """
     assert (
         param_attr is not False
@@ -1907,21 +1911,24 @@ def conv3d_transpose(
          variable storing transposed convolution and non-linearity activation result.

     Examples:
         .. code-block:: python

-            import paddle
-            import numpy as np
-            paddle.enable_static()
-            data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
-            param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
-            res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
-            place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(paddle.static.default_startup_program())
-            x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
-            output = exe.run(feed={"data": x}, fetch_list=[res])
-            print(output)
+            >>> import paddle
+            >>> import numpy as np
+            >>> paddle.seed(1107)
+            >>> np.random.seed(1107)
+            >>> paddle.enable_static()
+            >>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
+            >>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
+            >>> res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
+            >>> place = paddle.CPUPlace()
+            >>> exe = paddle.static.Executor(place)
+            >>> exe.run(paddle.static.default_startup_program())
+            >>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
+            >>> output = exe.run(feed={"data": x}, fetch_list=[res.mean()])
+            >>> print(output)
+            [array(0.5148856, dtype=float32)]
     """
     assert (
         param_attr is not False
@@ -2226,28 +2233,26 @@ def deformable_conv(
     Examples:
         .. code-block:: python

-            #deformable conv v2:
-
-            import paddle
-            paddle.enable_static()
-
-            C_in, H_in, W_in = 3, 32, 32
-            filter_size, deformable_groups = 3, 1
-            data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
-            offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=mask,
-                                                              num_filters=2, filter_size=filter_size, padding=1, modulated=True)
-
-            #deformable conv v1:
-
-            import paddle
-            C_in, H_in, W_in = 3, 32, 32
-            filter_size, deformable_groups = 3, 1
-            data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
-            offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=None,
-                                                              num_filters=2, filter_size=filter_size, padding=1, modulated=False)
+            >>> # deformable conv v2:
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> C_in, H_in, W_in = 3, 32, 32
+            >>> filter_size, deformable_groups = 3, 1
+            >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
+            >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=mask,
+            ...                                               num_filters=2, filter_size=filter_size, padding=1, modulated=True)
+
+            >>> # deformable conv v1:
+            >>> import paddle
+            >>> C_in, H_in, W_in = 3, 32, 32
+            >>> filter_size, deformable_groups = 3, 1
+            >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
+            >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=None,
+            ...                                               num_filters=2, filter_size=filter_size, padding=1, modulated=False)
     """

     check_variable_and_dtype(
@@ -2471,30 +2476,28 @@ def deform_conv2d(
     Examples:
         .. code-block:: python

-            #deformable conv v2:
-
-            import paddle
-            paddle.enable_static()
-
-            C_in, H_in, W_in = 3, 32, 32
-            filter_size, deformable_groups = 3, 1
-            data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
-            offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
-                                                 num_filters=2, filter_size=filter_size, padding=1)
-
-            #deformable conv v1:
-
-            import paddle
-            paddle.enable_static()
-
-            C_in, H_in, W_in = 3, 32, 32
-            filter_size, deformable_groups = 3, 1
-            data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
-            offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
-            out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
-                                                 num_filters=2, filter_size=filter_size, padding=1)
+            >>> # deformable conv v2:
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> C_in, H_in, W_in = 3, 32, 32
+            >>> filter_size, deformable_groups = 3, 1
+            >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
+            >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
+            ...                                      num_filters=2, filter_size=filter_size, padding=1)
+
+            >>> # deformable conv v1:
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> C_in, H_in, W_in = 3, 32, 32
+            >>> filter_size, deformable_groups = 3, 1
+            >>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
+            >>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
+            >>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
+            ...                                      num_filters=2, filter_size=filter_size, padding=1)
     """

     if mask is None:
@@ -2574,12 +2577,12 @@ def bilinear_tensor_product(
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
-            y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
-            tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
+            >>> y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
+            >>> tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)
     """
     helper = LayerHelper('bilinear_tensor_product', **locals())
@@ -2731,16 +2734,16 @@ def batch_norm(
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
-            hidden1 = paddle.static.nn.fc(x=x, size=200)
-            print(hidden1.shape)
-            # [3, 200]
-            hidden2 = paddle.static.nn.batch_norm(input=hidden1)
-            print(hidden2.shape)
-            # [3, 200]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
+            >>> hidden1 = paddle.static.nn.fc(x=x, size=200)
+            >>> print(hidden1.shape)
+            (3, 200)
+            >>> hidden2 = paddle.static.nn.batch_norm(input=hidden1)
+            >>> print(hidden2.shape)
+            (3, 200)
     """
     assert (
         bias_attr is not False
@@ -2978,13 +2981,13 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            x = paddle.static.data(name="x", shape=[None,5,10,10], dtype="float32")
-            mode = 'channel'
-            output = paddle.static.nn.prelu(
-                x,mode,param_attr=paddle.ParamAttr(name='alpha'))
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data(name="x", shape=[None, 5, 10, 10], dtype="float32")
+            >>> mode = 'channel'
+            >>> output = paddle.static.nn.prelu(
+            ...     x,mode,param_attr=paddle.ParamAttr(name='alpha'))
     """
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
@@ -3170,103 +3173,111 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
         Tensor|tuple(Tensor)|list[Tensor], The output ``out`` of the forward function ``func``.

     Examples:
         .. code-block:: python
             :name: code-example1

-            # example 1:
-            import paddle
-            import numpy as np
-            paddle.enable_static()
-
-            # Creates a forward function, Tensor can be input directly without
-            # being converted into numpy array.
-            def tanh(x):
-                return np.tanh(x)
-
-            # Skip x in backward function and return the gradient of x
-            # Tensor must be actively converted to numpy array, otherwise,
-            # operations such as +/- can't be used.
-            def tanh_grad(y, dy):
-                return np.array(dy) * (1 - np.square(np.array(y)))
-
-            # Creates a forward function for debugging running networks(print value)
-            def debug_func(x):
-                print(x)
-
-            def create_tmp_var(name, dtype, shape):
-                return paddle.static.default_main_program().current_block().create_var(
-                    name=name, dtype=dtype, shape=shape)
-
-            def simple_net(img, label):
-                hidden = img
-                for idx in range(4):
-                    hidden = paddle.static.nn.fc(hidden, size=200)
-                    new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
-                        dtype=hidden.dtype, shape=hidden.shape)
-                    # User-defined forward and backward
-                    hidden = paddle.static.py_func(func=tanh, x=hidden,
-                        out=new_hidden, backward_func=tanh_grad,
-                        skip_vars_in_backward_input=hidden)
-                    # User-defined debug functions that print out the input Tensor
-                    paddle.static.py_func(func=debug_func, x=hidden, out=None)
-                prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
-                ce_loss = paddle.nn.loss.CrossEntropyLoss()
-                return ce_loss(prediction, label)
-
-            x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
-            y = paddle.static.data(name='y', shape=[1], dtype='int64')
-            res = simple_net(x, y)
-
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-            input1 = np.random.random(size=[1,4]).astype('float32')
-            input2 = np.random.randint(1, 10, size=[1], dtype='int64')
-            out = exe.run(paddle.static.default_main_program(),
-                          feed={'x':input1, 'y':input2},
-                          fetch_list=[res.name])
-            print(out)
+            >>> import paddle
+            >>> import numpy as np
+
+            >>> np.random.seed(1107)
+            >>> paddle.seed(1107)
+
+            >>> paddle.enable_static()
+
+            >>> # Creates a forward function, Tensor can be input directly without
+            >>> # being converted into numpy array.
+            >>> def tanh(x):
+            ...     return np.tanh(x)
+
+            >>> # Skip x in backward function and return the gradient of x
+            >>> # Tensor must be actively converted to numpy array, otherwise,
+            >>> # operations such as +/- can't be used.
+            >>> def tanh_grad(y, dy):
+            ...     return np.array(dy) * (1 - np.square(np.array(y)))
+
+            >>> # Creates a forward function for debugging running networks(print value)
+            >>> def debug_func(x):
+            ...     # print(x)
+            ...     pass
+
+            >>> def create_tmp_var(name, dtype, shape):
+            ...     return paddle.static.default_main_program().current_block().create_var(
+            ...         name=name, dtype=dtype, shape=shape)
+
+            >>> def simple_net(img, label):
+            ...     hidden = img
+            ...     for idx in range(4):
+            ...         hidden = paddle.static.nn.fc(hidden, size=200)
+            ...         new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
+            ...             dtype=hidden.dtype, shape=hidden.shape)
+            ...         # User-defined forward and backward
+            ...         hidden = paddle.static.py_func(func=tanh, x=hidden,
+            ...             out=new_hidden, backward_func=tanh_grad,
+            ...             skip_vars_in_backward_input=hidden)
+            ...         # User-defined debug functions that print out the input Tensor
+            ...         paddle.static.py_func(func=debug_func, x=hidden, out=None)
+            ...     prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
+            ...     ce_loss = paddle.nn.loss.CrossEntropyLoss()
+            ...     return ce_loss(prediction, label)
+
+            >>> x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
+            >>> y = paddle.static.data(name='y', shape=[1], dtype='int64')
+            >>> res = simple_net(x, y)
+
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> exe.run(paddle.static.default_startup_program())
+            >>> input1 = np.random.random(size=[1,4]).astype('float32')
+            >>> input2 = np.random.randint(1, 10, size=[1], dtype='int64')
+            >>> out = exe.run(paddle.static.default_main_program(),
+            ...               feed={'x':input1, 'y':input2},
+            ...               fetch_list=[res.name])
+            >>> print(out[0].shape)
+            ()

         .. code-block:: python
             :name: code-example2

-            # example 2:
-            # This example shows how to turn Tensor into numpy array and
-            # use numpy API to register an Python OP
-            import paddle
-            import numpy as np
-            paddle.enable_static()
-
-            def element_wise_add(x, y):
-                # Tensor must be actively converted to numpy array, otherwise,
-                # numpy.shape can't be used.
-                x = np.array(x)
-                y = np.array(y)
-                if x.shape != y.shape:
-                    raise AssertionError("the shape of inputs must be the same!")
-                result = np.zeros(x.shape, dtype='int32')
-                for i in range(len(x)):
-                    for j in range(len(x[0])):
-                        result[i][j] = x[i][j] + y[i][j]
-                return result
-
-            def create_tmp_var(name, dtype, shape):
-                return paddle.static.default_main_program().current_block().create_var(
-                    name=name, dtype=dtype, shape=shape)
-
-            def py_func_demo():
-                start_program = paddle.static.default_startup_program()
-                main_program = paddle.static.default_main_program()
-
-                # Input of the forward function
-                x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
-                y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
-
-                # Output of the forward function, name/dtype/shape must be specified
-                output = create_tmp_var('output','int32', [3,1])
-
-                # Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
-                paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
-
-                exe=paddle.static.Executor(paddle.CPUPlace())
-                exe.run(start_program)
-
-                # Feed numpy array to main_program
-                input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
-                input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
-                out = exe.run(main_program,
-                              feed={'x':input1, 'y':input2},
-                              fetch_list=[output.name])
-                print("{0} + {1} = {2}".format(input1, input2, out))
-
-            py_func_demo()
-
-            # Reference output:
-            # [[5, 9, 9]   + [[7, 8, 4]  =  [array([[12, 17, 13]
-            #  [7, 5, 2]]     [1, 3, 3]]            [8, 8, 5]], dtype=int32)]
+            >>> # This example shows how to turn Tensor into numpy array and
+            >>> # use numpy API to register an Python OP
+            >>> import paddle
+            >>> import numpy as np
+
+            >>> np.random.seed(1107)
+            >>> paddle.seed(1107)
+
+            >>> paddle.enable_static()
+
+            >>> def element_wise_add(x, y):
+            ...     # Tensor must be actively converted to numpy array, otherwise,
+            ...     # numpy.shape can't be used.
+            ...     x = np.array(x)
+            ...     y = np.array(y)
+            ...     if x.shape != y.shape:
+            ...         raise AssertionError("the shape of inputs must be the same!")
+            ...     result = np.zeros(x.shape, dtype='int32')
+            ...     for i in range(len(x)):
+            ...         for j in range(len(x[0])):
+            ...             result[i][j] = x[i][j] + y[i][j]
+            ...     return result

+            >>> def create_tmp_var(name, dtype, shape):
+            ...     return paddle.static.default_main_program().current_block().create_var(
+            ...         name=name, dtype=dtype, shape=shape)
+
+            >>> def py_func_demo():
+            ...     start_program = paddle.static.default_startup_program()
+            ...     main_program = paddle.static.default_main_program()
+            ...     # Input of the forward function
+            ...     x = paddle.static.data(name='x', shape=[2, 3], dtype='int32')
+            ...     y = paddle.static.data(name='y', shape=[2, 3], dtype='int32')
+            ...     # Output of the forward function, name/dtype/shape must be specified
+            ...     output = create_tmp_var('output','int32', [3, 1])
+            ...     # Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
+            ...     paddle.static.py_func(func=element_wise_add, x=[x, y], out=output)
+            ...     exe=paddle.static.Executor(paddle.CPUPlace())
+            ...     exe.run(start_program)
+            ...     # Feed numpy array to main_program
+            ...     input1 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
+            ...     input2 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
+            ...     out = exe.run(main_program,
+            ...                   feed={'x':input1, 'y':input2},
+            ...                   fetch_list=[output.name])
+            ...     print("{0} + {1} = {2}".format(input1, input2, out))
+
+            >>> py_func_demo()
+            >>> # [[1 5 4]   + [[3 7 7]  =  [array([[ 4, 12, 11]
+            >>> #  [9 4 8]]     [2 3 9]]            [11,  7, 17]], dtype=int32)]
     """
     helper = LayerHelper('py_func', **locals())
     check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
@@ -3352,17 +3363,18 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
     Examples:
         .. code-block:: python

-            # for LodTensor inputs
-            import paddle
-            paddle.enable_static()
-
-            x = paddle.static.data(name='x', shape=[9, 16],
-                                   dtype='float32', lod_level=1)
-            out_x = paddle.static.nn.row_conv(input=x, future_context_size=2)
-
-            # for Tensor inputs
-            y = paddle.static.data(name='y', shape=[9, 4, 16], dtype='float32')
-            out_y = paddle.static.nn.row_conv(input=y, future_context_size=2)
+            >>> # for LodTensor inputs
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data(name='x', shape=[9, 16],
+            ...                        dtype='float32', lod_level=1)
+            >>> out_x = paddle.static.nn.row_conv(input=x, future_context_size=2)
+
+            >>> # for Tensor inputs
+            >>> y = paddle.static.data(name='y', shape=[9, 4, 16], dtype='float32')
+            >>> out_y = paddle.static.nn.row_conv(input=y, future_context_size=2)
     """
     helper = LayerHelper('row_conv', **locals())
     check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
@@ -3443,12 +3455,13 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
-            x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
-            print(x.shape) # [2, 8, 32, 32]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
+            >>> x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
+            >>> print(x.shape)
+            (2, 8, 32, 32)
     """
     helper = LayerHelper('spectral_norm', **locals())
     check_variable_and_dtype(
@@ -3584,11 +3597,12 @@ def layer_norm(
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
-            output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
-            print(output.shape) # [8, 32, 32]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
+            >>> output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
+            >>> print(output.shape)
+            (8, 32, 32)
     """
     assert (
         in_dygraph_mode() is not True
@@ -3744,37 +3758,29 @@ def embedding(
     Static Examples:
         .. code-block:: python

-            import paddle
-            import numpy as np
-            paddle.enable_static()
-
-            x = paddle.static.data(name="x", shape = [2, 4], dtype=np.int64)
-            output = paddle.static.nn.embedding(x, (10, 3),
-                    param_attr=paddle.nn.initializer.Constant(value=1.0))
-            m_output=paddle.mean(output)
-            place = paddle.CPUPlace()
-            exe = paddle.static.Executor(place)
-            exe.run(paddle.static.default_startup_program())
-
-            x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
-
-            # x is a Numpy.
-            # x.data = [[7, 2, 4, 5], [4, 3, 2, 9]]
-            # x.shape = [2, 4]
-
-            out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
-
-            # out is a Numpy.
-            # out.data = [[1., 1., 1.],
-            #             [1., 1., 1.],
-            #             [1., 1., 1.],
-            #             [1., 1., 1.]],
-            #
-            #            [[1., 1., 1.],
-            #             [1., 1., 1.],
-            #             [1., 1., 1.],
-            #             [0., 0., 0.]]]
-            # out.shape = [2, 4, 3]
+            >>> import paddle
+            >>> import numpy as np
+
+            >>> paddle.enable_static()
+
+            >>> x = paddle.static.data(name="x", shape = [2, 4], dtype=np.int64)
+            >>> output = paddle.static.nn.embedding(x, (10, 3),
+            ...     param_attr=paddle.nn.initializer.Constant(value=1.0))
+            >>> m_output=paddle.mean(output)
+            >>> place = paddle.CPUPlace()
+            >>> exe = paddle.static.Executor(place)
+            >>> exe.run(paddle.static.default_startup_program())
+
+            >>> x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
+            >>> out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
+            >>> print(out)
+            [[[1. 1. 1.]
+              [1. 1. 1.]
+              [1. 1. 1.]
+              [1. 1. 1.]]
+             [[1. 1. 1.]
+              [1. 1. 1.]
+              [1. 1. 1.]
+              [1. 1. 1.]]]
     """

     helper = LayerHelper('embedding', **locals())
@@ -3912,24 +3918,24 @@ def sparse_embedding(
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            sparse_feature_dim = 1024
-            embedding_size = 64
-
-            # Only when the feature appear more than 10 times or more will be participated in the training.
-            entry = paddle.distributed.CountFilterEntry(10)
-
-            input = paddle.static.data(name='ins', shape=[1], dtype='int64')
-
-            emb = paddle.static.nn.sparse_embedding(
-                input=input,
-                size=[sparse_feature_dim, embedding_size],
-                is_test=False,
-                entry=entry,
-                param_attr=paddle.ParamAttr(name="SparseFeatFactors",
-                                            initializer=paddle.nn.initializer.Uniform()))
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> sparse_feature_dim = 1024
+            >>> embedding_size = 64
+
+            >>> # Only when the feature appear more than 10 times or more will be participated in the training.
+            >>> entry = paddle.distributed.CountFilterEntry(10)
+
+            >>> input = paddle.static.data(name='ins', shape=[1], dtype='int64')
+
+            >>> emb = paddle.static.nn.sparse_embedding(
+            ...     input=input,
+            ...     size=[sparse_feature_dim, embedding_size],
+            ...     is_test=False,
+            ...     entry=entry,
+            ...     param_attr=paddle.ParamAttr(name="SparseFeatFactors",
+            ...                                 initializer=paddle.nn.initializer.Uniform()))
     """
@@ -4060,49 +4066,49 @@ class ExponentialMovingAverage:
         .. code-block:: python

-            import numpy
-            import paddle
-            import paddle.static as static
-            from paddle.static import ExponentialMovingAverage
-
-            paddle.enable_static()
-
-            data = static.data(name='x', shape=[-1, 5], dtype='float32')
-            hidden = static.nn.fc(x=data, size=10)
-            cost = paddle.mean(hidden)
-
-            test_program = static.default_main_program().clone(for_test=True)
-            optimizer = paddle.optimizer.Adam(learning_rate=0.001)
-            optimizer.minimize(cost)
-
-            ema = ExponentialMovingAverage(0.999)
-            ema.update()
-
-            place = paddle.CPUPlace()
-            exe = static.Executor(place)
-            exe.run(static.default_startup_program())
-
-            for pass_id in range(3):
-                for batch_id in range(6):
-                    data = numpy.random.random(size=(10, 5)).astype('float32')
-                    exe.run(program=static.default_main_program(),
-                            feed={'x': data},
-                            fetch_list=[cost.name])
-
-                # usage 1
-                with ema.apply(exe):
-                    data = numpy.random.random(size=(10, 5)).astype('float32')
-                    exe.run(program=test_program,
-                            feed={'x': data},
-                            fetch_list=[hidden.name])
-
-                # usage 2
-                with ema.apply(exe, need_restore=False):
-                    data = numpy.random.random(size=(10, 5)).astype('float32')
-                    exe.run(program=test_program,
-                            feed={'x': data},
-                            fetch_list=[hidden.name])
-                ema.restore(exe)
+            >>> import numpy
+            >>> import paddle
+            >>> import paddle.static as static
+            >>> from paddle.static import ExponentialMovingAverage
+
+            >>> paddle.enable_static()
+
+            >>> data = static.data(name='x', shape=[-1, 5], dtype='float32')
+            >>> hidden = static.nn.fc(x=data, size=10)
+            >>> cost = paddle.mean(hidden)
+
+            >>> test_program = static.default_main_program().clone(for_test=True)
+            >>> optimizer = paddle.optimizer.Adam(learning_rate=0.001)
+            >>> optimizer.minimize(cost)
+
+            >>> ema = ExponentialMovingAverage(0.999)
+            >>> ema.update()
+
+            >>> place = paddle.CPUPlace()
+            >>> exe = static.Executor(place)
+            >>> exe.run(static.default_startup_program())
+
+            >>> for pass_id in range(3):
+            ...     for batch_id in range(6):
+            ...         data = numpy.random.random(size=(10, 5)).astype('float32')
+            ...         exe.run(program=static.default_main_program(),
+            ...                 feed={'x': data},
+            ...                 fetch_list=[cost.name])
+
+            ...     # usage 1
+            ...     with ema.apply(exe):
+            ...         data = numpy.random.random(size=(10, 5)).astype('float32')
+            ...         exe.run(program=test_program,
+            ...                 feed={'x': data},
+            ...                 fetch_list=[hidden.name])
+
+            ...     # usage 2
+            ...     with ema.apply(exe, need_restore=False):
+            ...         data = numpy.random.random(size=(10, 5)).astype('float32')
+            ...         exe.run(program=test_program,
+            ...                 feed={'x': data},
+            ...                 fetch_list=[hidden.name])
+            ...     ema.restore(exe)
     """
......
@@ -62,29 +62,29 @@ def Assert(cond, data=None, summarize=20, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-            from paddle.static.nn.control_flow import Assert
-
-            paddle.enable_static()
-            x = paddle.full([2, 3], 2.0, 'float32')
-            condition = paddle.max(x) < 1.0 # False
-            Assert(condition, [x], 10, "example_assert_layer")
-
-            exe = paddle.static.Executor()
-            try:
-                exe.run(paddle.static.default_main_program())
-                # Print x and throws ValueError
-                # Example printed message for x:
-                #
-                # Variable: fill_constant_0.tmp_0
-                #   - lod: {}
-                #   - place: CPUPlace()
-                #   - shape: [2, 3]
-                #   - layout: NCHW
-                #   - dtype: float
-                #   - data: [2 2 2 2 2 2]
-            except ValueError as e:
-                print("Assert Exception Example")
+            >>> import paddle
+            >>> from paddle.static.nn.control_flow import Assert
+
+            >>> paddle.enable_static()
+            >>> x = paddle.full([2, 3], 2.0, 'float32')
+            >>> condition = paddle.max(x) < 1.0 # False
+            >>> Assert(condition, [x], 10, "example_assert_layer")
+
+            >>> exe = paddle.static.Executor()
+            >>> try:
+            ...     exe.run(paddle.static.default_main_program())
+            ...     # Print x and throws ValueError
+            ...     # Example printed message for x:
+            ...     #
+            ...     # Variable: fill_constant_0.tmp_0
+            ...     #   - lod: {}
+            ...     #   - place: CPUPlace()
+            ...     #   - shape: [2, 3]
+            ...     #   - layout: NCHW
+            ...     #   - dtype: float
+            ...     #   - data: [2 2 2 2 2 2]
+            ... except ValueError as e:
+            ...     print("Assert Exception Example")
     '''
     check_variable_and_dtype(
@@ -165,17 +165,21 @@ class ConditionalBlock:
     Examples:
         .. code-block:: python

-            import paddle
-            import paddle.fluid as fluid
-
-            cond = paddle.less_than(x=label, y=limit)
-            true_image, false_image = layers.split_lod_tensor(
-                input=image, mask=cond)
-            true_cond = layers.ConditionalBlock([true_image])
-
-            with true_cond.block():
-                ...
-            with false_cond.block():
-                ...
+            >>> import paddle
+            >>> from paddle.static.nn.control_flow import ConditionalBlock
+
+            >>> label = paddle.rand([1])
+            >>> limit = paddle.ones([1]) * 0.5
+            >>> cond = paddle.less_than(x=label, y=limit)
+            >>> image = paddle.ones([1])
+
+            >>> true_image = image[cond]
+            >>> true_cond = ConditionalBlock([true_image])
+
+            >>> with true_cond.block():
+            ...     pass
+            >>> with false_cond.block():
+            ...     pass
     '''

     def __init__(self, inputs, is_scalar_condition=False, name=None):
@@ -425,59 +429,61 @@ class While:
         is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
         name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .

-    Examples 1:
+    Examples:
         .. code-block:: python
+            :name: example-1

-            import paddle
-            import numpy as np
-
-            paddle.enable_static()
-
-            i = paddle.full(shape=[1], dtype='int64', fill_value=0)           # loop counter
-
-            loop_len = paddle.full(shape=[1],dtype='int64', fill_value=10)    # loop length
-
-            cond = paddle.less_than(x=i, y=loop_len)
-            while_op = paddle.static.nn.control_flow.While(cond=cond)
-            with while_op.block():
-                i = paddle.increment(x=i, value=1)
-                paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
-
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-
-            res = exe.run(paddle.static.default_main_program(), feed={}, fetch_list=[i])
-            print(res) # [array([10])]
-
-    Examples 2:
-        .. code-block:: python
-
-            import paddle
-            import numpy as np
-
-            paddle.enable_static()
-
-            i = paddle.full(shape=[1], dtype='int64', fill_value=0)
-            loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
-            one = paddle.full(shape=[1], dtype='float32', fill_value=1)
-            data = paddle.static.data(name='data', shape=[1], dtype='float32')
-            sums = paddle.full(shape=[1], dtype='float32', fill_value=0)  # Define the variable to be obtained outside of While, which name should be different from the variable inside the While to be obtained
-
-            cond = paddle.less_than(x=i, y=loop_len)
-            while_op = paddle.static.nn.control_flow.While(cond=cond)
-            with while_op.block():
-                sums_tensor = paddle.add(x=data, y=data)
-                paddle.assign(sums_tensor, sums)  # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign
-                i = paddle.increment(x=i, value=1)
-                data = paddle.add(x=data, y=one)
-                paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
-
-            feed_data = np.ones(1).astype('float32')
-            exe = paddle.static.Executor(paddle.CPUPlace())
-            exe.run(paddle.static.default_startup_program())
-            res = exe.run(paddle.static.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
-            print(res[0])  # [2.]    # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
+            >>> import paddle
+            >>> import numpy as np
+
+            >>> paddle.enable_static()
+
+            >>> i = paddle.full(shape=[1], dtype='int64', fill_value=0)           # loop counter
+
+            >>> loop_len = paddle.full(shape=[1],dtype='int64', fill_value=10)    # loop length
+
+            >>> cond = paddle.less_than(x=i, y=loop_len)
+            >>> while_op = paddle.static.nn.control_flow.While(cond=cond)
+            >>> with while_op.block():
+            ...     i = paddle.increment(x=i, value=1)
+            ...     paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
+
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> exe.run(paddle.static.default_startup_program())
+
+            >>> res = exe.run(paddle.static.default_main_program(), feed={}, fetch_list=[i])
+            >>> print(res)
+            [array([10], dtype=int64)]

+        .. code-block:: python
+            :name: example-2
+
+            >>> import paddle
+            >>> import numpy as np
+
+            >>> paddle.enable_static()
+
+            >>> i = paddle.full(shape=[1], dtype='int64', fill_value=0)
+            >>> loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
+            >>> one = paddle.full(shape=[1], dtype='float32', fill_value=1)
+            >>> data = paddle.static.data(name='data', shape=[1], dtype='float32')
+            >>> sums = paddle.full(shape=[1], dtype='float32', fill_value=0)  # Define the variable to be obtained outside of While, which name should be different from the variable inside the While to be obtained
+
+            >>> cond = paddle.less_than(x=i, y=loop_len)
+            >>> while_op = paddle.static.nn.control_flow.While(cond=cond)
+            >>> with while_op.block():
+            ...     sums_tensor = paddle.add(x=data, y=data)
+            ...     paddle.assign(sums_tensor, sums)  # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign
+            ...     i = paddle.increment(x=i, value=1)
+            ...     data = paddle.add(x=data, y=one)
+            ...     paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
+
+            >>> feed_data = np.ones(1).astype('float32')
+            >>> exe = paddle.static.Executor(paddle.CPUPlace())
+            >>> exe.run(paddle.static.default_startup_program())
+            >>> res = exe.run(paddle.static.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
+            >>> print(res[0])  # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
+            [2.]
     """

     BEFORE_WHILE_BLOCK = 0
@@ -619,26 +625,27 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-            paddle.enable_static()
-
-            def cond(i, ten):
-                return i < ten
-
-            def body(i, ten):
-                i = i + 1
-                return [i, ten]
-
-            main_program = paddle.static.default_main_program()
-            startup_program = paddle.static.default_startup_program()
-            with paddle.static.program_guard(main_program, startup_program):
-                i = paddle.full(shape=[1], fill_value=0, dtype='int64')     # loop counter
-                ten = paddle.full(shape=[1], fill_value=10, dtype='int64')  # loop length
-                i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
-
-                exe = paddle.static.Executor(paddle.CPUPlace())
-                res = exe.run(main_program, feed={}, fetch_list=[i])
-                print(res) # [array([10])]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> def cond(i, ten):
+            ...     return i < ten
+
+            >>> def body(i, ten):
+            ...     i = i + 1
+            ...     return [i, ten]
+
+            >>> main_program = paddle.static.default_main_program()
+            >>> startup_program = paddle.static.default_startup_program()
+            >>> with paddle.static.program_guard(main_program, startup_program):
+            ...     i = paddle.full(shape=[1], fill_value=0, dtype='int64')     # loop counter
+            ...     ten = paddle.full(shape=[1], fill_value=10, dtype='int64')  # loop length
+            ...     i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
+
+            ...     exe = paddle.static.Executor(paddle.CPUPlace())
+            ...     res = exe.run(main_program, feed={}, fetch_list=[i])
+            ...     print(res)
+            [array([10], dtype=int64)]
     """
     helper = LayerHelper('while_loop', **locals())
@@ -783,43 +790,42 @@ def case(pred_fn_pairs, default=None, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-
-            paddle.enable_static()
-
-            def fn_1():
-                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
-
-            def fn_2():
-                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
-
-            def fn_3():
-                return paddle.full(shape=[3], dtype='int32', fill_value=3)
-
-            main_program = paddle.static.default_startup_program()
-            startup_program = paddle.static.default_main_program()
-
-            with paddle.static.program_guard(main_program, startup_program):
-                x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
-                y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
-                z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
-
-                pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
-                pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
-                pred_3 = paddle.equal(x, y)      # false: 0.3 == 0.1
-
-                # Call fn_1 because pred_1 is True
-                out_1 = paddle.static.nn.case(
-                    pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)
-
-                # Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called.
-                # because fn_3 is the last callable in pred_fn_pairs.
-                out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
-
-                exe = paddle.static.Executor(paddle.CPUPlace())
-                res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
-                print(res_1) # [[1. 1.]]
-                print(res_2) # [3 3 3]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> def fn_1():
+            ...     return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
+
+            >>> def fn_2():
+            ...     return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
+
+            >>> def fn_3():
+            ...     return paddle.full(shape=[3], dtype='int32', fill_value=3)
+
+            >>> main_program = paddle.static.default_startup_program()
+            >>> startup_program = paddle.static.default_main_program()
+            >>> with paddle.static.program_guard(main_program, startup_program):
+            ...     x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
+            ...     y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
+            ...     z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
+
+            ...     pred_1 = paddle.less_than(z, x)  # true: 0.2 < 0.3
+            ...     pred_2 = paddle.less_than(x, y)  # false: 0.3 < 0.1
+            ...     pred_3 = paddle.equal(x, y)      # false: 0.3 == 0.1
+
+            ...     # Call fn_1 because pred_1 is True
+            ...     out_1 = paddle.static.nn.case(
+            ...         pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)
+
+            ...     # Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called.
+            ...     # because fn_3 is the last callable in pred_fn_pairs.
+            ...     out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
+
+            ...     exe = paddle.static.Executor(paddle.CPUPlace())
+            ...     res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
+            ...     print(res_1, res_2)
+            [[1. 1.]] [3 3 3]
     '''
     helper = LayerHelper('case', **locals())
@@ -919,45 +925,59 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
     Examples:
         .. code-block:: python

-            import paddle
-
-            paddle.enable_static()
-
-            def fn_1():
-                return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
-
-            def fn_2():
-                return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
-
-            def fn_3():
-                return paddle.full(shape=[3], dtype='int32', fill_value=3)
-
-            main_program = paddle.static.default_startup_program()
-            startup_program = paddle.static.default_main_program()
-            with paddle.static.program_guard(main_program, startup_program):
-                index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
-                index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
-
-                out_1 = paddle.static.nn.switch_case(
-                    branch_index=index_1,
-                    branch_fns={1: fn_1, 2: fn_2},
-                    default=fn_3)
-
-                out_2 = paddle.static.nn.switch_case(
-                    branch_index=index_2,
-                    branch_fns=[(1, fn_1), (2, fn_2)],
-                    default=fn_3)
-
-                # Argument default is None and no index matches. fn_3 will be called because of the max index 7.
-                out_3 = paddle.static.nn.switch_case(
-                    branch_index=index_2,
-                    branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])
-
-                exe = paddle.static.Executor(paddle.CPUPlace())
-                res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
-                print(res_1) # [[1. 1.]]
-                print(res_2) # [[2 2] [2 2]]
-                print(res_3) # [3 3 3]
+            >>> import paddle
+            >>> paddle.enable_static()
+
+            >>> def fn_1():
+            ...     return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
+
+            >>> def fn_2():
+            ...     return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
+
+            >>> def fn_3():
+            ...     return paddle.full(shape=[3], dtype='int32', fill_value=3)
+
+            >>> startup_program = paddle.static.default_startup_program()
+            >>> main_program = paddle.static.default_main_program()
+            >>> with paddle.static.program_guard(main_program, startup_program):
+            ...     index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
+            ...     index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
+            ...
+            ...     out_1 = paddle.static.nn.switch_case(
+            ...         branch_index=index_1,
+            ...         branch_fns={1: fn_1, 2: fn_2},
+            ...         default=fn_3)
+            ...
+            ...     out_2 = paddle.static.nn.switch_case(
+            ...         branch_index=index_2,
+            ...         branch_fns=[(1, fn_1), (2, fn_2)],
+            ...         default=fn_3)
+            ...
+            ...     # Argument default is None and no index matches. fn_3 will be called because of the max index 7.
+            ...     out_3 = paddle.static.nn.switch_case(
+            ...         branch_index=index_2,
+            ...         branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])
+            ...
+            ...     exe = paddle.static.Executor(paddle.CPUPlace())
+            ...     res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
+            >>> print(res_1)
+            [[1. 1.]]
+            >>> print(res_2)
+            [[2 2]
+             [2 2]]
+            >>> print(res_3)
+            [3 3 3]
     '''
     helper = LayerHelper('switch_case', **locals())
@@ -1093,12 +1113,12 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
         .. code-block:: python
             :name: code-example-1

-            import paddle
-
-            a = paddle.zeros((1, 1))
-            b = paddle.zeros((1, 1))
-            c = a * b
-            out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
+            >>> import paddle
+
+            >>> a = paddle.zeros((1, 1))
+            >>> b = paddle.zeros((1, 1))
+            >>> c = a * b
+            >>> out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)

     No matter whether ``a < b`` , ``c = a * b`` will be in net building and
     run. ``a + c`` and ``b * b`` will be in net building, but only one
.. code-block:: python .. code-block:: python
:name: code-example-2 :name: code-example-2
import paddle >>> import paddle
# >>> # pseudocode:
# pseudocode: >>> # if 0.1 < 0.23:
# if 0.1 < 0.23: >>> # return 1, True
# return 1, True >>> # else:
# else: >>> # return 3, 2
# return 3, 2
# >>> def true_func():
... return paddle.full(shape=[1, 2],
def true_func(): ... dtype='int32',
return paddle.full(shape=[1, 2], dtype='int32', ... fill_value=1
fill_value=1), paddle.full(shape=[2, 3], ... ), paddle.full(shape=[2, 3],
dtype='bool', ... dtype='bool',
fill_value=True) ... fill_value=True
... )
def false_func():
return paddle.full(shape=[3, 4], dtype='float32', >>> def false_func():
fill_value=3), paddle.full(shape=[4, 5], ... return paddle.full(shape=[3, 4],
dtype='int64', ... dtype='float32',
fill_value=2) ... fill_value=3
... ), paddle.full(shape=[4, 5],
... dtype='int64',
x = paddle.full(shape=[1], dtype='float32', fill_value=0.1) ... fill_value=2
y = paddle.full(shape=[1], dtype='float32', fill_value=0.23) ... )
pred = paddle.less_than(x=x, y=y, name=None)
ret = paddle.static.nn.cond(pred, true_func, false_func)
# ret is a tuple containing 2 tensors >>> x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
# ret[0] = [[1 1]] >>> y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
# ret[1] = [[ True True True] >>> pred = paddle.less_than(x=x, y=y, name=None)
# [ True True True]] >>> a, b = paddle.static.nn.cond(pred, true_func, false_func)
>>> print(a)
Tensor(shape=[1, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
[[1, 1]])
>>> print(b)
Tensor(shape=[2, 3], dtype=bool, place=Place(cpu), stop_gradient=True,
[[True, True, True],
[True, True, True]])
""" """
if in_dygraph_mode(): if in_dygraph_mode():
assert isinstance(pred, Variable), "The pred in cond must be Variable" assert isinstance(pred, Variable), "The pred in cond must be Variable"
@@ -1665,24 +1692,29 @@ def Print(
     Examples:
         .. code-block:: python

-            import paddle
-
-            paddle.enable_static()
-
-            x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
-            out = paddle.static.Print(x, message="The content of input layer:")
-
-            main_program = paddle.static.default_main_program()
-            exe = paddle.static.Executor(place=paddle.CPUPlace())
-            res = exe.run(main_program, fetch_list=[out])
-            # Variable: fill_constant_1.tmp_0
-            #   - message: The content of input layer:
-            #   - lod: {}
-            #   - place: CPUPlace
-            #   - shape: [2, 3]
-            #   - layout: NCHW
-            #   - dtype: long
-            #   - data: [3 3 3 3 3 3]
+            >>> import paddle
+
+            >>> paddle.enable_static()
+
+            >>> x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
+            >>> out = paddle.static.Print(x, message="The content of input layer:")
+
+            >>> main_program = paddle.static.default_main_program()
+            >>> exe = paddle.static.Executor(place=paddle.CPUPlace())
+            >>> res = exe.run(main_program, fetch_list=[out])
+            >>> # doctest: +SKIP('Unable to get output')
+            Variable: fill_constant_1.tmp_0
+              - message: The content of input layer:
+              - lod: {}
+              - place: Place(cpu)
+              - shape: [2, 3]
+              - layout: NCHW
+              - dtype: int64
+              - data: [3 3 3 3 3 3]
+            >>> # doctest: -SKIP
+            >>> res
+            [array([[3, 3, 3],
+                    [3, 3, 3]], dtype=int64)]
    '''
    check_variable_and_dtype(
        input,
......
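To spot-check a reformatted docstring locally, the xdoctest runner that this migration targets can be invoked programmatically. A minimal sketch, assuming `xdoctest` is installed (`pip install xdoctest`); the module name below is illustrative, and the exact keyword arguments may vary across xdoctest versions:

    # Hypothetical local check, not part of this commit: collect and run
    # the google-style doctests found in an importable module.
    import xdoctest

    xdoctest.doctest_module('paddle.static.nn.common', command='all', style='google')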