Unverified · Commit 3e36271e authored by Ryan, committed by GitHub

[xdoctest][task 125-137] reformat example code with google style in static/* (#56677)

* add docx

* fix conversation

* fix

* fix code style

* fix code style

* Thank SigureMo

* finished?

* add ... and fix line

* add skip doctest
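
For reference, the Google-style (xdoctest) format this PR converts the examples to prefixes statements with ">>> ", continuation lines with "... ", and places the expected output on the unprefixed lines that follow. A minimal sketch of the convention (`add_one` is a hypothetical helper, not an API touched by this PR):

>>> def add_one(x):
...     return x + 1
>>> print(add_one(1))
2

Output that is environment-dependent or unavailable is fenced between "# doctest: +SKIP(...)" and "# doctest: -SKIP" directives, as in the Print example below.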
Parent bb078c1d
@@ -152,34 +152,32 @@ def fc(
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
# When input is a single tensor
x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
# x: [[[0.1 0.2]
# [0.3 0.4]]]
out = paddle.static.nn.fc(
x=x,
size=1,
num_flatten_dims=2,
weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
# out: [[[1.15]
# [1.35]]]
# When input is multiple tensors
x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
# x0: [[[0.1 0.2]
# [0.3 0.4]]]
x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")
# x1: [[[0.1 0.2 0.3]]]
out = paddle.static.nn.fc(
x=[x0, x1],
size=2,
weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
# out: [[1.8 1.8]]
>>> import paddle
>>> paddle.enable_static()
>>> # When input is a single tensor
>>> x = paddle.static.data(name="x", shape=[1, 2, 2], dtype="float32")
>>> out = paddle.static.nn.fc(
... x=x,
... size=1,
... num_flatten_dims=2,
... weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
... bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
>>> print(out)
var fc_0.tmp_1 : LOD_TENSOR.shape(1, 2, 1).dtype(float32).stop_gradient(False)
>>> # When input is multiple tensors
>>> x0 = paddle.static.data(name="x0", shape=[1, 2, 2], dtype="float32")
>>> x1 = paddle.static.data(name="x1", shape=[1, 1, 3], dtype="float32")
>>> out = paddle.static.nn.fc(
... x=[x0, x1],
... size=2,
... weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=0.5)),
... bias_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.Constant(value=1.0)))
>>> print(out)
var fc_1.tmp_3 : LOD_TENSOR.shape(1, 2).dtype(float32).stop_gradient(False)
"""
def fc_fluid(
@@ -308,11 +306,11 @@ def instance_norm(
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x, size=200)
hidden2 = paddle.static.nn.instance_norm(hidden1)
>>> import paddle
>>> paddle.enable_static()
>>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
>>> hidden1 = paddle.static.nn.fc(x, size=200)
>>> hidden2 = paddle.static.nn.instance_norm(hidden1)
"""
check_variable_and_dtype(
input,
@@ -412,18 +410,18 @@ def continuous_value_model(input, cvm, use_cvm=True):
A Tensor with same type as input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle
input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
embed = paddle.nn.functional.embedding(
input,
w0)
ones = paddle.full_like(label, 1, dtype="int64")
show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
show_clk.stop_gradient = True
input_with_cvm = paddle.static.nn.continuous_value_model(embed, show_clk, True)
>>> import paddle
>>> paddle.enable_static()
>>> input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
>>> label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
>>> w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
>>> embed = paddle.nn.functional.embedding(input, w0)
>>> ones = paddle.full_like(label, 1, dtype="int64")
>>> show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
>>> show_clk.stop_gradient = True
>>> input_with_cvm = paddle.static.nn.continuous_value_model(embed[:, 0], show_clk, True)
"""
helper = LayerHelper('cvm', **locals())
out = helper.create_variable(dtype=input.dtype)
@@ -515,11 +513,11 @@ def data_norm(
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
x = paddle.randn(shape=[32,100])
hidden2 = paddle.static.nn.data_norm(input=x)
>>> x = paddle.randn(shape=[32, 100])
>>> hidden2 = paddle.static.nn.data_norm(input=x)
"""
helper = LayerHelper('data_norm', **locals())
dtype = helper.input_dtype()
@@ -694,12 +692,13 @@ def group_norm(
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
x = paddle.static.nn.group_norm(input=data, groups=4)
print(x.shape) # [2, 8, 32, 32]
>>> data = paddle.static.data(name='data', shape=[2, 8, 32, 32], dtype='float32')
>>> x = paddle.static.nn.group_norm(input=data, groups=4)
>>> print(x.shape)
(2, 8, 32, 32)
"""
helper = LayerHelper('group_norm', **locals())
dtype = helper.input_dtype()
@@ -888,12 +887,13 @@ def conv2d(
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
print(conv2d.shape) # [-1, 2, 30, 30]
>>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
>>> conv2d = paddle.static.nn.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
>>> print(conv2d.shape)
(-1, 2, 30, 30)
"""
check_variable_and_dtype(
@@ -1193,19 +1193,22 @@ def conv3d(
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
>>> import paddle
>>> import numpy as np
>>> np.random.seed(1107)
>>> paddle.seed(1107)
>>> paddle.enable_static()
>>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
>>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
>>> res = paddle.static.nn.conv3d(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
>>> place = paddle.CPUPlace()
>>> exe = paddle.static.Executor(place)
>>> exe.run(paddle.static.default_startup_program())
>>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
>>> output, = exe.run(feed={"data": x}, fetch_list=[res])
>>> print(output.shape)
(1, 2, 10, 30, 30)
"""
l_type = 'conv3d'
@@ -1536,14 +1539,15 @@ def conv2d_transpose(
ShapeError: If the size of `output_size` is not equal to that of `stride`.
Examples:
.. code-block:: python
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
print(conv2d_transpose.shape) # [-1, 2, 34, 34]
>>> data = paddle.static.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
>>> conv2d_transpose = paddle.static.nn.conv2d_transpose(input=data, num_filters=2, filter_size=3)
>>> print(conv2d_transpose.shape)
(-1, 2, 34, 34)
"""
assert (
param_attr is not False
@@ -1907,21 +1911,24 @@ def conv3d_transpose(
variable storing transposed convolution and non-linearity activation result.
Examples:
.. code-block:: python
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
output = exe.run(feed={"data": x}, fetch_list=[res])
print(output)
>>> import paddle
>>> import numpy as np
>>> paddle.seed(1107)
>>> np.random.seed(1107)
>>> paddle.enable_static()
>>> data = paddle.static.data(name='data', shape=[None, 3, 12, 32, 32], dtype='float32')
>>> param_attr = paddle.framework.ParamAttr(name='conv3d.weight', initializer=paddle.nn.initializer.XavierNormal(), learning_rate=0.001)
>>> res = paddle.static.nn.conv3d_transpose(input=data, num_filters=2, filter_size=3, act="relu", param_attr=param_attr)
>>> place = paddle.CPUPlace()
>>> exe = paddle.static.Executor(place)
>>> exe.run(paddle.static.default_startup_program())
>>> x = np.random.rand(1, 3, 12, 32, 32).astype("float32")
>>> output = exe.run(feed={"data": x}, fetch_list=[res.mean()])
>>> print(output)
[array(0.5148856, dtype=float32)]
"""
assert (
param_attr is not False
@@ -2226,28 +2233,26 @@ def deformable_conv(
Examples:
.. code-block:: python
#deformable conv v2:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1, modulated=True)
#deformable conv v1:
import paddle
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.layers.common.deformable_conv(input=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1, modulated=False)
>>> # deformable conv v2:
>>> import paddle
>>> paddle.enable_static()
>>> C_in, H_in, W_in = 3, 32, 32
>>> filter_size, deformable_groups = 3, 1
>>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
>>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=mask,
... num_filters=2, filter_size=filter_size, padding=1, modulated=True)
>>> # deformable conv v1:
>>> import paddle
>>> C_in, H_in, W_in = 3, 32, 32
>>> filter_size, deformable_groups = 3, 1
>>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
>>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> out = paddle.static.nn.common.deformable_conv(input=data, offset=offset, mask=None,
... num_filters=2, filter_size=filter_size, padding=1, modulated=False)
"""
check_variable_and_dtype(
@@ -2471,30 +2476,28 @@ def deform_conv2d(
Examples:
.. code-block:: python
#deformable conv v2:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
num_filters=2, filter_size=filter_size, padding=1)
#deformable conv v1:
import paddle
paddle.enable_static()
C_in, H_in, W_in = 3, 32, 32
filter_size, deformable_groups = 3, 1
data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
num_filters=2, filter_size=filter_size, padding=1)
>>> # deformable conv v2:
>>> import paddle
>>> paddle.enable_static()
>>> C_in, H_in, W_in = 3, 32, 32
>>> filter_size, deformable_groups = 3, 1
>>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
>>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> mask = paddle.static.data(name='mask', shape=[None, deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=mask,
... num_filters=2, filter_size=filter_size, padding=1)
>>> # deformable conv v1:
>>> import paddle
>>> paddle.enable_static()
>>> C_in, H_in, W_in = 3, 32, 32
>>> filter_size, deformable_groups = 3, 1
>>> data = paddle.static.data(name='data', shape=[None, C_in, H_in, W_in], dtype='float32')
>>> offset = paddle.static.data(name='offset', shape=[None, 2*deformable_groups*filter_size**2, H_in, W_in], dtype='float32')
>>> out = paddle.static.nn.deform_conv2d(x=data, offset=offset, mask=None,
... num_filters=2, filter_size=filter_size, padding=1)
"""
if mask is None:
@@ -2574,12 +2577,12 @@ def bilinear_tensor_product(
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)
>>> x = paddle.static.data("t1", shape=[-1, 5], dtype="float32")
>>> y = paddle.static.data("t2", shape=[-1, 4], dtype="float32")
>>> tensor = paddle.static.nn.bilinear_tensor_product(x, y, size=1000)
"""
helper = LayerHelper('bilinear_tensor_product', **locals())
@@ -2731,16 +2734,16 @@ def batch_norm(
.. code-block:: python
import paddle
>>> import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
hidden1 = paddle.static.nn.fc(x=x, size=200)
print(hidden1.shape)
# [3, 200]
hidden2 = paddle.static.nn.batch_norm(input=hidden1)
print(hidden2.shape)
# [3, 200]
>>> paddle.enable_static()
>>> x = paddle.static.data(name='x', shape=[3, 7, 3, 7], dtype='float32')
>>> hidden1 = paddle.static.nn.fc(x=x, size=200)
>>> print(hidden1.shape)
(3, 200)
>>> hidden2 = paddle.static.nn.batch_norm(input=hidden1)
>>> print(hidden2.shape)
(3, 200)
"""
assert (
bias_attr is not False
@@ -2978,13 +2981,13 @@ def prelu(x, mode, param_attr=None, data_format="NCHW", name=None):
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
x = paddle.static.data(name="x", shape=[None,5,10,10], dtype="float32")
mode = 'channel'
output = paddle.static.nn.prelu(
x,mode,param_attr=paddle.ParamAttr(name='alpha'))
>>> x = paddle.static.data(name="x", shape=[None, 5, 10, 10], dtype="float32")
>>> mode = 'channel'
>>> output = paddle.static.nn.prelu(
... x, mode, param_attr=paddle.ParamAttr(name='alpha'))
"""
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'prelu')
@@ -3170,103 +3173,111 @@ def py_func(func, x, out, backward_func=None, skip_vars_in_backward_input=None):
Tensor|tuple(Tensor)|list[Tensor], The output ``out`` of the forward function ``func``.
Examples:
.. code-block:: python
:name: code-example1
# example 1:
import paddle
import numpy as np
paddle.enable_static()
# Creates a forward function, Tensor can be input directly without
# being converted into numpy array.
def tanh(x):
return np.tanh(x)
# Skip x in backward function and return the gradient of x
# Tensor must be actively converted to numpy array, otherwise,
# operations such as +/- can't be used.
def tanh_grad(y, dy):
return np.array(dy) * (1 - np.square(np.array(y)))
# Creates a forward function for debugging running networks(print value)
def debug_func(x):
print(x)
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def simple_net(img, label):
hidden = img
for idx in range(4):
hidden = paddle.static.nn.fc(hidden, size=200)
new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
dtype=hidden.dtype, shape=hidden.shape)
# User-defined forward and backward
hidden = paddle.static.py_func(func=tanh, x=hidden,
out=new_hidden, backward_func=tanh_grad,
skip_vars_in_backward_input=hidden)
# User-defined debug functions that print out the input Tensor
paddle.static.py_func(func=debug_func, x=hidden, out=None)
prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
ce_loss = paddle.nn.loss.CrossEntropyLoss()
return ce_loss(prediction, label)
x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
y = paddle.static.data(name='y', shape=[1], dtype='int64')
res = simple_net(x, y)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
input1 = np.random.random(size=[1,4]).astype('float32')
input2 = np.random.randint(1, 10, size=[1], dtype='int64')
out = exe.run(paddle.static.default_main_program(),
feed={'x':input1, 'y':input2},
fetch_list=[res.name])
print(out)
>>> import paddle
>>> import numpy as np
>>> np.random.seed(1107)
>>> paddle.seed(1107)
>>> paddle.enable_static()
>>> # Create a forward function. A Tensor can be passed in directly without
>>> # being converted into a numpy array.
>>> def tanh(x):
... return np.tanh(x)
>>> # Skip x in backward function and return the gradient of x
>>> # Tensor must be actively converted to numpy array, otherwise,
>>> # operations such as +/- can't be used.
>>> def tanh_grad(y, dy):
... return np.array(dy) * (1 - np.square(np.array(y)))
>>> # Create a forward function for debugging running networks (print value)
>>> def debug_func(x):
... # print(x)
... pass
>>> def create_tmp_var(name, dtype, shape):
... return paddle.static.default_main_program().current_block().create_var(
... name=name, dtype=dtype, shape=shape)
>>> def simple_net(img, label):
... hidden = img
... for idx in range(4):
... hidden = paddle.static.nn.fc(hidden, size=200)
... new_hidden = create_tmp_var(name='hidden_{}'.format(idx),
... dtype=hidden.dtype, shape=hidden.shape)
... # User-defined forward and backward
... hidden = paddle.static.py_func(func=tanh, x=hidden,
... out=new_hidden, backward_func=tanh_grad,
... skip_vars_in_backward_input=hidden)
... # User-defined debug functions that print out the input Tensor
... paddle.static.py_func(func=debug_func, x=hidden, out=None)
... prediction = paddle.static.nn.fc(hidden, size=10, activation='softmax')
... ce_loss = paddle.nn.loss.CrossEntropyLoss()
... return ce_loss(prediction, label)
>>> x = paddle.static.data(name='x', shape=[1,4], dtype='float32')
>>> y = paddle.static.data(name='y', shape=[1], dtype='int64')
>>> res = simple_net(x, y)
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> exe.run(paddle.static.default_startup_program())
>>> input1 = np.random.random(size=[1,4]).astype('float32')
>>> input2 = np.random.randint(1, 10, size=[1], dtype='int64')
>>> out = exe.run(paddle.static.default_main_program(),
... feed={'x':input1, 'y':input2},
... fetch_list=[res.name])
>>> print(out[0].shape)
()
.. code-block:: python
:name: code-example2
# example 2:
# This example shows how to turn Tensor into numpy array and
# use numpy API to register an Python OP
import paddle
import numpy as np
paddle.enable_static()
def element_wise_add(x, y):
# Tensor must be actively converted to numpy array, otherwise,
# numpy.shape can't be used.
x = np.array(x)
y = np.array(y)
if x.shape != y.shape:
raise AssertionError("the shape of inputs must be the same!")
result = np.zeros(x.shape, dtype='int32')
for i in range(len(x)):
for j in range(len(x[0])):
result[i][j] = x[i][j] + y[i][j]
return result
def create_tmp_var(name, dtype, shape):
return paddle.static.default_main_program().current_block().create_var(
name=name, dtype=dtype, shape=shape)
def py_func_demo():
start_program = paddle.static.default_startup_program()
main_program = paddle.static.default_main_program()
# Input of the forward function
x = paddle.static.data(name='x', shape=[2,3], dtype='int32')
y = paddle.static.data(name='y', shape=[2,3], dtype='int32')
# Output of the forward function, name/dtype/shape must be specified
output = create_tmp_var('output','int32', [3,1])
# Multiple Tensor should be passed in the form of tuple(Tensor) or list[Tensor]
paddle.static.py_func(func=element_wise_add, x=[x,y], out=output)
exe=paddle.static.Executor(paddle.CPUPlace())
exe.run(start_program)
# Feed numpy array to main_program
input1 = np.random.randint(1, 10, size=[2,3], dtype='int32')
input2 = np.random.randint(1, 10, size=[2,3], dtype='int32')
out = exe.run(main_program,
feed={'x':input1, 'y':input2},
fetch_list=[output.name])
print("{0} + {1} = {2}".format(input1, input2, out))
py_func_demo()
# Reference output:
# [[5, 9, 9] + [[7, 8, 4] = [array([[12, 17, 13]
# [7, 5, 2]] [1, 3, 3]] [8, 8, 5]], dtype=int32)]
>>> # This example shows how to turn a Tensor into a numpy array and
>>> # use the numpy API to register a Python OP
>>> import paddle
>>> import numpy as np
>>> np.random.seed(1107)
>>> paddle.seed(1107)
>>> paddle.enable_static()
>>> def element_wise_add(x, y):
... # Tensor must be actively converted to numpy array, otherwise,
... # numpy.shape can't be used.
... x = np.array(x)
... y = np.array(y)
... if x.shape != y.shape:
... raise AssertionError("the shape of inputs must be the same!")
... result = np.zeros(x.shape, dtype='int32')
... for i in range(len(x)):
... for j in range(len(x[0])):
... result[i][j] = x[i][j] + y[i][j]
... return result
>>> def create_tmp_var(name, dtype, shape):
... return paddle.static.default_main_program().current_block().create_var(
... name=name, dtype=dtype, shape=shape)
>>> def py_func_demo():
... start_program = paddle.static.default_startup_program()
... main_program = paddle.static.default_main_program()
... # Input of the forward function
... x = paddle.static.data(name='x', shape=[2, 3], dtype='int32')
... y = paddle.static.data(name='y', shape=[2, 3], dtype='int32')
... # Output of the forward function, name/dtype/shape must be specified
... output = create_tmp_var('output','int32', [3, 1])
... # Multiple Tensors should be passed in the form of tuple(Tensor) or list[Tensor]
... paddle.static.py_func(func=element_wise_add, x=[x, y], out=output)
... exe = paddle.static.Executor(paddle.CPUPlace())
... exe.run(start_program)
... # Feed numpy array to main_program
... input1 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
... input2 = np.random.randint(1, 10, size=[2, 3], dtype='int32')
... out = exe.run(main_program,
... feed={'x':input1, 'y':input2},
... fetch_list=[output.name])
... print("{0} + {1} = {2}".format(input1, input2, out))
>>> py_func_demo()
>>> # [[1 5 4] + [[3 7 7] = [array([[ 4, 12, 11]
>>> # [9 4 8]] [2 3 9]] [11, 7, 17]], dtype=int32)]
"""
helper = LayerHelper('py_func', **locals())
check_type(x, 'X', (list, tuple, Variable, type(None)), 'py_func')
@@ -3352,17 +3363,18 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
Examples:
.. code-block:: python
# for LodTensor inputs
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[9, 16],
dtype='float32', lod_level=1)
out_x = paddle.static.nn.row_conv(input=x, future_context_size=2)
# for Tensor inputs
y = paddle.static.data(name='y', shape=[9, 4, 16], dtype='float32')
out_y = paddle.static.nn.row_conv(input=y, future_context_size=2)
.. code-block:: python
>>> # for LodTensor inputs
>>> import paddle
>>> paddle.enable_static()
>>> x = paddle.static.data(name='x', shape=[9, 16],
... dtype='float32', lod_level=1)
>>> out_x = paddle.static.nn.row_conv(input=x, future_context_size=2)
>>> # for Tensor inputs
>>> y = paddle.static.data(name='y', shape=[9, 4, 16], dtype='float32')
>>> out_y = paddle.static.nn.row_conv(input=y, future_context_size=2)
"""
helper = LayerHelper('row_conv', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
@@ -3443,12 +3455,13 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None):
Examples:
.. code-block:: python
import paddle
>>> import paddle
paddle.enable_static()
weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
print(x.shape) # [2, 8, 32, 32]
>>> paddle.enable_static()
>>> weight = paddle.static.data(name='weight', shape=[2, 8, 32, 32], dtype='float32')
>>> x = paddle.static.nn.spectral_norm(weight=weight, dim=1, power_iters=2)
>>> print(x.shape)
(2, 8, 32, 32)
"""
helper = LayerHelper('spectral_norm', **locals())
check_variable_and_dtype(
@@ -3584,11 +3597,12 @@ def layer_norm(
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
print(output.shape) # [8, 32, 32]
>>> import paddle
>>> paddle.enable_static()
>>> x = paddle.static.data(name='x', shape=[8, 32, 32], dtype='float32')
>>> output = paddle.static.nn.layer_norm(input=x, begin_norm_axis=1)
>>> print(output.shape)
(8, 32, 32)
"""
assert (
in_dygraph_mode() is not True
@@ -3744,37 +3758,29 @@ def embedding(
Static Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
x = paddle.static.data(name="x", shape = [2, 4], dtype=np.int64)
output = paddle.static.nn.embedding(x, (10, 3),
param_attr=paddle.nn.initializer.Constant(value=1.0))
m_output=paddle.mean(output)
place = paddle.CPUPlace()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
# x is a Numpy.
# x.data = [[7, 2, 4, 5], [4, 3, 2, 9]]
# x.shape = [2, 4]
out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
# out is a Numpy.
# out.data = [[1., 1., 1.],
# [1., 1., 1.],
# [1., 1., 1.],
# [1., 1., 1.]],
#
# [[1., 1., 1.],
# [1., 1., 1.],
# [1., 1., 1.],
# [0., 0., 0.]]]
# out.shape = [2, 4, 3]
>>> import paddle
>>> import numpy as np
>>> paddle.enable_static()
>>> x = paddle.static.data(name="x", shape = [2, 4], dtype=np.int64)
>>> output = paddle.static.nn.embedding(x, (10, 3),
... param_attr=paddle.nn.initializer.Constant(value=1.0))
>>> m_output=paddle.mean(output)
>>> place = paddle.CPUPlace()
>>> exe = paddle.static.Executor(place)
>>> exe.run(paddle.static.default_startup_program())
>>> x = np.array([[7, 2, 4, 5],[4, 3, 2, 9]], dtype=np.int64)
>>> out, = exe.run(paddle.static.default_main_program(), feed={'x':x}, fetch_list=[output])
>>> print(out)
[[[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]
[[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]
[1. 1. 1.]]]
"""
helper = LayerHelper('embedding', **locals())
@@ -3912,24 +3918,24 @@ def sparse_embedding(
Examples:
.. code-block:: python
import paddle
>>> import paddle
paddle.enable_static()
sparse_feature_dim = 1024
embedding_size = 64
>>> paddle.enable_static()
>>> sparse_feature_dim = 1024
>>> embedding_size = 64
# Only when the feature appear more than 10 times or more will be participated in the training.
entry = paddle.distributed.CountFilterEntry(10)
>>> # Only features that appear 10 times or more will participate in the training.
>>> entry = paddle.distributed.CountFilterEntry(10)
input = paddle.static.data(name='ins', shape=[1], dtype='int64')
>>> input = paddle.static.data(name='ins', shape=[1], dtype='int64')
emb = paddle.static.nn.sparse_embedding(
input=input,
size=[sparse_feature_dim, embedding_size],
is_test=False,
entry=entry,
param_attr=paddle.ParamAttr(name="SparseFeatFactors",
initializer=paddle.nn.initializer.Uniform()))
>>> emb = paddle.static.nn.sparse_embedding(
... input=input,
... size=[sparse_feature_dim, embedding_size],
... is_test=False,
... entry=entry,
... param_attr=paddle.ParamAttr(name="SparseFeatFactors",
... initializer=paddle.nn.initializer.Uniform()))
"""
@@ -4060,49 +4066,49 @@ class ExponentialMovingAverage:
.. code-block:: python
import numpy
import paddle
import paddle.static as static
from paddle.static import ExponentialMovingAverage
paddle.enable_static()
data = static.data(name='x', shape=[-1, 5], dtype='float32')
hidden = static.nn.fc(x=data, size=10)
cost = paddle.mean(hidden)
test_program = static.default_main_program().clone(for_test=True)
optimizer = paddle.optimizer.Adam(learning_rate=0.001)
optimizer.minimize(cost)
ema = ExponentialMovingAverage(0.999)
ema.update()
place = paddle.CPUPlace()
exe = static.Executor(place)
exe.run(static.default_startup_program())
for pass_id in range(3):
for batch_id in range(6):
data = numpy.random.random(size=(10, 5)).astype('float32')
exe.run(program=static.default_main_program(),
feed={'x': data},
fetch_list=[cost.name])
# usage 1
with ema.apply(exe):
data = numpy.random.random(size=(10, 5)).astype('float32')
exe.run(program=test_program,
feed={'x': data},
fetch_list=[hidden.name])
# usage 2
with ema.apply(exe, need_restore=False):
data = numpy.random.random(size=(10, 5)).astype('float32')
exe.run(program=test_program,
feed={'x': data},
fetch_list=[hidden.name])
ema.restore(exe)
>>> import numpy
>>> import paddle
>>> import paddle.static as static
>>> from paddle.static import ExponentialMovingAverage
>>> paddle.enable_static()
>>> data = static.data(name='x', shape=[-1, 5], dtype='float32')
>>> hidden = static.nn.fc(x=data, size=10)
>>> cost = paddle.mean(hidden)
>>> test_program = static.default_main_program().clone(for_test=True)
>>> optimizer = paddle.optimizer.Adam(learning_rate=0.001)
>>> optimizer.minimize(cost)
>>> ema = ExponentialMovingAverage(0.999)
>>> ema.update()
>>> place = paddle.CPUPlace()
>>> exe = static.Executor(place)
>>> exe.run(static.default_startup_program())
>>> for pass_id in range(3):
... for batch_id in range(6):
... data = numpy.random.random(size=(10, 5)).astype('float32')
... exe.run(program=static.default_main_program(),
... feed={'x': data},
... fetch_list=[cost.name])
... # usage 1
... with ema.apply(exe):
... data = numpy.random.random(size=(10, 5)).astype('float32')
... exe.run(program=test_program,
... feed={'x': data},
... fetch_list=[hidden.name])
... # usage 2
... with ema.apply(exe, need_restore=False):
... data = numpy.random.random(size=(10, 5)).astype('float32')
... exe.run(program=test_program,
... feed={'x': data},
... fetch_list=[hidden.name])
... ema.restore(exe)
"""
@@ -62,29 +62,29 @@ def Assert(cond, data=None, summarize=20, name=None):
Examples:
.. code-block:: python
import paddle
from paddle.static.nn.control_flow import Assert
paddle.enable_static()
x = paddle.full([2, 3], 2.0, 'float32')
condition = paddle.max(x) < 1.0 # False
Assert(condition, [x], 10, "example_assert_layer")
exe = paddle.static.Executor()
try:
exe.run(paddle.static.default_main_program())
# Print x and throws ValueError
# Example printed message for x:
#
# Variable: fill_constant_0.tmp_0
# - lod: {}
# - place: CPUPlace()
# - shape: [2, 3]
# - layout: NCHW
# - dtype: float
# - data: [2 2 2 2 2 2]
except ValueError as e:
print("Assert Exception Example")
>>> import paddle
>>> from paddle.static.nn.control_flow import Assert
>>> paddle.enable_static()
>>> x = paddle.full([2, 3], 2.0, 'float32')
>>> condition = paddle.max(x) < 1.0 # False
>>> Assert(condition, [x], 10, "example_assert_layer")
>>> exe = paddle.static.Executor()
>>> try:
... exe.run(paddle.static.default_main_program())
... # Print x and throws ValueError
... # Example printed message for x:
... #
... # Variable: fill_constant_0.tmp_0
... # - lod: {}
... # - place: CPUPlace()
... # - shape: [2, 3]
... # - layout: NCHW
... # - dtype: float
... # - data: [2 2 2 2 2 2]
... except ValueError as e:
... print("Assert Exception Example")
'''
check_variable_and_dtype(
@@ -165,17 +165,21 @@ class ConditionalBlock:
Examples:
.. code-block:: python
import paddle
import paddle.fluid as fluid
cond = paddle.less_than(x=label, y=limit)
true_image, false_image = layers.split_lod_tensor(
input=image, mask=cond)
true_cond = layers.ConditionalBlock([true_image])
with true_cond.block():
...
with false_cond.block():
...
>>> import paddle
>>> from paddle.static.nn.control_flow import ConditionalBlock
>>> label = paddle.rand([1])
>>> limit = paddle.ones([1]) * 0.5
>>> cond = paddle.less_than(x=label, y=limit)
>>> image = paddle.ones([1])
>>> true_image = image[cond]
>>> false_image = image[paddle.logical_not(cond)]
>>> true_cond = ConditionalBlock([true_image])
>>> false_cond = ConditionalBlock([false_image])
>>> with true_cond.block():
... pass
>>> with false_cond.block():
... pass
'''
def __init__(self, inputs, is_scalar_condition=False, name=None):
@@ -425,59 +429,61 @@ class While:
is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Examples 1:
.. code-block:: python
import paddle
import numpy as np
paddle.enable_static()
i = paddle.full(shape=[1], dtype='int64', fill_value=0) # loop counter
loop_len = paddle.full(shape=[1],dtype='int64', fill_value=10) # loop length
cond = paddle.less_than(x=i, y=loop_len)
while_op = paddle.static.nn.control_flow.While(cond=cond)
with while_op.block():
i = paddle.increment(x=i, value=1)
paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
Examples:
.. code-block:: python
:name: example-1
res = exe.run(paddle.static.default_main_program(), feed={}, fetch_list=[i])
print(res) # [array([10])]
>>> import paddle
>>> import numpy as np
>>> paddle.enable_static()
Examples 2:
.. code-block:: python
>>> i = paddle.full(shape=[1], dtype='int64', fill_value=0) # loop counter
import paddle
import numpy as np
>>> loop_len = paddle.full(shape=[1],dtype='int64', fill_value=10) # loop length
paddle.enable_static()
>>> cond = paddle.less_than(x=i, y=loop_len)
>>> while_op = paddle.static.nn.control_flow.While(cond=cond)
>>> with while_op.block():
... i = paddle.increment(x=i, value=1)
... paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
i = paddle.full(shape=[1], dtype='int64', fill_value=0)
loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
one = paddle.full(shape=[1], dtype='float32', fill_value=1)
data = paddle.static.data(name='data', shape=[1], dtype='float32')
sums = paddle.full(shape=[1], dtype='float32', fill_value=0) # Define the variable to be obtained ouside of While, which name should be different from the variable inside the While to be obtained
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> exe.run(paddle.static.default_startup_program())
cond = paddle.less_than(x=i, y=loop_len)
while_op = paddle.static.nn.control_flow.While(cond=cond)
with while_op.block():
sums_tensor = paddle.add(x=data, y=data)
paddle.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums which defined outside of While through layers.assign
i = paddle.increment(x=i, value=1)
data = paddle.add(x=data, y=one)
paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
>>> res = exe.run(paddle.static.default_main_program(), feed={}, fetch_list=[i])
>>> print(res)
[array([10], dtype=int64)]
feed_data = np.ones(1).astype('float32')
exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(paddle.static.default_startup_program())
res = exe.run(paddle.static.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
print(res[0]) # [2.] # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
.. code-block:: python
:name: example-2
>>> import paddle
>>> import numpy as np
>>> paddle.enable_static()
>>> i = paddle.full(shape=[1], dtype='int64', fill_value=0)
>>> loop_len = paddle.full(shape=[1], dtype='int64', fill_value=10)
>>> one = paddle.full(shape=[1], dtype='float32', fill_value=1)
>>> data = paddle.static.data(name='data', shape=[1], dtype='float32')
>>> sums = paddle.full(shape=[1], dtype='float32', fill_value=0) # Define the variable to be obtained outside of While, whose name should be different from the variable inside the While to be obtained
>>> cond = paddle.less_than(x=i, y=loop_len)
>>> while_op = paddle.static.nn.control_flow.While(cond=cond)
>>> with while_op.block():
... sums_tensor = paddle.add(x=data, y=data)
... paddle.assign(sums_tensor, sums) # Update the value of sums_tensor defined in While to the sums defined outside of While through paddle.assign
... i = paddle.increment(x=i, value=1)
... data = paddle.add(x=data, y=one)
... paddle.assign(paddle.less_than(x=i, y=loop_len), output=cond)
>>> feed_data = np.ones(1).astype('float32')
>>> exe = paddle.static.Executor(paddle.CPUPlace())
>>> exe.run(paddle.static.default_startup_program())
>>> res = exe.run(paddle.static.default_main_program(), feed={'data': feed_data}, fetch_list=sums)
>>> print(res[0]) # Because the data in While does not update the value outside the While, the value of sums is [2.] after the loop
[2.]
"""
BEFORE_WHILE_BLOCK = 0
@@ -619,26 +625,27 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None):
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
>>> import paddle
>>> paddle.enable_static()
def cond(i, ten):
return i < ten
>>> def cond(i, ten):
... return i < ten
def body(i, ten):
i = i + 1
return [i, ten]
>>> def body(i, ten):
... i = i + 1
... return [i, ten]
main_program = paddle.static.default_main_program()
startup_program = paddle.static.default_startup_program()
with paddle.static.program_guard(main_program, startup_program):
i = paddle.full(shape=[1], fill_value=0, dtype='int64') # loop counter
ten = paddle.full(shape=[1], fill_value=10, dtype='int64') # loop length
i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
>>> main_program = paddle.static.default_main_program()
>>> startup_program = paddle.static.default_startup_program()
>>> with paddle.static.program_guard(main_program, startup_program):
... i = paddle.full(shape=[1], fill_value=0, dtype='int64') # loop counter
... ten = paddle.full(shape=[1], fill_value=10, dtype='int64') # loop length
... i, ten = paddle.static.nn.while_loop(cond, body, [i, ten])
exe = paddle.static.Executor(paddle.CPUPlace())
res = exe.run(main_program, feed={}, fetch_list=[i])
print(res) # [array([10])]
... exe = paddle.static.Executor(paddle.CPUPlace())
... res = exe.run(main_program, feed={}, fetch_list=[i])
... print(res)
[array([10], dtype=int64)]
"""
helper = LayerHelper('while_loop', **locals())
@@ -783,43 +790,42 @@ def case(pred_fn_pairs, default=None, name=None):
Examples:
.. code-block:: python
import paddle
>>> import paddle
>>> paddle.enable_static()
paddle.enable_static()
>>> def fn_1():
... return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
def fn_1():
return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
>>> def fn_2():
... return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
def fn_2():
return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
>>> def fn_3():
... return paddle.full(shape=[3], dtype='int32', fill_value=3)
def fn_3():
return paddle.full(shape=[3], dtype='int32', fill_value=3)
>>> main_program = paddle.static.default_main_program()
>>> startup_program = paddle.static.default_startup_program()
main_program = paddle.static.default_startup_program()
startup_program = paddle.static.default_main_program()
>>> with paddle.static.program_guard(main_program, startup_program):
... x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
... y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
... z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
with paddle.static.program_guard(main_program, startup_program):
x = paddle.full(shape=[1], dtype='float32', fill_value=0.3)
y = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
z = paddle.full(shape=[1], dtype='float32', fill_value=0.2)
... pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
... pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
... pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1
pred_1 = paddle.less_than(z, x) # true: 0.2 < 0.3
pred_2 = paddle.less_than(x, y) # false: 0.3 < 0.1
pred_3 = paddle.equal(x, y) # false: 0.3 == 0.1
... # Call fn_1 because pred_1 is True
... out_1 = paddle.static.nn.case(
... pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)
# Call fn_1 because pred_1 is True
out_1 = paddle.static.nn.case(
pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)
... # Argument default is None and no pred in pred_fn_pairs is True,
... # so fn_3 will be called because it is the last callable in pred_fn_pairs.
... out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
# Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called.
# because fn_3 is the last callable in pred_fn_pairs.
out_2 = paddle.static.nn.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
exe = paddle.static.Executor(paddle.CPUPlace())
res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
print(res_1) # [[1. 1.]]
print(res_2) # [3 3 3]
... exe = paddle.static.Executor(paddle.CPUPlace())
... res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
... print(res_1, res_2)
[[1. 1.]] [3 3 3]
'''
helper = LayerHelper('case', **locals())
@@ -919,45 +925,59 @@ def switch_case(branch_index, branch_fns, default=None, name=None):
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
def fn_1():
return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
def fn_2():
return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
def fn_3():
return paddle.full(shape=[3], dtype='int32', fill_value=3)
main_program = paddle.static.default_startup_program()
startup_program = paddle.static.default_main_program()
with paddle.static.program_guard(main_program, startup_program):
index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
out_1 = paddle.static.nn.switch_case(
branch_index=index_1,
branch_fns={1: fn_1, 2: fn_2},
default=fn_3)
out_2 = paddle.static.nn.switch_case(
branch_index=index_2,
branch_fns=[(1, fn_1), (2, fn_2)],
default=fn_3)
# Argument default is None and no index matches. fn_3 will be called because of the max index 7.
out_3 = paddle.static.nn.switch_case(
branch_index=index_2,
branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])
exe = paddle.static.Executor(paddle.CPUPlace())
res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
print(res_1) # [[1. 1.]]
print(res_2) # [[2 2] [2 2]]
print(res_3) # [3 3 3]
>>> import paddle
>>> paddle.enable_static()
>>> def fn_1():
... return paddle.full(shape=[1, 2], dtype='float32', fill_value=1)
>>> def fn_2():
... return paddle.full(shape=[2, 2], dtype='int32', fill_value=2)
>>> def fn_3():
... return paddle.full(shape=[3], dtype='int32', fill_value=3)
>>> startup_program = paddle.static.default_startup_program()
>>> main_program = paddle.static.default_main_program()
>>> with paddle.static.program_guard(main_program, startup_program):
... index_1 = paddle.full(shape=[1], dtype='int32', fill_value=1)
... index_2 = paddle.full(shape=[1], dtype='int32', fill_value=2)
...
... out_1 = paddle.static.nn.switch_case(
... branch_index=index_1,
... branch_fns={1: fn_1, 2: fn_2},
... default=fn_3)
...
... out_2 = paddle.static.nn.switch_case(
... branch_index=index_2,
... branch_fns=[(1, fn_1), (2, fn_2)],
... default=fn_3)
...
... # Argument default is None and no index matches. fn_3 will be called because of the max index 7.
... out_3 = paddle.static.nn.switch_case(
... branch_index=index_2,
... branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])
...
... exe = paddle.static.Executor(paddle.CPUPlace())
... res_1, res_2, res_3 = exe.run(main_program, fetch_list=[out_1, out_2, out_3])
>>> print(res_1)
[[1. 1.]]
>>> print(res_2)
[[2 2]
[2 2]]
>>> print(res_3)
[3 3 3]
'''
helper = LayerHelper('switch_case', **locals())
@@ -1093,12 +1113,12 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
.. code-block:: python
:name: code-example-1
import paddle
>>> import paddle
a = paddle.zeros((1, 1))
b = paddle.zeros((1, 1))
c = a * b
out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
>>> a = paddle.zeros((1, 1))
>>> b = paddle.zeros((1, 1))
>>> c = a * b
>>> out = paddle.static.nn.cond(a < b, lambda: a + c, lambda: b * b)
No matter whether ``a < b`` , ``c = a * b`` will be in net building and
run. ``a + c`` and ``b * b`` will be in net building, but only one
......@@ -1128,39 +1148,46 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None):
.. code-block:: python
:name: code-example-2
import paddle
#
# pseudocode:
# if 0.1 < 0.23:
# return 1, True
# else:
# return 3, 2
#
def true_func():
return paddle.full(shape=[1, 2], dtype='int32',
fill_value=1), paddle.full(shape=[2, 3],
dtype='bool',
fill_value=True)
def false_func():
return paddle.full(shape=[3, 4], dtype='float32',
fill_value=3), paddle.full(shape=[4, 5],
dtype='int64',
fill_value=2)
x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
pred = paddle.less_than(x=x, y=y, name=None)
ret = paddle.static.nn.cond(pred, true_func, false_func)
# ret is a tuple containing 2 tensors
# ret[0] = [[1 1]]
# ret[1] = [[ True True True]
# [ True True True]]
>>> import paddle
>>> # pseudocode:
>>> # if 0.1 < 0.23:
>>> # return 1, True
>>> # else:
>>> # return 3, 2
>>> def true_func():
... return paddle.full(shape=[1, 2],
... dtype='int32',
... fill_value=1
... ), paddle.full(shape=[2, 3],
... dtype='bool',
... fill_value=True
... )
>>> def false_func():
... return paddle.full(shape=[3, 4],
... dtype='float32',
... fill_value=3
... ), paddle.full(shape=[4, 5],
... dtype='int64',
... fill_value=2
... )
>>> x = paddle.full(shape=[1], dtype='float32', fill_value=0.1)
>>> y = paddle.full(shape=[1], dtype='float32', fill_value=0.23)
>>> pred = paddle.less_than(x=x, y=y, name=None)
>>> a, b = paddle.static.nn.cond(pred, true_func, false_func)
>>> print(a)
Tensor(shape=[1, 2], dtype=int32, place=Place(cpu), stop_gradient=True,
[[1, 1]])
>>> print(b)
Tensor(shape=[2, 3], dtype=bool, place=Place(cpu), stop_gradient=True,
[[True, True, True],
[True, True, True]])
"""
if in_dygraph_mode():
assert isinstance(pred, Variable), "The pred in cond must be Variable"
@@ -1665,24 +1692,29 @@ def Print(
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
out = paddle.static.Print(x, message="The content of input layer:")
main_program = paddle.static.default_main_program()
exe = paddle.static.Executor(place=paddle.CPUPlace())
res = exe.run(main_program, fetch_list=[out])
# Variable: fill_constant_1.tmp_0
# - message: The content of input layer:
# - lod: {}
# - place: CPUPlace
# - shape: [2, 3]
# - layout: NCHW
# - dtype: long
# - data: [3 3 3 3 3 3]
>>> import paddle
>>> paddle.enable_static()
>>> x = paddle.full(shape=[2, 3], fill_value=3, dtype='int64')
>>> out = paddle.static.Print(x, message="The content of input layer:")
>>> main_program = paddle.static.default_main_program()
>>> exe = paddle.static.Executor(place=paddle.CPUPlace())
>>> res = exe.run(main_program, fetch_list=[out])
>>> # doctest: +SKIP('Unable to get output')
Variable: fill_constant_1.tmp_0
- message: The content of input layer:
- lod: {}
- place: Place(cpu)
- shape: [2, 3]
- layout: NCHW
- dtype: int64
- data: [3 3 3 3 3 3]
>>> # doctest: -SKIP
>>> res
[array([[3, 3, 3],
[3, 3, 3]], dtype=int64)]
'''
check_variable_and_dtype(
input,