Unverified  Commit b77d9f26  Authored by Bai Yifan, committed by GitHub

migrate code example and doc (#27627)

* migrate code example and doc
Parent 7cd2c13f
@@ -302,9 +302,6 @@ def cross_entropy2(input, label, ignore_index=kIgnoreIndex):
 def square_error_cost(input, label):
     """
-    :alias_main: paddle.nn.functional.square_error_cost
-    :alias: paddle.nn.functional.square_error_cost,paddle.nn.functional.loss.square_error_cost
-    :old_api: paddle.fluid.layers.square_error_cost
 
     This op accepts input predictions and target label and returns the
     squared error cost.
@@ -316,49 +313,26 @@ def square_error_cost(input, label):
         Out = (input - label)^2
 
     Parameters:
-        input (Variable): Input tensor, the data type should be float32.
-        label (Variable): Label tensor, the data type should be float32.
+        input (Tensor): Input tensor, the data type should be float32.
+        label (Tensor): Label tensor, the data type should be float32.
 
     Returns:
-        The tensor variable storing the element-wise squared error \
+        The tensor storing the element-wise squared error \
         difference between input and label.
 
-    Return type: Variable.
+    Return type: Tensor.
 
     Examples:
         .. code-block:: python
 
-            # declarative mode
-            import paddle.fluid as fluid
-            import numpy as np
-            input = fluid.data(name="input", shape=[1])
-            label = fluid.data(name="label", shape=[1])
-            output = fluid.layers.square_error_cost(input,label)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-
-            input_data = np.array([1.5]).astype("float32")
-            label_data = np.array([1.7]).astype("float32")
-            output_data = exe.run(fluid.default_main_program(),
-                feed={"input":input_data, "label":label_data},
-                fetch_list=[output],
-                return_numpy=True)
-            print(output_data)
-            # [array([0.04000002], dtype=float32)]
-
-            # imperative mode
-            import paddle.fluid.dygraph as dg
-            with dg.guard(place) as g:
-                input = dg.to_variable(input_data)
-                label = dg.to_variable(label_data)
-                output = fluid.layers.square_error_cost(input, label)
-                print(output.numpy())
-                # [0.04000002]
+            import paddle
+            input = paddle.to_tensor([1.1, 1.9])
+            label = paddle.to_tensor([1.0, 2.0])
+            output = paddle.nn.functional.square_error_cost(input, label)
+            print(output.numpy())
+            # [0.01, 0.01]
     """
     check_variable_and_dtype(input, "input", ['float32', 'float64'],
                              'square_error_cost')
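
Note: the migrated example prints # [0.01, 0.01] because (1.1 - 1.0)^2 = (1.9 - 2.0)^2 = 0.01. A minimal cross-check against NumPy, assuming a Paddle 2.0 installation where paddle.to_tensor and paddle.nn.functional.square_error_cost are available as shown in the diff:

    import numpy as np
    import paddle

    # Element-wise squared error from the migrated example.
    input = paddle.to_tensor([1.1, 1.9])
    label = paddle.to_tensor([1.0, 2.0])
    out = paddle.nn.functional.square_error_cost(input, label)

    # The same quantity computed directly: (input - label)^2.
    expected = (np.array([1.1, 1.9], dtype="float32")
                - np.array([1.0, 2.0], dtype="float32")) ** 2

    print(out.numpy())  # ~[0.01, 0.01]
    print(expected)     # [0.01, 0.01] up to float32 rounding
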
@@ -1777,9 +1751,6 @@ def npair_loss(anchor, positive, labels, l2_reg=0.002):
 def mse_loss(input, label):
     """
-    :alias_main: paddle.nn.functional.mse_loss
-    :alias: paddle.nn.functional.mse_loss,paddle.nn.functional.loss.mse_loss
-    :old_api: paddle.fluid.layers.mse_loss
 
     This op accepts input predictions and target label and returns the mean square error.
@@ -1790,47 +1761,23 @@ def mse_loss(input, label):
         Out = MEAN((input - label)^2)
 
     Parameters:
-        input (Variable): Input tensor, the data type should be float32.
-        label (Variable): Label tensor, the data type should be float32.
+        input (Tensor): Input tensor, the data type should be float32.
+        label (Tensor): Label tensor, the data type should be float32.
 
     Returns:
-        Variable: The tensor variable storing the mean square error difference of input and label.
+        Tensor: The tensor storing the mean square error difference of input and label.
 
-    Return type: Variable.
+    Return type: Tensor.
 
     Examples:
         .. code-block:: python
 
-            # declarative mode
-            import paddle.fluid as fluid
-            import numpy as np
-            input = fluid.data(name="input", shape=[1])
-            label = fluid.data(name="label", shape=[1])
-            output = fluid.layers.mse_loss(input,label)
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(fluid.default_startup_program())
-
-            input_data = np.array([1.5]).astype("float32")
-            label_data = np.array([1.7]).astype("float32")
-            output_data = exe.run(fluid.default_main_program(),
-                feed={"input":input_data, "label":label_data},
-                fetch_list=[output],
-                return_numpy=True)
-            print(output_data)
-            # [array([0.04000002], dtype=float32)]
-
-            # imperative mode
-            import paddle.fluid.dygraph as dg
-            with dg.guard(place) as g:
-                input = dg.to_variable(input_data)
-                label = dg.to_variable(label_data)
-                output = fluid.layers.mse_loss(input, label)
-                print(output.numpy())
-                # [0.04000002]
+            import paddle
+            input = paddle.to_tensor([1.1, 1.9])
+            label = paddle.to_tensor([1.0, 2.0])
+            output = paddle.fluid.layers.mse_loss(input, label)
+            print(output.numpy())
+            # [0.01]
     """
     check_variable_and_dtype(input, "input", ['float32', 'float64'], 'mse_loss')
     check_variable_and_dtype(label, "label", ['float32', 'float64'], 'mse_loss')
......
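
Note: mse_loss is simply the mean of what square_error_cost returns (Out = MEAN((input - label)^2)), which is why the migrated example prints # [0.01]. A minimal sketch, under the same Paddle 2.0 assumptions as above, relating the two ops touched by this commit:

    import paddle

    input = paddle.to_tensor([1.1, 1.9])
    label = paddle.to_tensor([1.0, 2.0])

    # Element-wise squared error, then its mean: mean([0.01, 0.01]) == 0.01.
    elementwise = paddle.nn.functional.square_error_cost(input, label)
    mean_of_elementwise = paddle.mean(elementwise)

    # The op documented above computes the same value directly.
    mse = paddle.fluid.layers.mse_loss(input, label)

    print(mean_of_elementwise.numpy(), mse.numpy())  # ~0.01 for both
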
@@ -2306,7 +2306,7 @@ def pool3d(input,
     return pool_out
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool2d")
+@deprecated(since="2.0.0")
 @templatedoc(op_type="pool2d")
 def adaptive_pool2d(input,
                     pool_size,
@@ -2314,9 +2314,6 @@ def adaptive_pool2d(input,
                     require_index=False,
                     name=None):
     """
-    :alias_main: paddle.nn.functional.adaptive_pool2d
-    :alias: paddle.nn.functional.adaptive_pool2d,paddle.nn.functional.pooling.adaptive_pool2d
-    :old_api: paddle.fluid.layers.adaptive_pool2d
 
     This operation calculates the output based on the input, pool_size,
     pool_type parameters. Input(X) and output(Out) are in NCHW format, where N is batch
@@ -2340,7 +2337,7 @@ def adaptive_pool2d(input,
       Output(i ,j) &= \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
 
     Args:
-        input (Variable): The input tensor of pooling operator, which is a 4-D tensor
+        input (Tensor): The input tensor of pooling operator, which is a 4-D tensor
                           with shape [N, C, H, W]. The format of input tensor is NCHW,
                           where N is batch size, C is the number of channels, H is the
                           height of the feature, and W is the width of the feature.
@@ -2355,7 +2352,7 @@ def adaptive_pool2d(input,
                              None by default.
 
     Returns:
-        Variable: The output tensor of adaptive pooling result. The data type is same
+        Tensor: The output tensor of adaptive pooling result. The data type is same
                   as input tensor.
 
     Raises:
@@ -2381,9 +2378,9 @@ def adaptive_pool2d(input,
          #     wend = ceil((i + 1) * W / n)
          #     output[:, :, i, j] = avg(input[:, :, hstart: hend, wstart: wend])
          #
-         import paddle.fluid as fluid
-         data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
-         pool_out = fluid.layers.adaptive_pool2d(
+         import paddle
+         data = paddle.rand(shape=[1,3,32,32])
+         pool_out = paddle.fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='avg')
@@ -2403,9 +2400,9 @@ def adaptive_pool2d(input,
          #     wend = ceil((i + 1) * W / n)
          #     output[:, :, i, j] = max(input[:, :, hstart: hend, wstart: wend])
          #
-         import paddle.fluid as fluid
-         data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32')
-         pool_out = fluid.layers.adaptive_pool2d(
+         import paddle
+         data = paddle.rand(shape=[1,3,32,32])
+         pool_out = paddle.fluid.layers.adaptive_pool2d(
                            input=data,
                            pool_size=[3, 3],
                            pool_type='max')
@@ -2454,7 +2451,7 @@ def adaptive_pool2d(input,
     return (pool_out, mask) if require_index else pool_out
 
-@deprecated(since="2.0.0", update_to="paddle.nn.functional.adaptive_pool3d")
+@deprecated(since="2.0.0")
 @templatedoc(op_type="pool3d")
 def adaptive_pool3d(input,
                     pool_size,
@@ -2462,9 +2459,6 @@ def adaptive_pool3d(input,
                     require_index=False,
                     name=None):
     """
-    :alias_main: paddle.nn.functional.adaptive_pool3d
-    :alias: paddle.nn.functional.adaptive_pool3d,paddle.nn.functional.pooling.adaptive_pool3d
-    :old_api: paddle.fluid.layers.adaptive_pool3d
 
     This operation calculates the output based on the input, pool_size,
     pool_type parameters. Input(X) and output(Out) are in NCDHW format, where N is batch
@@ -2493,7 +2487,7 @@ def adaptive_pool3d(input,
      Output(i ,j, k) &= \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
 
     Args:
-        input (Variable): The input tensor of pooling operator, which is a 5-D tensor with
+        input (Tensor): The input tensor of pooling operator, which is a 5-D tensor with
                           shape [N, C, D, H, W]. The format of input tensor is NCDHW, where
                           N is batch size, C is the number of channels, D is the depth of the feature,
                           H is the height of the feature, and W is the width of the feature.
@@ -2508,7 +2502,7 @@ def adaptive_pool3d(input,
                              None by default.
 
     Returns:
-        Variable: The output tensor of adaptive pooling result. The data type is same as input tensor.
+        Tensor: The output tensor of adaptive pooling result. The data type is same as input tensor.
 
     Raises:
         ValueError: 'pool_type' is not 'max' nor 'avg'.
@@ -2538,11 +2532,9 @@ def adaptive_pool3d(input,
          #                 avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #
-         import paddle.fluid as fluid
-
-         data = fluid.data(
-             name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
-         pool_out = fluid.layers.adaptive_pool3d(
+         import paddle
+         data = paddle.rand(shape=[1,3,32,32,32])
+         pool_out = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='avg')
@@ -2567,11 +2559,9 @@ def adaptive_pool3d(input,
          #                 avg(input[:, :, dstart:dend, hstart: hend, wstart: wend])
          #
-         import paddle.fluid as fluid
-
-         data = fluid.data(
-             name='data', shape=[None, 3, 32, 32, 32], dtype='float32')
-         pool_out = fluid.layers.adaptive_pool3d(
+         import paddle
+         data = paddle.rand(shape=[1,3,32,32,32])
+         pool_out = paddle.fluid.layers.adaptive_pool3d(
                            input=data,
                            pool_size=[3, 3, 3],
                            pool_type='max')
......
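
Note: with the update_to targets stripped from @deprecated, the doc no longer names a replacement for adaptive_pool2d/adaptive_pool3d. Assuming the Paddle 2.0 functional API exposes adaptive_avg_pool2d/3d and adaptive_max_pool2d/3d with an output_size argument (an assumption; only the adaptive_max_pool* imports are visible later in this commit), a migration of the examples above might look like this:

    import paddle

    # 2-D case: counterpart of pool_size=[3, 3] with pool_type 'avg' / 'max'.
    x2d = paddle.rand(shape=[1, 3, 32, 32])
    avg2d = paddle.nn.functional.adaptive_avg_pool2d(x2d, output_size=[3, 3])
    max2d = paddle.nn.functional.adaptive_max_pool2d(x2d, output_size=[3, 3])

    # 3-D case: counterpart of pool_size=[3, 3, 3] on NCDHW input.
    x3d = paddle.rand(shape=[1, 3, 32, 32, 32])
    avg3d = paddle.nn.functional.adaptive_avg_pool3d(x3d, output_size=[3, 3, 3])
    max3d = paddle.nn.functional.adaptive_max_pool3d(x3d, output_size=[3, 3, 3])

    print(avg2d.shape, max2d.shape)  # [1, 3, 3, 3] [1, 3, 3, 3]
    print(avg3d.shape, max3d.shape)  # [1, 3, 3, 3, 3] [1, 3, 3, 3, 3]
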
@@ -173,16 +173,12 @@ from .norm import normalize  #DEFINE_ALIAS
 from .pooling import pool2d  #DEFINE_ALIAS
 from .pooling import pool3d  #DEFINE_ALIAS
 from .pooling import avg_pool1d  #DEFINE_ALIAS
-from .pooling import adaptive_pool2d  #DEFINE_ALIAS
-from .pooling import adaptive_pool3d  #DEFINE_ALIAS
 from .pooling import avg_pool2d  #DEFINE_ALIAS
 from .pooling import avg_pool3d  #DEFINE_ALIAS
 from .pooling import max_pool1d  #DEFINE_ALIAS
 from .pooling import max_pool2d  #DEFINE_ALIAS
 from .pooling import max_pool3d  #DEFINE_ALIAS
-from .pooling import adaptive_pool2d  #DEFINE_ALIAS
-from .pooling import adaptive_pool3d  #DEFINE_ALIAS
 from .pooling import adaptive_max_pool1d  #DEFINE_ALIAS
 from .pooling import adaptive_max_pool2d  #DEFINE_ALIAS
 from .pooling import adaptive_max_pool3d  #DEFINE_ALIAS
......
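
Note: after these import removals, code that does `from paddle.nn.functional import adaptive_pool2d` will fail, while the deprecated ops remain reachable through paddle.fluid.layers. A small sketch, assuming the post-commit package layout:

    # Assuming the package layout after this commit.
    try:
        from paddle.nn.functional import adaptive_pool2d  # removed by this commit
    except ImportError:
        # The deprecated op is still defined in the fluid namespace.
        from paddle.fluid.layers import adaptive_pool2d

    from paddle.nn.functional import adaptive_max_pool2d  # still exported
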
@@ -15,8 +15,6 @@
 
 # TODO: define pooling functions
 from ...fluid.layers import pool2d  #DEFINE_ALIAS
 from ...fluid.layers import pool3d  #DEFINE_ALIAS
-from ...fluid.layers import adaptive_pool2d  #DEFINE_ALIAS
-from ...fluid.layers import adaptive_pool3d  #DEFINE_ALIAS
 from ...fluid import core
 from ...fluid.framework import in_dygraph_mode
 from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
@@ -25,8 +23,6 @@ from ...fluid.data_feeder import check_type, check_variable_and_dtype
 
 __all__ = [
     'pool2d',
     'pool3d',
-    'adaptive_pool2d',
-    'adaptive_pool3d',
     'avg_pool1d',
     'avg_pool2d',
     'avg_pool3d',
......