From 0e6b43a6b45a1401146e0837b4e83ca4878ff46a Mon Sep 17 00:00:00 2001
From: Yang Zhang
Date: Fri, 24 Apr 2020 19:31:26 +0800
Subject: [PATCH] [Cherry-pick Release 2.0] Minor fix to `MSELoss` docstring
 (#24078)

* Indent MSELoss example docs

* Point out input tensors should be of the same shape

test=develop

* Document `MSELoss` input and return parameters

test=release/2.0-beta,test=document_fix
---
 python/paddle/nn/layer/loss.py | 80 +++++++++++++++++++---------------
 1 file changed, 45 insertions(+), 35 deletions(-)

diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 5838bf33c63..63931613d17 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -155,48 +155,58 @@ class MSELoss(fluid.dygraph.layers.Layer):
     .. math::
         Out = \operatorname{sum}((input - label)^2)

-    where `input` and `label` are `float32` tensors of arbitrary shapes.
+    where `input` and `label` are `float32` tensors of the same shape.

     Parameters:
+        input (Variable): Input tensor, the data type is float32.
+        label (Variable): Label tensor, the data type is float32.
         reduction (string, optional): The reduction method for the output,
             could be 'none' | 'mean' | 'sum'.
-            'none': no reduction will be applied
-            'mean': the output will be averaged
-            'sum': the output will be summed
+            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned.
+            If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned.
+            If :attr:`reduction` is ``'none'``, the unreduced loss is returned.
+            Default is ``'mean'``.
+
+    Returns:
+        The tensor variable storing the MSE loss of input and label.
+
+    Return type:
+        Variable.

     Examples:
         .. code-block:: python
-        import numpy as np
-        import paddle
-        from paddle import fluid
-        import paddle.fluid.dygraph as dg
-
-        mse_loss = paddle.nn.loss.MSELoss()
-        input = fluid.data(name="input", shape=[1])
-        label = fluid.data(name="label", shape=[1])
-        place = fluid.CPUPlace()
-        input_data = np.array([1.5]).astype("float32")
-        label_data = np.array([1.7]).astype("float32")
-
-        # declarative mode
-        output = mse_loss(input,label)
-        exe = fluid.Executor(place)
-        exe.run(fluid.default_startup_program())
-        output_data = exe.run(
-            fluid.default_main_program(),
-            feed={"input":input_data, "label":label_data},
-            fetch_list=[output],
-            return_numpy=True)
-        print(output_data)
-        # [array([0.04000002], dtype=float32)]
-
-        # imperative mode
-        with dg.guard(place) as g:
-            input = dg.to_variable(input_data)
-            label = dg.to_variable(label_data)
-            output = mse_loss(input, label)
-            print(output.numpy())
-            # [0.04000002]
+
+            import numpy as np
+            import paddle
+            from paddle import fluid
+            import paddle.fluid.dygraph as dg
+
+            mse_loss = paddle.nn.loss.MSELoss()
+            input = fluid.data(name="input", shape=[1])
+            label = fluid.data(name="label", shape=[1])
+            place = fluid.CPUPlace()
+            input_data = np.array([1.5]).astype("float32")
+            label_data = np.array([1.7]).astype("float32")
+
+            # declarative mode
+            output = mse_loss(input,label)
+            exe = fluid.Executor(place)
+            exe.run(fluid.default_startup_program())
+            output_data = exe.run(
+                fluid.default_main_program(),
+                feed={"input":input_data, "label":label_data},
+                fetch_list=[output],
+                return_numpy=True)
+            print(output_data)
+            # [array([0.04000002], dtype=float32)]
+
+            # imperative mode
+            with dg.guard(place) as g:
+                input = dg.to_variable(input_data)
+                label = dg.to_variable(label_data)
+                output = mse_loss(input, label)
+                print(output.numpy())
+                # [0.04000002]
     """

     def __init__(self, reduction='mean'):
--
GitLab
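Note: the 'none' / 'mean' / 'sum' reduction semantics documented above can be sanity-checked with plain NumPy. The sketch below is illustrative only and is not part of the patch; the input and label values are made up.

.. code-block:: python

    # Illustrative check of the three reduction modes documented for MSELoss.
    # Values are arbitrary examples, not taken from the patch.
    import numpy as np

    input = np.array([1.5, 2.0], dtype="float32")
    label = np.array([1.7, 1.0], dtype="float32")

    squared_error = (input - label) ** 2   # 'none': unreduced, element-wise loss
    mean_loss = squared_error.mean()       # 'mean': average over all elements
    sum_loss = squared_error.sum()         # 'sum': total over all elements

    print(squared_error)  # approx. [0.04, 1.0]
    print(mean_loss)      # approx. 0.52
    print(sum_loss)       # approx. 1.04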
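For the imperative (dygraph) path, a non-default reduction can be passed the same way as in the docstring example. This is a minimal sketch assuming the same 2.0-beta fluid dygraph API that the patch's example uses, and the two-element tensors are made up for illustration.

.. code-block:: python

    # Minimal dygraph sketch using reduction='sum'; mirrors the imperative
    # example in the patch, with a second element added for illustration.
    import numpy as np
    import paddle
    from paddle import fluid
    import paddle.fluid.dygraph as dg

    input_data = np.array([1.5, 2.0]).astype("float32")
    label_data = np.array([1.7, 1.0]).astype("float32")

    mse_sum = paddle.nn.loss.MSELoss(reduction='sum')
    with dg.guard(fluid.CPUPlace()):
        input = dg.to_variable(input_data)
        label = dg.to_variable(label_data)
        output = mse_sum(input, label)
        print(output.numpy())  # approx. 1.04 (sum of squared errors)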