diff --git a/doc/api/v2/config/layer.rst b/doc/api/v2/config/layer.rst
index 2a02baf17ba0d1119a8d222024616ef8ae33f8d5..4e3589ebc47039a4bc4e04b130c0aa536dda73a1 100644
--- a/doc/api/v2/config/layer.rst
+++ b/doc/api/v2/config/layer.rst
@@ -419,6 +419,11 @@ hsigmoid
 .. autoclass:: paddle.v2.layer.hsigmoid
     :noindex:
 
+smooth_l1
+---------
+.. automodule:: paddle.v2.layer.smooth_l1
+    :noindex:
+
 Check Layer
 ============
 
diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py
index 56fca13d37242a091f09b35ca2e139071c644cad..1796e48f09ae50642897af7e783f020580772ac4 100755
--- a/python/paddle/trainer_config_helpers/layers.py
+++ b/python/paddle/trainer_config_helpers/layers.py
@@ -116,7 +116,7 @@ __all__ = [
     'spp_layer',
     'pad_layer',
     'eos_layer',
-    'smooth_l1_cost',
+    'smooth_l1',
     'layer_support',
 ]
 
@@ -5283,7 +5283,7 @@ def multi_binary_label_cross_entropy(input,
 
 @wrap_name_default()
 @layer_support()
-def smooth_l1_cost(input, label, name=None, layer_attr=None):
+def smooth_l1(input, label, name=None, layer_attr=None):
     """
     This is a L1 loss but more smooth. It requires that the size of
     input and label are equal. The formula is as follows,
@@ -5307,8 +5307,8 @@ def smooth_l1_cost(input, label, name=None, layer_attr=None):
 
     .. code-block:: python
 
-       cost = smooth_l1_cost(input=input_layer,
-                             label=label_layer)
+       cost = smooth_l1(input=input_layer,
+                        label=label_layer)
 
     :param input: The input layer.
     :type input: LayerOutput
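
For reference, the formula the docstring mentions is elided by the hunk boundaries above. The standard smooth L1 (Huber-style) loss penalizes small residuals quadratically and large residuals linearly, which makes it less sensitive to outliers than squared error. The sketch below illustrates that standard definition in plain NumPy; it is not code from this patch, and the name ``smooth_l1_reference`` and the branch threshold of 1.0 are assumptions based on the common definition (as used in Fast R-CNN), not values confirmed by this repository.

.. code-block:: python

    # Minimal reference sketch of the standard smooth L1 loss; not part of
    # this patch. Assumes the common threshold of 1.0 between branches.
    import numpy as np

    def smooth_l1_reference(x, y):
        """Sum of element-wise smooth L1: quadratic for |d| < 1, linear otherwise."""
        d = np.abs(x - y)                  # element-wise residual magnitude
        per_elem = np.where(d < 1.0,
                            0.5 * d ** 2,  # quadratic branch near zero
                            d - 0.5)       # linear branch for large residuals
        return per_elem.sum()

    # A small residual falls on the quadratic branch, a large one on the linear:
    x = np.array([0.5, 3.0])
    y = np.array([0.0, 0.0])
    print(smooth_l1_reference(x, y))       # 0.5 * 0.5**2 + (3.0 - 0.5) = 2.625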