From 90c7f9870e76a398f97d1d510c662a75d373881f Mon Sep 17 00:00:00 2001
From: frankwhzhang
Date: Mon, 10 Dec 2018 14:36:52 +0800
Subject: [PATCH] fix 'name', test=develop

---
 paddle/fluid/API.spec            | 2 +-
 python/paddle/fluid/layers/nn.py | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec
index 3a422dcb336..fd4cf92d85d 100644
--- a/paddle/fluid/API.spec
+++ b/paddle/fluid/API.spec
@@ -66,7 +66,7 @@ paddle.fluid.layers.linear_chain_crf ArgSpec(args=['input', 'label', 'param_attr
 paddle.fluid.layers.crf_decoding ArgSpec(args=['input', 'param_attr', 'label'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.cos_sim ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.cross_entropy ArgSpec(args=['input', 'label', 'soft_label', 'ignore_index'], varargs=None, keywords=None, defaults=(False, -100))
-paddle.fluid.layers.bpr_loss ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None)
+paddle.fluid.layers.bpr_loss ArgSpec(args=['input', 'label', 'name'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.square_error_cost ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None)
 paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,))
 paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None, None))
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 04582acf6b8..e25eaaa9fda 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -1349,7 +1349,7 @@ def cross_entropy(input, label, soft_label=False, ignore_index=kIgnoreIndex):
     return out
 
 
-def bpr_loss(input, label):
+def bpr_loss(input, label, name=None):
     """
     Bayesian Personalized Ranking Loss Operator.
 
@@ -1366,6 +1366,8 @@ def bpr_loss(input, label):
                                 This input is not probability but logits.
         label (Variable|list):  the ground truth which is a 2-D tensor. `label`
                                 is a tensor with shape [N x 1].
+        name (str|None):        A name for this layer (optional). If set None, the
+                                layer will be named automatically. Default: None.
 
     Returns:
         A 2-D tensor with shape [N x 1], the bpr loss.
--
GitLab
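
Below is a minimal usage sketch (not part of the patch) of how the updated signature could be called; the layer names, shapes, and surrounding network are illustrative assumptions, not taken from the commit.

import paddle.fluid as fluid

# Hypothetical example: variable names and shapes are illustrative only.
# `logits` holds per-item scores, `label` the index of the positive item.
logits = fluid.layers.data(name='item_logits', shape=[21], dtype='float32')
label = fluid.layers.data(name='pos_item', shape=[1], dtype='int64')

# The patch adds an optional `name` argument, so the loss output variable
# can be named explicitly instead of being auto-generated.
loss = fluid.layers.bpr_loss(input=logits, label=label, name='bpr_loss_out')
avg_loss = fluid.layers.mean(loss)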