diff --git a/paddle/operators/margin_rank_loss_op.cc b/paddle/operators/margin_rank_loss_op.cc
new file mode 100644
index 0000000000000000000000000000000000000000..5be61dfec3bb58ab9b658cb59ab0dd49bb67d8cb
--- /dev/null
+++ b/paddle/operators/margin_rank_loss_op.cc
@@ -0,0 +1,124 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/operators/margin_rank_loss_op.h"
+
+namespace paddle {
+namespace operators {
+
+class MarginRankLossOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    // input check
+    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    auto label_dims = ctx->GetInputDim("Label");
+    auto x1_dims = ctx->GetInputDim("X1");
+    auto x2_dims = ctx->GetInputDim("X2");
+    PADDLE_ENFORCE(
+        (label_dims == x1_dims) && (x1_dims == x2_dims) &&
+            (label_dims.size() == 2) && (label_dims[1] == 1),
+        "All inputs must be 2-D tensors with shape [batch_size x 1].");
+    ctx->SetOutputDim("Activated", label_dims);
+    ctx->SetOutputDim("Out", label_dims);
+  }
+};
+
+template <typename T>
+class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  MarginRankLossOpMaker(framework::OpProto *proto,
+                        framework::OpAttrChecker *op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("X1",
+             "(2-D tensor with shape [batch_size x 1]) The score of "
+             "one item X1 to be ranked, from the pairwise ranking model.");
+    AddInput("X2",
+             "(2-D tensor with shape [batch_size x 1]) The score of "
+             "another item X2 to be ranked, from the pairwise ranking model.");
+    AddInput("Label",
+             "(2-D tensor with shape [batch_size x 1]) "
+             "The label indicating whether X1 is ranked higher than X2; "
+             "can only be +1 or -1.");
+    AddAttr<T>("margin", "(scalar, default 0) Margin for MarginRankLossOp.")
+        .SetDefault(static_cast<T>(0));
+    AddOutput("Activated",
+              "(2-D tensor with shape [batch_size x 1]) Intermediate tensor "
+              "indicating whether each element of Output(Out) is activated.")
+        .AsIntermediate();
+    AddOutput("Out",
+              "(2-D tensor with shape [batch_size x 1]) "
+              "The output loss of the MarginRankLoss operator.");
+    AddComment(R"DOC(
+
+MarginRankLoss operator measures the loss given a pair of training samples
+{`X1`, `X2`} and the `Label` with an attribute `margin`, where `Label = +1`
+indicates that `X1` is ranked higher than `X2`, and `Label = -1` otherwise.
+The loss is
+
+loss(X1, X2, Label) = max(0, -Label * (X1 - X2) + margin).
+
+The attribute `margin` here helps make the predictions more robust.
+Denote the item ranked higher as the positive sample and the other as the
+negative sample. If the scores of the two samples satisfy
+
+positive sample - negative sample < margin,
+
+the pair of samples will contribute to the final loss, which will backpropagate
+and train the ranking model to enlarge the difference between the two scores.
+
+For batch input with size `batch_size`, `X1`, `X2` and `Label`
+all have the same shape [batch_size x 1].
+
+)DOC");
+  }
+};
+
+class MarginRankLossGradOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+ protected:
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Activated"),
+                   "Intermediate(Activated) shouldn't be null.");
+    auto dims = ctx->GetInputDim("Label");
+    ctx->SetOutputDim(framework::GradVarName("X1"), dims);
+    ctx->SetOutputDim(framework::GradVarName("X2"), dims);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+namespace ops = paddle::operators;
+
+REGISTER_OP(margin_rank_loss, ops::MarginRankLossOp,
+            ops::MarginRankLossOpMaker<float>, margin_rank_loss_grad,
+            ops::MarginRankLossGradOp);
+REGISTER_OP_CPU_KERNEL(
+    margin_rank_loss,
+    ops::MarginRankLossKernel<paddle::platform::CPUPlace, float>);
+REGISTER_OP_CPU_KERNEL(
+    margin_rank_loss_grad,
+    ops::MarginRankLossGradKernel<paddle::platform::CPUPlace, float>);
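The `AddComment` block above pins down the forward definition. As a sanity reference (not part of the patch), the same formula is easy to check in NumPy; the helper name `margin_rank_loss_forward` and the toy values below are illustrative only:

```python
import numpy as np

def margin_rank_loss_forward(x1, x2, label, margin=0.0):
    """Reference forward pass: loss = max(0, -label * (x1 - x2) + margin)."""
    raw = -label * (x1 - x2) + margin
    loss = np.maximum(raw, 0.0)             # the ReLU in the C++ kernel
    activated = (raw > 0).astype(x1.dtype)  # Heaviside mask, saved for backward
    return loss, activated

# Example: X1 outranks X2 (label = +1) but only by 0.2 < margin = 0.5,
# so the pair still incurs a loss of margin - (x1 - x2) = 0.3.
loss, act = margin_rank_loss_forward(
    np.array([[0.9]]), np.array([[0.7]]), np.array([[1.0]]), margin=0.5)
print(loss)  # [[0.3]]
```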
diff --git a/paddle/operators/margin_rank_loss_op.cu b/paddle/operators/margin_rank_loss_op.cu
new file mode 100644
index 0000000000000000000000000000000000000000..3a639f25d478a712c1030d57c57d7e55de1488b5
--- /dev/null
+++ b/paddle/operators/margin_rank_loss_op.cu
@@ -0,0 +1,24 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include "paddle/operators/margin_rank_loss_op.h"
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_GPU_KERNEL(
+    margin_rank_loss,
+    ops::MarginRankLossKernel<paddle::platform::GPUPlace, float>);
+REGISTER_OP_GPU_KERNEL(
+    margin_rank_loss_grad,
+    ops::MarginRankLossGradKernel<paddle::platform::GPUPlace, float>);
diff --git a/paddle/operators/margin_rank_loss_op.h b/paddle/operators/margin_rank_loss_op.h
new file mode 100644
index 0000000000000000000000000000000000000000..8d0830147ecc465909e8988e90125929829f6f34
--- /dev/null
+++ b/paddle/operators/margin_rank_loss_op.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#pragma once
+
+#include "paddle/framework/eigen.h"
+#include "paddle/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename T>
+struct ReLU {
+  HOSTDEVICE T operator()(const T& val) const {
+    return val > 0 ? val : static_cast<T>(0);
+  }
+};
+
+template <typename T>
+struct Heaviside {
+  HOSTDEVICE T operator()(const T& val) const {
+    return static_cast<T>(val > 0 ? 1 : 0);
+  }
+};
+
+template <typename Place, typename T>
+class MarginRankLossKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const {
+    auto* out_t = ctx.Output<framework::LoDTensor>("Out");
+    auto* act_t = ctx.Output<framework::LoDTensor>("Activated");
+
+    auto* label_t = ctx.Input<framework::Tensor>("Label");
+    auto* x1_t = ctx.Input<framework::Tensor>("X1");
+    auto* x2_t = ctx.Input<framework::Tensor>("X2");
+
+    out_t->mutable_data<T>(ctx.GetPlace());
+    act_t->mutable_data<T>(ctx.GetPlace());
+
+    auto margin = static_cast<T>(ctx.Attr<T>("margin"));
+    auto out = framework::EigenVector<T>::Flatten(*out_t);
+    auto act = framework::EigenVector<T>::Flatten(*act_t);
+
+    auto label = framework::EigenVector<T>::Flatten(*label_t);
+    auto x1 = framework::EigenVector<T>::Flatten(*x1_t);
+    auto x2 = framework::EigenVector<T>::Flatten(*x2_t);
+
+    auto& dev = ctx.GetEigenDevice<Place>();
+    // loss = max(0, -label * (x1 - x2) + margin)
+    out.device(dev) = (-label * (x1 - x2) + margin).unaryExpr(ReLU<T>());
+    // record where the hinge is active; the backward pass reuses this mask
+    act.device(dev) = out.unaryExpr(Heaviside<T>());
+  }
+};
+
+template <typename Place, typename T>
+class MarginRankLossGradKernel : public framework::OpKernel {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const {
+    auto* d_x1_t =
+        ctx.Output<framework::LoDTensor>(framework::GradVarName("X1"));
+    auto* d_x2_t =
+        ctx.Output<framework::LoDTensor>(framework::GradVarName("X2"));
+
+    auto* act_t = ctx.Input<framework::Tensor>("Activated");
+    auto* d_out_t = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
+    auto* label_t = ctx.Input<framework::Tensor>("Label");
+
+    auto d_out = framework::EigenVector<T>::Flatten(*d_out_t);
+    auto act = framework::EigenVector<T>::Flatten(*act_t);
+    auto label = framework::EigenVector<T>::Flatten(*label_t);
+    auto& dev = ctx.GetEigenDevice<Place>();
+
+    // compute d_x1
+    if (d_x1_t) {
+      d_x1_t->mutable_data<T>(ctx.GetPlace());
+      auto d_x1 = framework::EigenVector<T>::Flatten(*d_x1_t);
+      d_x1.device(dev) = -d_out * act * label;
+    }
+    // compute d_x2
+    if (d_x2_t) {
+      d_x2_t->mutable_data<T>(ctx.GetPlace());
+      auto d_x2 = framework::EigenVector<T>::Flatten(*d_x2_t);
+      d_x2.device(dev) = d_out * act * label;
+    }
+  }
+};
+}  // namespace operators
+}  // namespace paddle
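The backward kernel above reuses the saved `Activated` mask rather than recomputing the hinge: wherever `Activated` is 1, d(loss)/d(X1) = -Label and d(loss)/d(X2) = +Label, each scaled by the incoming `Out@GRAD`; elsewhere both gradients are zero. A minimal NumPy sketch of that rule (the function name is illustrative, not part of the patch):

```python
import numpy as np

def margin_rank_loss_backward(d_out, activated, label):
    """Reference backward pass mirroring MarginRankLossGradKernel.

    Where the hinge is active (activated == 1):
      d(loss)/d(x1) = -label, d(loss)/d(x2) = +label, scaled by d_out.
    Elsewhere both gradients are zero.
    """
    d_x1 = -d_out * activated * label
    d_x2 = d_out * activated * label
    return d_x1, d_x2

# Continuing the forward example: with d_out = 1, label = +1 and the hinge
# active, the gradients push the model to raise x1 and lower x2.
d_x1, d_x2 = margin_rank_loss_backward(
    np.ones((1, 1)), np.ones((1, 1)), np.array([[1.0]]))
print(d_x1, d_x2)  # [[-1.]] [[1.]]
```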
diff --git a/python/paddle/v2/framework/tests/test_margin_rank_loss_op.py b/python/paddle/v2/framework/tests/test_margin_rank_loss_op.py
new file mode 100644
index 0000000000000000000000000000000000000000..63378cbc4ec95d7d3c49a92f750b55a8dbc22414
--- /dev/null
+++ b/python/paddle/v2/framework/tests/test_margin_rank_loss_op.py
@@ -0,0 +1,39 @@
+import unittest
+import numpy as np
+from op_test import OpTest
+
+
+class TestMarginRankLossOp(OpTest):
+    def setUp(self):
+        self.op_type = "margin_rank_loss"
+        batch_size = 5
+        margin = 0.5
+        # each label_i is randomly drawn from {-1, 1}
+        label = 2 * np.random.randint(
+            0, 2, size=(batch_size, 1)).astype("float32") - 1
+        x1 = np.random.random((batch_size, 1)).astype("float32")
+        x2 = np.random.random((batch_size, 1)).astype("float32")
+        # loss = max(0, -label * (x1 - x2) + margin)
+        loss = -label * (x1 - x2) + margin
+        loss = np.where(loss > 0, loss, 0)
+        act = np.where(loss > 0, 1., 0.)
+
+        self.attrs = {'margin': margin}
+        self.inputs = {'Label': label, 'X1': x1, 'X2': x2}
+        self.outputs = {'Activated': act, 'Out': loss}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(["X1", "X2"], "Out")
+
+    def test_check_grad_ignore_x1(self):
+        self.check_grad(["X2"], "Out", no_grad_set=set('X1'))
+
+    def test_check_grad_ignore_x2(self):
+        self.check_grad(["X1"], "Out", no_grad_set=set('X2'))
+
+
+if __name__ == '__main__':
+    unittest.main()
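For a quick sanity check outside the `OpTest` harness, the expected output for a small hand-picked batch can be computed with the same expressions the test's `setUp` uses. This is an illustrative sketch with made-up values, not part of the test suite:

```python
import numpy as np

x1 = np.array([[0.2], [0.8]], dtype="float32")
x2 = np.array([[0.6], [0.1]], dtype="float32")
label = np.array([[1.0], [1.0]], dtype="float32")
margin = 0.5

loss = np.maximum(-label * (x1 - x2) + margin, 0)
# Row 0: x1 < x2 although label says x1 ranks higher -> loss 0.9
# Row 1: x1 - x2 = 0.7 > margin -> hinge inactive, loss 0
print(loss)  # [[0.9], [0. ]]
```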