Commit 4db50fbc, authored by: Yibing Liu

adapt to the new infershape interface

Parent: e303897f
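For orientation, the sketch below condenses the pattern this commit migrates to: shape inference now runs against a framework::InferShapeContextBase pointer, presence checks go through HasInput/HasOutput, and output shapes are set with SetOutputDim instead of resizing output tensors directly. The ExampleOp class, the "X"/"Out" names, and the include path are illustrative assumptions, not part of this commit; only the context-API calls visible in the diff below are taken from it.

// Minimal sketch of the new-style InferShape (hypothetical ExampleOp; the
// header path is assumed for the code base of that period).
#include "paddle/framework/op_registry.h"

namespace paddle {
namespace operators {

class ExampleOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

 protected:
  void InferShape(framework::InferShapeContextBase *ctx) const override {
    // Presence checks replace the old PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(...)).
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
    // Shapes are read and written through the context, not via Tensor::Resize().
    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
  }
};

}  // namespace operators
}  // namespace paddle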
@@ -22,28 +22,21 @@ class MarginRankLossOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

  protected:
-  void InferShape(const framework::InferShapeContext &ctx) const override {
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
     // input check
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
-                            "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X1"), "Input(X1) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X2"), "Input(X2) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.OutputVar("Out"),
-                            "Output(X2) shouldn't be null.");
-    auto label_dims = ctx.Input<framework::Tensor>("Label")->dims();
-    auto x1_dims = ctx.Input<framework::Tensor>("X1")->dims();
-    auto x2_dims = ctx.Input<framework::Tensor>("X2")->dims();
-    PADDLE_ENFORCE((label_dims == x1_dims) && (x1_dims == x2_dims) &&
-                       (label_dims.size() == 2) && (label_dims[1] == 1),
-                   "All inputs must be vector with the same size.");
-    auto act_t = ctx.Output<framework::LoDTensor>("Activated");
-    auto out_t = ctx.Output<framework::LoDTensor>("Out");
-    if (act_t) {
-      act_t->Resize(label_dims);
-    }
-    if (out_t) {
-      out_t->Resize(label_dims);
-    }
+    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    auto label_dims = ctx->GetInputDim("Label");
+    auto x1_dims = ctx->GetInputDim("X1");
+    auto x2_dims = ctx->GetInputDim("X2");
+    PADDLE_ENFORCE(
+        (label_dims == x1_dims) && (x1_dims == x2_dims) &&
+            (label_dims.size() == 2) && (label_dims[1] == 1),
+        "All inputs must be 2-D tensor with shape [batch_size x 1].");
+    ctx->SetOutputDim("Activated", label_dims);
+    ctx->SetOutputDim("Out", label_dims);
   }
 };
@@ -71,7 +64,7 @@ class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker {
         .AsIntermediate();
     AddOutput("Out",
               "(2-D tensor with shape [batch_size x 1])"
-              "The output loss of MarginRankLoss operator");
+              "The output loss of MarginRankLoss operator.");
     AddComment(R"DOC(

 MarginRankLoss operator measures the loss given a pair of input {`X1`, `X2`}
@@ -96,26 +89,17 @@ class MarginRankLossGradOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;

  protected:
-  void InferShape(const framework::InferShapeContext &ctx) const override {
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Label"),
-                            "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X1"), "Input(X1) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("X2"), "Input(X2) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar(framework::GradVarName("Out")),
-                            "Input(Out@GRAD) shouldn't be null.");
-    PADDLE_ENFORCE_NOT_NULL(ctx.InputVar("Activated"),
-                            "Intermediate(Activated) shouldn't be null.");
-    auto dims = ctx.Input<framework::Tensor>("X1")->dims();
-    auto *x1_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X1"));
-    auto *x2_grad =
-        ctx.Output<framework::LoDTensor>(framework::GradVarName("X2"));
-    if (x1_grad) {
-      x1_grad->Resize(dims);
-    }
-    if (x2_grad) {
-      x2_grad->Resize(dims);
-    }
+  void InferShape(framework::InferShapeContextBase *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) shouldn't be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Activated"),
+                   "Intermediate(Activated) shouldn't be null.");
+    auto dims = ctx->GetInputDim("Label");
+    ctx->SetOutputDim(framework::GradVarName("X1"), dims);
+    ctx->SetOutputDim(framework::GradVarName("X2"), dims);
   }
 };
......
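To make concrete what the operator touched by this diff computes, here is a small self-contained example using the standard margin ranking loss, loss = max(0, -label * (x1 - x2) + margin). The formula, the margin value, and the sample data are assumptions for illustration (the commit itself only changes shape inference) and are not taken from the operator's kernel code.

// Standalone illustration (not Paddle code): the usual margin ranking loss
// over a toy batch. The margin and the values below are made up for this sketch.
#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
  const float margin = 0.1f;
  std::vector<float> x1 = {0.9f, 0.2f, 0.5f};
  std::vector<float> x2 = {0.4f, 0.7f, 0.5f};
  std::vector<float> label = {1.f, 1.f, -1.f};  // +1 means X1 should rank above X2
  for (size_t i = 0; i < x1.size(); ++i) {
    // loss_i = max(0, -label_i * (x1_i - x2_i) + margin)
    float loss = std::max(0.f, -label[i] * (x1[i] - x2[i]) + margin);
    std::printf("sample %zu: loss = %.3f\n", i, loss);
  }
  return 0;
}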