Unverified · Commit a93d9e88 · authored by Feiyu Chan · committed by GitHub

API/OP (margin_rank_loss, nce, row_conv, positive_negative_pair) erro… (#24246) (#24376)

* API/OP (margin_rank_loss, nce, row_conv, glu, positive_negative_pair) error message enhancement, test=release/1.8
Parent 6e3554e4
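The pattern is uniform across the five operators: presence checks move from bare PADDLE_ENFORCE to OP_INOUT_CHECK(..., role, name, op), and shape checks gain platform::errors::InvalidArgument payloads that report the received shapes. Illustratively, a shape mistake now surfaces to Python users roughly like this (a sketch assuming a PaddlePaddle 1.8 build; the exact message text may differ):

# Illustrative only: triggers the enhanced margin_rank_loss shape check.
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    label = fluid.data("label", (5, 2), "float32")  # 2nd dim must be 1
    x1 = fluid.data("x1", (5, 2), "float32")
    x2 = fluid.data("x2", (5, 2), "float32")
    try:
        fluid.layers.margin_rank_loss(label, x1, x2)
    except Exception as e:
        # expected to report the received shape of Input(Label), e.g. [5, 2]
        print(e)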
@@ -14,6 +14,7 @@ limitations under the License. */

 #include "paddle/fluid/operators/margin_rank_loss_op.h"
 #include <memory>
+#include "paddle/fluid/platform/enforce.h"

 namespace paddle {
 namespace operators {

@@ -24,17 +25,42 @@ class MarginRankLossOp : public framework::OperatorWithKernel {

   void InferShape(framework::InferShapeContext *ctx) const override {
     // input check
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "margin_rank_loss");
+    OP_INOUT_CHECK(ctx->HasInput("X1"), "Input", "X1", "margin_rank_loss");
+    OP_INOUT_CHECK(ctx->HasInput("X2"), "Input", "X2", "margin_rank_loss");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "margin_rank_loss");
     auto label_dims = ctx->GetInputDim("Label");
     auto x1_dims = ctx->GetInputDim("X1");
     auto x2_dims = ctx->GetInputDim("X2");
-    PADDLE_ENFORCE(
-        (label_dims == x1_dims) && (x1_dims == x2_dims) &&
-            (label_dims.size() == 2) && (label_dims[1] == 1),
-        "All inputs must be 2-D tensor with shape [batch_size x 1].");
+    PADDLE_ENFORCE_EQ(
+        label_dims, x1_dims,
+        platform::errors::InvalidArgument(
+            "The shape of Input(Label) should equal the shape of "
+            "Input(X1). Received: Input(Label)'s shape: [%s], Input(X1)'s "
+            "shape: [%s].",
+            label_dims, x1_dims));
+    PADDLE_ENFORCE_EQ(
+        x1_dims, x2_dims,
+        platform::errors::InvalidArgument(
+            "The shape of Input(X1) should equal the shape of "
+            "Input(X2). Received: Input(X1)'s shape: [%s], Input(X2)'s "
+            "shape: [%s].",
+            x1_dims, x2_dims));
+    PADDLE_ENFORCE_EQ(
+        label_dims.size(), 2,
+        platform::errors::InvalidArgument(
+            "The dimensions of Input(Label) should be 2. Received: "
+            "the shape of Input(Label): [%s], the dimensions of "
+            "Input(Label): %d.",
+            label_dims, label_dims.size()));
+    PADDLE_ENFORCE_EQ(label_dims[1], 1,
+                      platform::errors::InvalidArgument(
+                          "The second dimension of Input(Label) should be 1. "
+                          "Received: the shape of Input(Label): [%s].",
+                          label_dims));
     ctx->SetOutputDim("Activated", label_dims);
     ctx->SetOutputDim("Out", label_dims);
   }
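The four checks above pin Label, X1 and X2 to identical [batch_size, 1] shapes, matching the operator's elementwise formula. A plain-NumPy sketch of that formula (illustrative, not code from this commit):

import numpy as np

# loss = max(0, -label * (x1 - x2) + margin), elementwise
margin = 0.1
label = 2 * np.random.randint(0, 2, (5, 1)).astype("float32") - 1  # {-1, 1}
x1 = np.random.random((5, 1)).astype("float32")
x2 = np.random.random((5, 1)).astype("float32")
loss = np.maximum(0, -label * (x1 - x2) + margin)
print(loss.shape)  # (5, 1), the shape set for Out and Activated above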
@@ -94,11 +120,17 @@ class MarginRankLossGradOp : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Input(Out@GRAD) shouldn't be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Activated"),
-                   "Intermediate(Activated) shouldn't be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "margin_rank_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "margin_rank_loss_grad");
+    OP_INOUT_CHECK(ctx->HasInput("Activated"), "Input", "Activated",
+                   "margin_rank_loss_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X1")), "Output",
+                   framework::GradVarName("X1"), "margin_rank_loss_grad");
+    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X2")), "Output",
+                   framework::GradVarName("X2"), "margin_rank_loss_grad");
     auto dims = ctx->GetInputDim("Label");
     ctx->SetOutputDim(framework::GradVarName("X1"), dims);
     ctx->SetOutputDim(framework::GradVarName("X2"), dims);
......
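The grad op validates the same inputs plus the forward's saved "Activated" mask. A NumPy sketch of the gradients it produces, assuming the standard subgradient of max(0, -label * (x1 - x2) + margin) (a reading of the kernel, not commit code):

import numpy as np

# Forward saves activated = 1 where -label * (x1 - x2) + margin > 0.
# Then: dX1 = -label * activated * dOut, dX2 = label * activated * dOut.
margin = 0.1
label = np.array([[1.0], [-1.0]], dtype="float32")
x1 = np.array([[0.2], [0.1]], dtype="float32")
x2 = np.array([[0.5], [0.7]], dtype="float32")
d_out = np.ones_like(x1)
activated = ((-label * (x1 - x2) + margin) > 0).astype("float32")
d_x1 = -label * activated * d_out
d_x2 = label * activated * d_out
print(d_x1.ravel(), d_x2.ravel())  # sample 1 activated, sample 2 not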
@@ -28,33 +28,39 @@ class NCEOp : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasInput("Weight"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("Cost"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLogits"), true);
-    PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLabels"), true);
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "nce");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "nce");
+    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "nce");
+
+    OP_INOUT_CHECK(ctx->HasOutput("Cost"), "Output", "Cost", "nce");
+    OP_INOUT_CHECK(ctx->HasOutput("SampleLogits"), "Output", "SampleLogits",
+                   "nce");
+    OP_INOUT_CHECK(ctx->HasOutput("SampleLabels"), "Output", "SampleLabels",
+                   "nce");

     auto x_dims = ctx->GetInputDim("Input");
     auto label_dims = ctx->GetInputDim("Label");
     if (ctx->IsRuntime() || (x_dims[0] > 0 && label_dims[0] > 0)) {
       PADDLE_ENFORCE_EQ(
           x_dims[0], label_dims[0],
-          "ShapeError: the first dimension of Input(Input) and Input(Label) "
-          "should be equal in runtime. But received: Input(Input)'s shape = "
-          "[%s] with 1st dim = %d, Input(Label)'s shape = [%s] with 1st "
-          "dim = %d.",
-          x_dims, x_dims[0], label_dims, label_dims[0]);
+          platform::errors::InvalidArgument(
+              "The first dimension of Input(Input) and Input(Label) should be "
+              "equal in runtime. But received: Input(Input)'s shape = [%s] "
+              "with 1st dim = %d, Input(Label)'s shape = [%s] with 1st dim = "
+              "%d.",
+              x_dims, x_dims[0], label_dims, label_dims[0]));
     }
     int num_true_classes = label_dims.size() == 2 ? label_dims[1] : 1;
     if (ctx->HasInput("Bias")) {
       PADDLE_ENFORCE_EQ(
           ctx->GetInputDim("Weight")[0], ctx->GetInputDim("Bias")[0],
-          "ShapeError: the first dimension of Input(Weight) and Input(Bias) "
-          "should be equal. But received: Input(Weight)'s shape = [%s] with "
-          "1st dim = %d, Input(Bias)'s shape = [%s] with 1st dim = %d.",
-          ctx->GetInputDim("Weight"), ctx->GetInputDim("Weight")[0],
-          ctx->GetInputDim("Bias"), ctx->GetInputDim("Bias")[0]);
+          platform::errors::InvalidArgument(
+              "The first dimension of Input(Weight) and Input(Bias) "
+              "should be equal. But received: Input(Weight)'s shape = [%s] "
+              "with 1st dim = %d, and Input(Bias)'s shape = [%s] with 1st dim "
+              "= %d.",
+              ctx->GetInputDim("Weight"), ctx->GetInputDim("Weight")[0],
+              ctx->GetInputDim("Bias"), ctx->GetInputDim("Bias")[0]));
     }
     auto num_neg_samples = ctx->Attrs().Get<int>("num_neg_samples");
     auto num_total_classes = ctx->Attrs().Get<int>("num_total_classes");

@@ -62,18 +68,20 @@ class NCEOp : public framework::OperatorWithKernel {

         ctx->Attrs().Get<std::vector<int>>("custom_neg_classes");
     PADDLE_ENFORCE_EQ(
         num_total_classes, ctx->GetInputDim("Weight")[0],
-        "ShapeError: the number of total classes should be equal to the first "
-        "dimension of Input(Weight). But received: Attr(num_total_classes) = "
-        "%d, Input(Weight)'s shape = [%s] with 1st dim = %d.",
-        num_total_classes, ctx->GetInputDim("Weight"),
-        ctx->GetInputDim("Weight")[0]);
+        platform::errors::InvalidArgument(
+            "The number of total classes should be equal to the first "
+            "dimension of Input(Weight). But received: Attr(num_total_classes) "
+            "= %d, Input(Weight)'s shape = [%s] with 1st dim = %d.",
+            num_total_classes, ctx->GetInputDim("Weight"),
+            ctx->GetInputDim("Weight")[0]));
     if (custom_neg_classes.size() > 0) {
       PADDLE_ENFORCE_EQ(
           custom_neg_classes.size(), static_cast<size_t>(num_neg_samples),
-          "ShapeError: the size of Attr(custom_neg_classes) should be equal "
-          "to the number of negative samples. But received: "
-          "custom_neg_classes.size() = %d, num_neg_samples = %d.",
-          custom_neg_classes.size(), num_neg_samples);
+          platform::errors::InvalidArgument(
+              "The size of Attr(custom_neg_classes) should be equal "
+              "to the number of negative samples. But received: "
+              "custom_neg_classes.size() = %d, num_neg_samples = %d.",
+              custom_neg_classes.size(), num_neg_samples));
     }
     // set dims of output(Out)
     std::vector<int64_t> out_dims;

@@ -242,12 +250,14 @@ class NCEOpGrad : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"));
-    PADDLE_ENFORCE(ctx->HasInput("Weight"));
-    PADDLE_ENFORCE(ctx->HasInput("SampleLogits"));
-    PADDLE_ENFORCE(ctx->HasInput("SampleLabels"));
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cost")),
-                   "The input(Out@GRAD) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "nce_grad");
+    OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "nce_grad");
+    OP_INOUT_CHECK(ctx->HasInput("SampleLogits"), "Input", "SampleLogits",
+                   "nce_grad");
+    OP_INOUT_CHECK(ctx->HasInput("SampleLabels"), "Input", "SampleLabels",
+                   "nce_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Cost")), "Input",
+                   framework::GradVarName("Cost"), "nce_grad");
     auto x_dims = ctx->GetInputDim("Input");
     auto x_grad_name = framework::GradVarName("Input");
......
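Taken together, the nce hunks above pin down a handful of shape relations and now report which one failed. A small stand-alone sketch of the same contracts, with a hypothetical validator name (not a Paddle API):

# Hypothetical helper mirroring the nce shape contracts checked above.
def validate_nce_shapes(input_shape, label_shape, weight_shape, bias_shape,
                        num_total_classes, custom_neg_classes,
                        num_neg_samples):
    # Input and Label must agree on batch size (1st dim).
    assert input_shape[0] == label_shape[0]
    # Weight (and Bias, if present) must have num_total_classes rows.
    assert weight_shape[0] == num_total_classes
    if bias_shape is not None:
        assert weight_shape[0] == bias_shape[0]
    # Explicit negative classes must match num_neg_samples in count.
    if custom_neg_classes:
        assert len(custom_neg_classes) == num_neg_samples


validate_nce_shapes((64, 128), (64, 1), (1000, 128), (1000,),
                    num_total_classes=1000, custom_neg_classes=[],
                    num_neg_samples=5)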
@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and

 limitations under the License. */

 #include "paddle/fluid/operators/positive_negative_pair_op.h"
+#include "paddle/fluid/platform/enforce.h"

 namespace paddle {
 namespace operators {

@@ -19,24 +20,19 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(
-        ctx->HasInput("Score"),
-        "Input(Score) of PositiveNegativePairOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasInput("Label"),
-        "Input(Label) of PositiveNegativePairOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasInput("QueryID"),
-        "Input(QueryID) of PositiveNegativePairOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("PositivePair"),
-        "Output(PositivePair) of PositiveNegativePairOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("NegativePair"),
-        "Output(NegativePair) of PositiveNegativePairOp should not be null.");
-    PADDLE_ENFORCE(
-        ctx->HasOutput("NeutralPair"),
-        "Output(NeutralPair) of PositiveNegativePairOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Score"), "Input", "Score",
+                   "positive_negative_pair");
+    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
+                   "positive_negative_pair");
+    OP_INOUT_CHECK(ctx->HasInput("QueryID"), "Input", "QueryID",
+                   "positive_negative_pair");
+    OP_INOUT_CHECK(ctx->HasOutput("PositivePair"), "Output", "PositivePair",
+                   "positive_negative_pair");
+    OP_INOUT_CHECK(ctx->HasOutput("NegativePair"), "Output", "NegativePair",
+                   "positive_negative_pair");
+    OP_INOUT_CHECK(ctx->HasOutput("NeutralPair"), "Output", "NeutralPair",
+                   "positive_negative_pair");
     auto scalar_dim = framework::make_ddim({1});
     if (ctx->HasInput("AccumulatePositivePair") ||
         ctx->HasInput("AccumulateNegativePair") ||

@@ -48,43 +44,93 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {

                      "AccumulateNegativePair, AccumulateNeutralPair) of "
                      "PositiveNegativePairOp are required if one of them is "
                      "specified.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulatePositivePair"), scalar_dim,
-                        "Shape of AccumulatePositivePair should be {1}.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNegativePair"), scalar_dim,
-                        "Shape of AccumulateNegativePair should be {1}.");
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNeutralPair"), scalar_dim,
-                        "Shape of AccumulateNeutralPair should be {1}.");
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("AccumulatePositivePair"), scalar_dim,
+          platform::errors::InvalidArgument(
+              "Shape of Input(AccumulatePositivePair) should be [1]. Received "
+              "shape of Input(AccumulatePositivePair): [%s].",
+              ctx->GetInputDim("AccumulatePositivePair")));
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("AccumulateNegativePair"), scalar_dim,
+          platform::errors::InvalidArgument(
+              "Shape of Input(AccumulateNegativePair) should be [1]. Received "
+              "shape of Input(AccumulateNegativePair): [%s].",
+              ctx->GetInputDim("AccumulateNegativePair")));
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("AccumulateNeutralPair"), scalar_dim,
+          platform::errors::InvalidArgument(
+              "Shape of Input(AccumulateNeutralPair) should be [1]. Received "
+              "shape of Input(AccumulateNeutralPair): [%s].",
+              ctx->GetInputDim("AccumulateNeutralPair")));
     }
     auto score_dim = ctx->GetInputDim("Score");
     auto label_dim = ctx->GetInputDim("Label");
     auto query_dim = ctx->GetInputDim("QueryID");
-    PADDLE_ENFORCE_EQ(score_dim.size(), 2, "Score should be a 2-D tensor.");
-    PADDLE_ENFORCE_EQ(label_dim.size(), 2, "Label should be a 2-D tensor.");
+    PADDLE_ENFORCE_EQ(score_dim.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Score should be a 2-D tensor. Received shape of "
+                          "Input(Score): [%s].",
+                          score_dim));
+    PADDLE_ENFORCE_EQ(label_dim.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Label should be a 2-D tensor. Received shape of "
+                          "Input(Label): [%s].",
+                          label_dim));
     if (ctx->IsRuntime() ||
         (score_dim[0] > 0 && label_dim[0] > 0 && query_dim[0] > 0)) {
-      PADDLE_ENFORCE_EQ(
-          label_dim[0], score_dim[0],
-          "Tensor Score and Label should have the same height (batch size).");
-      PADDLE_ENFORCE_EQ(label_dim[1], 1,
-                        "The width of Label should be 1, i.e. each item should "
-                        "have a scalar label.");
-      PADDLE_ENFORCE(query_dim == label_dim,
-                     "QueryID should have the same shape as Label.");
+      PADDLE_ENFORCE_EQ(
+          label_dim[0], score_dim[0],
+          platform::errors::InvalidArgument(
+              "Input(Score) and Input(Label) should have the same "
+              "height (batch size). Received: the shape of Input(Score) is "
+              "[%s], while the shape of Input(Label) is [%s]. The first "
+              "dimensions of them are different.",
+              score_dim, label_dim));
+      PADDLE_ENFORCE_EQ(
+          label_dim[1], 1,
+          platform::errors::InvalidArgument(
+              "The width of Label should be 1, i.e. each item should "
+              "have a scalar label. Received shape of Input(Label) is [%s]. "
+              "The second dimension of it is %d, while the expected is %d.",
+              label_dim, label_dim[1], 1));
+      PADDLE_ENFORCE_EQ(
+          query_dim, label_dim,
+          platform::errors::InvalidArgument(
+              "Input(QueryID) should have the same shape as Input(Label). "
+              "Received: the shape of Input(QueryID) is [%s], "
+              "while the shape of Input(Label) is [%s].",
+              query_dim, label_dim));
       if (ctx->HasInput("Weight")) {
-        PADDLE_ENFORCE(ctx->GetInputDim("Weight") == label_dim,
-                       "Weight should have the same shape as Label.");
+        PADDLE_ENFORCE_EQ(
+            ctx->GetInputDim("Weight"), label_dim,
+            platform::errors::InvalidArgument(
+                "Input(Weight) should have the same shape as Input(Label). "
+                "Received: the shape of Input(Weight) is [%s] while the shape "
+                "of Input(Label) is [%s].",
+                ctx->GetInputDim("Weight"), label_dim));
       }
       int column = ctx->Attrs().Get<int>("column");
       auto depth = score_dim[1];
-      PADDLE_ENFORCE(column < depth && column >= -depth,
-                     "Attribute column should be in the range of [-%l, %l)",
-                     depth, depth);
+      PADDLE_ENFORCE_LT(
+          column, depth,
+          platform::errors::OutOfRange(
+              "Attr(column) should be less than depth (the second "
+              "dimension of Input(Score)). Received Attr(column): %d, while "
+              "depth is %d.",
+              column, depth));
+      PADDLE_ENFORCE_GE(
+          column, -depth,
+          platform::errors::OutOfRange(
+              "Attr(column) should be greater than or equal to negative "
+              "depth, i.e. the second dimension of Input(Score). "
+              "Received Attr(column): %d, while negative depth is %d.",
+              column, -depth));
     }
     ctx->SetOutputDim("PositivePair", scalar_dim);
......
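The paired LT/GE checks above implement Python-style negative indexing for Attr(column) over the second dimension (depth) of Input(Score). A plain-Python sketch of the same [-depth, depth) rule, with an illustrative helper name:

# Illustrative resolver for the [-depth, depth) rule enforced above.
def resolve_column(column, depth):
    if not (-depth <= column < depth):
        raise ValueError("Attr(column) should be in [%d, %d), received %d"
                         % (-depth, depth, column))
    return column if column >= 0 else column + depth


print(resolve_column(1, 4))   # 1
print(resolve_column(-1, 4))  # 3, i.e. the last column of Score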
@@ -15,8 +15,8 @@ limitations under the License. */

 #include <memory>
 #include <string>
 #include <vector>
 #include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/platform/enforce.h"

 namespace paddle {
 namespace operators {

@@ -33,16 +33,17 @@ class RowConvOp : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("X"),
-                   "Input(X) of RowConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) of RowConvOp should not be null.");
-    PADDLE_ENFORCE(ctx->HasOutput("Out"),
-                   "Output(Out) of RowConvOp should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "row_conv");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "row_conv");
+    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "row_conv");

     auto x_dims = ctx->GetInputDim("X");
     auto filter_dims = ctx->GetInputDim("Filter");
-    PADDLE_ENFORCE_EQ(filter_dims.size(), 2, "Input(Y)'s rank should be 2.");
+    PADDLE_ENFORCE_EQ(filter_dims.size(), 2,
+                      platform::errors::InvalidArgument(
+                          "Input(Filter)'s dimensions should be 2. Received: "
+                          "Input(Filter)'s shape: [%s].",
+                          filter_dims));
     ctx->SetOutputDim("Out", x_dims);
     ctx->ShareLoD("X", "Out");

@@ -54,10 +55,9 @@ class RowConvGradOp : public framework::OperatorWithKernel {

   using framework::OperatorWithKernel::OperatorWithKernel;

   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Filter"),
-                   "Input(Filter) should not be null.");
-    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
-                   "Gradient of output(Out) should not be null.");
+    OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "row_conv_grad");
+    OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
+                   framework::GradVarName("Out"), "row_conv_grad");
     auto x_grad_name = framework::GradVarName("X");
     if (ctx->HasOutput(x_grad_name)) {
......
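row_conv is a lookahead convolution: each output step mixes the current step with the next future_context_size steps, which is why Filter must be a 2-D tensor of shape [future_context_size + 1, feature_dim] as checked above. A NumPy sketch of the forward pass on one sequence (a paraphrase of the documented behavior, not commit code):

import numpy as np

def row_conv_single(x, w):
    # x: [T, C] single sequence; w: [context, C] filter.
    # out[t] = sum_i x[t + i] * w[i], truncated at the sequence end.
    T, _ = x.shape
    context = w.shape[0]
    out = np.zeros_like(x)
    for t in range(T):
        for i in range(min(context, T - t)):
            out[t] += x[t + i] * w[i]
    return out

x = np.random.random((6, 20)).astype("float32")
w = np.random.random((3, 20)).astype("float32")  # future_context_size = 2
print(row_conv_single(x, w).shape)  # (6, 20), same as the input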
@@ -1380,12 +1380,9 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):

             out = fluid.layers.margin_rank_loss(label, left, right)
     """
     helper = LayerHelper('margin_rank_loss', **locals())
-    if not isinstance(label, Variable):
-        raise ValueError("The label should be a Variable.")
-    if not isinstance(left, Variable):
-        raise ValueError("The left should be a Variable.")
-    if not isinstance(right, Variable):
-        raise ValueError("The right should be a Variable.")
+    check_variable_and_dtype(label, 'label', ['float32'], 'margin_rank_loss')
+    check_variable_and_dtype(left, 'left', ['float32'], 'margin_rank_loss')
+    check_variable_and_dtype(right, 'right', ['float32'], 'margin_rank_loss')
     out = helper.create_variable_for_type_inference(left.dtype)
     act = helper.create_variable_for_type_inference(left.dtype)
     helper.append_op(
......
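With check_variable_and_dtype in place, a wrong dtype fails fast at graph-construction time with a message naming the layer. An illustrative snippet, assuming a PaddlePaddle 1.8 install:

# Illustrative only: float64 input to the float32-only margin_rank_loss.
import paddle.fluid as fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    label = fluid.data("label", (5, 1), "float64")
    x1 = fluid.data("x1", (5, 1), "float32")
    x2 = fluid.data("x2", (5, 1), "float32")
    try:
        fluid.layers.margin_rank_loss(label, x1, x2)
    except TypeError as e:
        print(e)  # names 'label', 'margin_rank_loss' and the expected dtype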
@@ -6920,6 +6920,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):

             >>> out = fluid.layers.row_conv(input=x, future_context_size=2)
     """
     helper = LayerHelper('row_conv', **locals())
+    check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
     dtype = helper.input_dtype()
     filter_shape = [future_context_size + 1, input.shape[-1]]
     filter_param = helper.create_parameter(
......
@@ -351,7 +351,8 @@ def glu(input, dim=-1):

             # shape of output: [-1, 3, 3, 9]
             output = fluid.nets.glu(input=data, dim=1)
     """
+    check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
+                             "glu")
     a, b = layers.split(input, num_or_sections=2, dim=dim)
     act_b = layers.sigmoid(x=b)
     out = layers.elementwise_mul(x=a, y=act_b)
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
import unittest


def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))


def glu(x, dim=-1):
    a, b = np.split(x, 2, axis=dim)
    out = a * sigmoid(b)
    return out


class TestGLUCase(unittest.TestCase):
    def setUp(self):
        self.x = np.random.randn(5, 20)
        self.dim = -1
        self.out = glu(self.x, self.dim)

    def check_identity(self, place):
        with dg.guard(place):
            x_var = dg.to_variable(self.x)
            y_var = fluid.nets.glu(x_var, self.dim)
            y_np = y_var.numpy()
        np.testing.assert_allclose(y_np, self.out)

    def test_case(self):
        self.check_identity(fluid.CPUPlace())
        if fluid.is_compiled_with_cuda():
            self.check_identity(fluid.CUDAPlace(0))


if __name__ == '__main__':
    unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
 from op_test import OpTest
+from paddle import fluid


 class TestMarginRankLossOp(OpTest):

@@ -51,5 +52,48 @@ class TestMarginRankLossOp(OpTest):

         self.check_grad(["X1"], "Out", no_grad_set=set('X2'))


+class TestMarginRankLossLayer(unittest.TestCase):
+    def setUp(self):
+        self.batch_size = 5
+        self.margin = 0.5
+        # labels_{i} = {-1, 1}
+        self.label = 2 * np.random.randint(
+            0, 2, size=(self.batch_size, 1)).astype("float32") - 1
+        self.x1 = np.random.random((self.batch_size, 1)).astype("float32")
+        self.x2 = np.random.random((self.batch_size, 1)).astype("float32")
+        # loss = max(0, -label * (x1 - x2) + margin)
+        loss = -self.label * (self.x1 - self.x2) + self.margin
+        loss = np.where(loss > 0, loss, 0)
+        self.loss = loss
+
+    def test_identity(self):
+        place = fluid.CPUPlace()
+        self.check_identity(place)
+        if fluid.is_compiled_with_cuda():
+            place = fluid.CUDAPlace(0)
+            self.check_identity(place)
+
+    def check_identity(self, place):
+        main = fluid.Program()
+        start = fluid.Program()
+        with fluid.unique_name.guard():
+            with fluid.program_guard(main, start):
+                label = fluid.data("label", (self.batch_size, 1), "float32")
+                x1 = fluid.data("x1", (self.batch_size, 1), "float32")
+                x2 = fluid.data("x2", (self.batch_size, 1), "float32")
+                out = fluid.layers.margin_rank_loss(label, x1, x2, self.margin)
+
+        exe = fluid.Executor(place)
+        exe.run(start)
+        out_np, = exe.run(
+            main,
+            feed={"label": self.label,
+                  "x1": self.x1,
+                  "x2": self.x2},
+            fetch_list=[out])
+        np.testing.assert_allclose(out_np, self.loss)
+
+
 if __name__ == '__main__':
     unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function

 import unittest
 import numpy as np
 from op_test import OpTest
+from paddle import fluid


 def row_conv_forward(x, lod, wt):

@@ -167,5 +168,35 @@ class TestRowOpWithTensorInput(OpTest):

             ['X'], 'Out', no_grad_set=set('Filter'), check_dygraph=False)


+class TestRowConvLayer(unittest.TestCase):
+    def setUp(self):
+        self.B = 2
+        self.T = 6
+        self.C = 20
+        self.context_length = 6
+
+        self.x = np.random.random((self.B, self.T, self.C)).astype("float32")
+        self.w = np.random.random(
+            (self.context_length, self.C)).astype("float32")
+        self.out = row_conv_foward_Tensor(self.x, self.w)
+
+    def check_identity(self):
+        start = fluid.Program()
+        main = fluid.Program()
+        with fluid.unique_name.guard():
+            with fluid.program_guard(main, start):
+                x = fluid.data("x", (-1, -1, self.C), "float32")
+                out = fluid.layers.row_conv(
+                    x,
+                    self.context_length,
+                    param_attr=fluid.initializer.NumpyArrayInitializer(self.w))
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(start)
+        out_np, = exe.run(main, feed={'x': self.x}, fetch_list=[out])
+        np.testing.assert_allclose(out_np, self.out)
+
+
 if __name__ == '__main__':
     unittest.main()