Unverified commit a93d9e88, authored by Feiyu Chan, committed by GitHub

API/OP (margin_rank_loss, nce, row_conv, positive_negative_pair) erro… (#24246) (#24376)

* API/OP (margin_rank_loss, nce, row_conv, glu, positive_negative_pair) error message enhancement, test=release/1.8
Parent 6e3554e4
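The pattern applied across all five operators: presence checks move from bare PADDLE_ENFORCE calls to the OP_INOUT_CHECK macro (which reports the tensor's role, name, and operator), and shape checks gain a typed platform::errors::InvalidArgument payload that echoes the offending shapes. On the Python side, check_variable_and_dtype guards now reject bad inputs at graph-construction time. A minimal sketch of what a caller sees, assuming the fluid 1.8 behavior that a dtype mismatch raises TypeError (the message wording in the comment is illustrative, not verbatim):

from paddle import fluid

with fluid.program_guard(fluid.Program(), fluid.Program()):
    # 'label' deliberately uses the wrong dtype; the new dtype guard
    # raises before the program ever runs, naming the input and the op.
    label = fluid.data("label", (None, 1), "int64")
    left = fluid.data("left", (None, 1), "float32")
    right = fluid.data("right", (None, 1), "float32")
    try:
        fluid.layers.margin_rank_loss(label, left, right)
    except TypeError as e:
        print(e)  # mentions 'label', the expected float32, and 'margin_rank_loss'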
@@ -14,6 +14,7 @@ limitations under the License. */
#include "paddle/fluid/operators/margin_rank_loss_op.h"
#include <memory>
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
@@ -24,17 +25,42 @@ class MarginRankLossOp : public framework::OperatorWithKernel {
void InferShape(framework::InferShapeContext *ctx) const override {
// input check
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X1"), "Input(X1) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X2"), "Input(X2) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) shouldn't be null.");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"margin_rank_loss");
OP_INOUT_CHECK(ctx->HasInput("X1"), "Input", "X1", "margin_rank_loss");
OP_INOUT_CHECK(ctx->HasInput("X2"), "Input", "X2", "margin_rank_loss");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "margin_rank_loss");
auto label_dims = ctx->GetInputDim("Label");
auto x1_dims = ctx->GetInputDim("X1");
auto x2_dims = ctx->GetInputDim("X2");
PADDLE_ENFORCE(
(label_dims == x1_dims) && (x1_dims == x2_dims) &&
(label_dims.size() == 2) && (label_dims[1] == 1),
"All inputs must be 2-D tensor with shape [batch_size x 1].");
PADDLE_ENFORCE_EQ(
label_dims, x1_dims,
platform::errors::InvalidArgument(
"The shape of Input(Label) shape should equals the shape of "
"Input(X1). Received: Input(Label)'s shape: [%s], Input(X1)'s "
"shape: [%s].",
label_dims, x1_dims));
PADDLE_ENFORCE_EQ(
x1_dims, x2_dims,
platform::errors::InvalidArgument(
"The shape of Input(X1) shape should equals the shape of "
"Input(X2). Received: Input(X1)'s shape: [%s], Input(X2)'s shape: "
"[%s].",
x1_dims, x2_dims));
PADDLE_ENFORCE_EQ(
label_dims.size(), 2,
platform::errors::InvalidArgument(
"The dimensions of Input(Label) should be 2. Received: "
"the shape of Input(Label): [%s], the dimensions of Input(Label): "
"%d.",
label_dims, label_dims.size()));
PADDLE_ENFORCE_EQ(label_dims[1], 1,
platform::errors::InvalidArgument(
"The second dimension of Input(Lable) should be 1"
"Received: the shape of Input(Label): [%s].",
label_dims));
ctx->SetOutputDim("Activated", label_dims);
ctx->SetOutputDim("Out", label_dims);
}
@@ -94,11 +120,17 @@ class MarginRankLossGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("Activated"),
"Intermediate(Activated) shouldn't be null.");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"margin_rank_loss_grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
framework::GradVarName("Out"), "margin_rank_loss_grad");
OP_INOUT_CHECK(ctx->HasInput("Activated"), "Input", "Activated",
"margin_rank_loss_grad");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X1")), "Output",
framework::GradVarName("X1"), "margin_rank_loss_grad");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X2")), "Output",
framework::GradVarName("X2"), "margin_rank_loss_grad");
auto dims = ctx->GetInputDim("Label");
ctx->SetOutputDim(framework::GradVarName("X1"), dims);
ctx->SetOutputDim(framework::GradVarName("X2"), dims);
......
@@ -28,33 +28,39 @@ class NCEOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true);
PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true);
PADDLE_ENFORCE_EQ(ctx->HasInput("Weight"), true);
PADDLE_ENFORCE_EQ(ctx->HasOutput("Cost"), true);
PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLogits"), true);
PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLabels"), true);
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "nce");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "nce");
OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "nce");
OP_INOUT_CHECK(ctx->HasOutput("Cost"), "Output", "Cost", "nce");
OP_INOUT_CHECK(ctx->HasOutput("SampleLogits"), "Output", "SampleLogits",
"nce");
OP_INOUT_CHECK(ctx->HasOutput("SampleLabels"), "Output", "SampleLabels",
"nce");
auto x_dims = ctx->GetInputDim("Input");
auto label_dims = ctx->GetInputDim("Label");
if (ctx->IsRuntime() || (x_dims[0] > 0 && label_dims[0] > 0)) {
PADDLE_ENFORCE_EQ(
x_dims[0], label_dims[0],
"ShapeError: the first dimension of Input(Input) and Input(Label) "
"should be equal in runtime. But received: Input(Input)'s shape = "
"[%s] with 1st dim = %d, Input(Label)'s shape = [%s] with 1st "
"dim = %d.",
x_dims, x_dims[0], label_dims, label_dims[0]);
platform::errors::InvalidArgument(
"The first dimension of Input(Input) and Input(Label) should be "
"equal in runtime. But received: Input(Input)'s shape = [%s] "
"with 1st dim = %d, Input(Label)'s shape = [%s] with 1st dim = "
"%d.",
x_dims, x_dims[0], label_dims, label_dims[0]));
}
int num_true_classes = label_dims.size() == 2 ? label_dims[1] : 1;
if (ctx->HasInput("Bias")) {
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("Weight")[0], ctx->GetInputDim("Bias")[0],
"ShapeError: the first dimension of Input(Weight) and Input(Bias) "
"should be equal. But received: Input(Weight)'s shape = [%s] with "
"1st dim = %d, Input(Bias)'s shape = [%s] with 1st dim = %d.",
platform::errors::InvalidArgument(
"The first dimension of Input(Weight) and Input(Bias) "
"should be equal. But received: Input(Weight)'s shape = [%s] "
"with 1st dim = %d, and Input(Bias)'s shape = [%s] with 1st dim "
"= %d.",
ctx->GetInputDim("Weight"), ctx->GetInputDim("Weight")[0],
ctx->GetInputDim("Bias"), ctx->GetInputDim("Bias")[0]);
ctx->GetInputDim("Bias"), ctx->GetInputDim("Bias")[0]));
}
auto num_neg_samples = ctx->Attrs().Get<int>("num_neg_samples");
auto num_total_classes = ctx->Attrs().Get<int>("num_total_classes");
@@ -62,18 +68,20 @@ class NCEOp : public framework::OperatorWithKernel {
ctx->Attrs().Get<std::vector<int>>("custom_neg_classes");
PADDLE_ENFORCE_EQ(
num_total_classes, ctx->GetInputDim("Weight")[0],
"ShapeError: the number of total classes should be equal to the first "
"dimension of Input(Weight). But received: Attr(num_total_classes) = "
"%d, Input(Weight)'s shape = [%s] with 1st dim = %d.",
platform::errors::InvalidArgument(
"The number of total classes should be equal to the first "
"dimension of Input(Weight). But received: Attr(num_total_classes) "
"= %d, Input(Weight)'s shape = [%s] with 1st dim = %d.",
num_total_classes, ctx->GetInputDim("Weight"),
ctx->GetInputDim("Weight")[0]);
ctx->GetInputDim("Weight")[0]));
if (custom_neg_classes.size() > 0) {
PADDLE_ENFORCE_EQ(
custom_neg_classes.size(), static_cast<size_t>(num_neg_samples),
"ShapeError: the size of Attr(custom_neg_classes) should be equal "
platform::errors::InvalidArgument(
"The size of Attr(custom_neg_classes) should be equal "
"to the number of negative samples. But received: "
"custom_neg_classes.size() = %d, num_neg_samples = %d.",
custom_neg_classes.size(), num_neg_samples);
custom_neg_classes.size(), num_neg_samples));
}
// set dims of output(Out)
std::vector<int64_t> out_dims;
@@ -242,12 +250,14 @@ class NCEOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Input"));
PADDLE_ENFORCE(ctx->HasInput("Weight"));
PADDLE_ENFORCE(ctx->HasInput("SampleLogits"));
PADDLE_ENFORCE(ctx->HasInput("SampleLabels"));
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Cost")),
"The input(Out@GRAD) should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "nce_grad");
OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "nce_grad");
OP_INOUT_CHECK(ctx->HasInput("SampleLogits"), "Input", "SampleLogits",
"nce_grad");
OP_INOUT_CHECK(ctx->HasInput("SampleLabels"), "Input", "SampleLabels",
"nce_grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Cost")), "Input",
framework::GradVarName("Cost"), "nce_grad");
auto x_dims = ctx->GetInputDim("Input");
auto x_grad_name = framework::GradVarName("Input");
......
@@ -10,6 +10,7 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/operators/positive_negative_pair_op.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
@@ -19,24 +20,19 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(
ctx->HasInput("Score"),
"Input(Score) of PositiveNegativePairOp should not be null.");
PADDLE_ENFORCE(
ctx->HasInput("Label"),
"Input(Label) of PositiveNegativePairOp should not be null.");
PADDLE_ENFORCE(
ctx->HasInput("QueryID"),
"Input(QueryID) of PositiveNegativePairOp should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("PositivePair"),
"Output(PositivePair) of PositiveNegativePairOp should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("NegativePair"),
"Output(NegativePair) of PositiveNegativePairOp should not be null.");
PADDLE_ENFORCE(
ctx->HasOutput("NeutralPair"),
"Output(NeutralPair) of PositiveNegativePairOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Score"), "Input", "Score",
"positive_negative_pair");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
"positive_negative_pair");
OP_INOUT_CHECK(ctx->HasInput("QueryID"), "Input", "QueryID",
"positive_negative_pair");
OP_INOUT_CHECK(ctx->HasOutput("PositivePair"), "Output", "PositivePair",
"positive_negative_pair");
OP_INOUT_CHECK(ctx->HasOutput("NegativePair"), "Output", "NegativePair",
"positive_negative_pair");
OP_INOUT_CHECK(ctx->HasOutput("NeutralPair"), "Output", "NeutralPair",
"positive_negative_pair");
auto scalar_dim = framework::make_ddim({1});
if (ctx->HasInput("AccumulatePositivePair") ||
ctx->HasInput("AccumulateNegativePair") ||
@@ -48,43 +44,93 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel {
"AccumulateNegativePair, AccumulateNeutralPair) of "
"PositiveNegativePairOp are required if one of them is "
"specified.");
PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulatePositivePair"), scalar_dim,
"Shape of AccumulatePositivePair should be {1}.");
PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNegativePair"), scalar_dim,
"Shape of AccumulateNegativePair should be {1}.");
PADDLE_ENFORCE_EQ(ctx->GetInputDim("AccumulateNeutralPair"), scalar_dim,
"Shape of AccumulateNeutralPair should be {1}.");
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("AccumulatePositivePair"), scalar_dim,
platform::errors::InvalidArgument(
"Shape of Input(AccumulatePositivePair) should be [1]. Received "
"shape of Input(AccumulatePositivePair): [%s].",
ctx->GetInputDim("AccumulatePositivePair")));
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("AccumulateNegativePair"), scalar_dim,
platform::errors::InvalidArgument(
"Shape of Input(AccumulateNegativePair) should be [1]. Received "
"shape of Input(AccumulateNegativePair): [%s].",
ctx->GetInputDim("AccumulateNegativePair")));
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("AccumulateNeutralPair"), scalar_dim,
platform::errors::InvalidArgument(
"Shape of Input(AccumulateNeutralPair) should be [1]. Received "
"shape of Input(AccumulateNeutralPair): [%s].",
ctx->GetInputDim("AccumulateNeutralPair")));
}
auto score_dim = ctx->GetInputDim("Score");
auto label_dim = ctx->GetInputDim("Label");
auto query_dim = ctx->GetInputDim("QueryID");
PADDLE_ENFORCE_EQ(score_dim.size(), 2, "Score should be a 2-D tensor.");
PADDLE_ENFORCE_EQ(label_dim.size(), 2, "Label should be a 2-D tensor.");
PADDLE_ENFORCE_EQ(score_dim.size(), 2,
platform::errors::InvalidArgument(
"Score should be a 2-D tensor. Received shape of "
"Input(Score): [%s].",
score_dim));
PADDLE_ENFORCE_EQ(label_dim.size(), 2,
platform::errors::InvalidArgument(
"Label should be a 2-D tensor. Received shape of "
"Input(Label): [%s].",
label_dim));
if (ctx->IsRuntime() ||
(score_dim[0] > 0 && label_dim[0] > 0 && query_dim[0] > 0)) {
PADDLE_ENFORCE_EQ(
label_dim[0], score_dim[0],
"Tensor Score and Label should have the same height (batch size).");
platform::errors::InvalidArgument(
"Input(Score) and Input(Label) should have the same "
"height (batch size). Received: the shape of Input(Score) is "
"[%s], while the shape of Input(Label) is [%s]. The first "
"dimensions of them are different.",
label_dim, score_dim));
PADDLE_ENFORCE_EQ(label_dim[1], 1,
PADDLE_ENFORCE_EQ(
label_dim[1], 1,
platform::errors::InvalidArgument(
"The width of Label should be 1, i.e. each item should "
"have a scalar label.");
"have a scalar label. Received shape of Input(Label) is [%s]. "
"The second dimension of it is %d, while the expected is %d.",
label_dim, label_dim[1], 1));
PADDLE_ENFORCE(query_dim == label_dim,
"QueryID should have the same shape as Label.");
PADDLE_ENFORCE_EQ(
query_dim, label_dim,
platform::errors::InvalidArgument(
"Input(QueryID) should have the same shape as Input(Label). "
"Received: the shape of Input(QueryID) is [%s], "
"while the shape of Input(Label) is [%s].",
query_dim, label_dim));
if (ctx->HasInput("Weight")) {
PADDLE_ENFORCE(ctx->GetInputDim("Weight") == label_dim,
"Weight should have the same shape as Label.");
PADDLE_ENFORCE_EQ(
ctx->GetInputDim("Weight"), label_dim,
platform::errors::InvalidArgument(
"Input(Weight) should have the same shape as Input(Label). "
"Received: the shape of Input(Weight) is [%s] while the shape "
"of Input(Label) is [%s].",
ctx->GetInputDim("Weight"), label_dim));
}
int column = ctx->Attrs().Get<int>("column");
auto depth = score_dim[1];
PADDLE_ENFORCE(column < depth && column >= -depth,
"Attribute column should be in the range of [-%l, %l)",
depth, depth);
PADDLE_ENFORCE_LT(
column, depth,
platform::errors::OutOfRange(
"Attr(column) should be less than depth(the second "
"dimension of Input(Score)). Recieved Attr(column): %d, while "
"depth is %d.",
column, depth));
PADDLE_ENFORCE_GE(
column, -depth,
platform::errors::OutOfRange(
"Attr(column) should be greater than equal to negative "
"depth, i.e. the second dimension of Input(Score). "
"Recieved Attr(column): %d, while negative depth is %d.",
column, -depth));
}
ctx->SetOutputDim("PositivePair", scalar_dim);
......
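The two range checks above amount to Python-style negative indexing over the score columns: Attr(column) must lie in [-depth, depth), where depth is the second dimension of Input(Score). A small numpy sketch of the presumed resolution rule (the `resolved` variable is my own illustration, not a name from the op):

import numpy as np

score = np.random.rand(4, 3)  # depth = 3, so column must be in [-3, 3)
column = -1                   # negative values count from the rightmost column
resolved = column + score.shape[1] if column < 0 else column
print(score[:, resolved])     # the score column the op actually compares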
@@ -15,8 +15,8 @@ limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/platform/enforce.h"
namespace paddle {
namespace operators {
@@ -33,16 +33,17 @@ class RowConvOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of RowConvOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Filter"),
"Input(Filter) of RowConvOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of RowConvOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "row_conv");
OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "row_conv");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "row_conv");
auto x_dims = ctx->GetInputDim("X");
auto filter_dims = ctx->GetInputDim("Filter");
PADDLE_ENFORCE_EQ(filter_dims.size(), 2, "Input(Y)'s rank should be 2.");
PADDLE_ENFORCE_EQ(filter_dims.size(), 2,
platform::errors::InvalidArgument(
"Input(Filter)'s dimensions should be 2. Received: "
"Input(Filter)'s shape: [%s].",
filter_dims));
ctx->SetOutputDim("Out", x_dims);
ctx->ShareLoD("X", "Out");
@@ -54,10 +55,9 @@ class RowConvGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Filter"),
"Input(Filter) should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Gradient of output(Out) should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Filter"), "Input", "Filter", "row_conv_grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")), "Input",
framework::GradVarName("Out"), "row_conv_grad");
auto x_grad_name = framework::GradVarName("X");
if (ctx->HasOutput(x_grad_name)) {
......
@@ -1380,12 +1380,9 @@ def margin_rank_loss(label, left, right, margin=0.1, name=None):
out = fluid.layers.margin_rank_loss(label, left, right)
"""
helper = LayerHelper('margin_rank_loss', **locals())
if not isinstance(label, Variable):
raise ValueError("The label should be a Variable.")
if not isinstance(left, Variable):
raise ValueError("The left should be a Variable.")
if not isinstance(right, Variable):
raise ValueError("The right should be a Variable.")
check_variable_and_dtype(label, 'label', ['float32'], 'margin_rank_loss')
check_variable_and_dtype(left, 'left', ['float32'], 'margin_rank_loss')
check_variable_and_dtype(right, 'right', ['float32'], 'margin_rank_loss')
out = helper.create_variable_for_type_inference(left.dtype)
act = helper.create_variable_for_type_inference(left.dtype)
helper.append_op(
......
@@ -6920,6 +6920,7 @@ def row_conv(input, future_context_size, param_attr=None, act=None):
>>> out = fluid.layers.row_conv(input=x, future_context_size=2)
"""
helper = LayerHelper('row_conv', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'row_conv')
dtype = helper.input_dtype()
filter_shape = [future_context_size + 1, input.shape[-1]]
filter_param = helper.create_parameter(
......
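For reference, row_conv is the lookahead convolution from Deep Speech 2: each output frame mixes the current frame with the next future_context_size frames, channel-wise, which is why filter_shape above is [future_context_size + 1, input.shape[-1]]. A minimal numpy sketch of the forward pass under that reading (my own reference implementation, not code from this change):

import numpy as np

def row_conv_ref(x, w):
    # x: [batch, time, channels]; w: [future_context + 1, channels]
    _, time, _ = x.shape
    out = np.zeros_like(x)
    for t in range(time):
        for j in range(w.shape[0]):
            if t + j < time:  # frames past the sequence end contribute nothing
                out[:, t, :] += x[:, t + j, :] * w[j, :]
    return out

x = np.random.randn(2, 6, 20).astype("float32")
w = np.random.randn(3, 20).astype("float32")  # future_context_size = 2
print(row_conv_ref(x, w).shape)               # (2, 6, 20), same as the input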
@@ -351,7 +351,8 @@ def glu(input, dim=-1):
# shape of output: [-1, 3, 3, 9]
output = fluid.nets.glu(input=data, dim=1)
"""
check_variable_and_dtype(input, 'input', ['float16', 'float32', 'float64'],
"glu")
a, b = layers.split(input, num_or_sections=2, dim=dim)
act_b = layers.sigmoid(x=b)
out = layers.elementwise_mul(x=a, y=act_b)
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from paddle import fluid
import paddle.fluid.dygraph as dg
import unittest
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def glu(x, dim=-1):
a, b = np.split(x, 2, axis=dim)
out = a * sigmoid(b)
return out
class TestGLUCase(unittest.TestCase):
def setUp(self):
self.x = np.random.randn(5, 20)
self.dim = -1
self.out = glu(self.x, self.dim)
def check_identity(self, place):
with dg.guard(place):
x_var = dg.to_variable(self.x)
y_var = fluid.nets.glu(x_var, self.dim)
y_np = y_var.numpy()
np.testing.assert_allclose(y_np, self.out)
def test_case(self):
self.check_identity(fluid.CPUPlace())
if fluid.is_compiled_with_cuda():
self.check_identity(fluid.CUDAPlace(0))
if __name__ == '__main__':
unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle import fluid
class TestMarginRankLossOp(OpTest):
@@ -51,5 +52,48 @@ class TestMarginRankLossOp(OpTest):
self.check_grad(["X1"], "Out", no_grad_set=set('X2'))
class TestMarginRankLossLayer(unittest.TestCase):
def setUp(self):
self.batch_size = 5
self.margin = 0.5
# labels_{i} = {-1, 1}
self.label = 2 * np.random.randint(
0, 2, size=(self.batch_size, 1)).astype("float32") - 1
self.x1 = np.random.random((self.batch_size, 1)).astype("float32")
self.x2 = np.random.random((self.batch_size, 1)).astype("float32")
# loss = max(0, -label * (x1 - x2) + margin)
loss = -self.label * (self.x1 - self.x2) + self.margin
loss = np.where(loss > 0, loss, 0)
self.loss = loss
def test_identity(self):
place = fluid.CPUPlace()
self.check_identity(place)
if fluid.is_compiled_with_cuda():
place = fluid.CUDAPlace(0)
self.check_identity(place)
def check_identity(self, place):
main = fluid.Program()
start = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
label = fluid.data("label", (self.batch_size, 1), "float32")
x1 = fluid.data("x1", (self.batch_size, 1), "float32")
x2 = fluid.data("x2", (self.batch_size, 1), "float32")
out = fluid.layers.margin_rank_loss(label, x1, x2, self.margin)
exe = fluid.Executor(place)
exe.run(start)
out_np, = exe.run(
main,
feed={"label": self.label,
"x1": self.x1,
"x2": self.x2},
fetch_list=[out])
np.testing.assert_allclose(out_np, self.loss)
if __name__ == '__main__':
unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle import fluid
def row_conv_forward(x, lod, wt):
@@ -167,5 +168,35 @@ class TestRowOpWithTensorInput(OpTest):
['X'], 'Out', no_grad_set=set('Filter'), check_dygraph=False)
class TestRowConvLayer(unittest.TestCase):
def setUp(self):
self.B = 2
self.T = 6
self.C = 20
self.context_length = 6
self.x = np.random.random((self.B, self.T, self.C)).astype("float32")
self.w = np.random.random(
(self.context_length, self.C)).astype("float32")
self.out = row_conv_foward_Tensor(self.x, self.w)
def check_identity(self):
start = fluid.Program()
main = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(main, start):
x = fluid.data("x", (-1, -1, self.C), "float32")
out = fluid.layers.row_conv(
x,
self.context_length,
param_attr=fluid.initializer.NumpyArrayInitializer(self.w))
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(start)
out_np, = exe.run(main, feed={'x': self.x}, fetch_list=[out])
np.testing.assert_allclose(out_np, self.out)
if __name__ == '__main__':
unittest.main()