diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc
index 0ccc5d30b3141b029b157fd8a046c4dbeab22c23..e78fda111397dfc89023375f1bea175a615b4c03 100644
--- a/paddle/fluid/operators/nce_op.cc
+++ b/paddle/fluid/operators/nce_op.cc
@@ -27,31 +27,52 @@ class NCEOp : public framework::OperatorWithKernel {
   using framework::OperatorWithKernel::OperatorWithKernel;
 
   void InferShape(framework::InferShapeContext *ctx) const override {
-    PADDLE_ENFORCE(ctx->HasInput("Input"));
-    PADDLE_ENFORCE(ctx->HasInput("Label"));
-    PADDLE_ENFORCE(ctx->HasInput("Weight"));
-    PADDLE_ENFORCE(ctx->HasOutput("Cost"));
-    PADDLE_ENFORCE(ctx->HasOutput("SampleLogits"));
-    PADDLE_ENFORCE(ctx->HasOutput("SampleLabels"));
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true);
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true);
+    PADDLE_ENFORCE_EQ(ctx->HasInput("Weight"), true);
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("Cost"), true);
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLogits"), true);
+    PADDLE_ENFORCE_EQ(ctx->HasOutput("SampleLabels"), true);
 
     auto x_dims = ctx->GetInputDim("Input");
     auto label_dims = ctx->GetInputDim("Label");
     if (ctx->IsRuntime() || (x_dims[0] > 0 && label_dims[0] > 0)) {
-      PADDLE_ENFORCE_EQ(x_dims[0], label_dims[0]);
+      PADDLE_ENFORCE_EQ(
+          x_dims[0], label_dims[0],
+          "ShapeError: the first dimension of Input(Input) and Input(Label) "
+          "should be equal in runtime. But received: Input(Input)'s shape = "
+          "[%s] with 1st dim = %d, Input(Label)'s shape = [%s] with 1st "
+          "dim = %d.",
+          x_dims, x_dims[0], label_dims, label_dims[0]);
     }
     int num_true_classes = label_dims.size() == 2 ? label_dims[1] : 1;
     if (ctx->HasInput("Bias")) {
-      PADDLE_ENFORCE_EQ(ctx->GetInputDim("Weight")[0],
-                        ctx->GetInputDim("Bias")[0]);
+      PADDLE_ENFORCE_EQ(
+          ctx->GetInputDim("Weight")[0], ctx->GetInputDim("Bias")[0],
+          "ShapeError: the first dimension of Input(Weight) and Input(Bias) "
+          "should be equal. But received: Input(Weight)'s shape = [%s] with "
+          "1st dim = %d, Input(Bias)'s shape = [%s] with 1st dim = %d.",
+          ctx->GetInputDim("Weight"), ctx->GetInputDim("Weight")[0],
+          ctx->GetInputDim("Bias"), ctx->GetInputDim("Bias")[0]);
     }
     auto num_neg_samples = ctx->Attrs().Get<int>("num_neg_samples");
     auto num_total_classes = ctx->Attrs().Get<int>("num_total_classes");
     std::vector<int> custom_neg_classes =
         ctx->Attrs().Get<std::vector<int>>("custom_neg_classes");
-    PADDLE_ENFORCE_EQ(num_total_classes, ctx->GetInputDim("Weight")[0]);
+    PADDLE_ENFORCE_EQ(
+        num_total_classes, ctx->GetInputDim("Weight")[0],
+        "ShapeError: the number of total classes should be equal to the first "
+        "dimension of Input(Weight). But received: Attr(num_total_classes) = "
+        "%d, Input(Weight)'s shape = [%s] with 1st dim = %d.",
+        num_total_classes, ctx->GetInputDim("Weight"),
+        ctx->GetInputDim("Weight")[0]);
     if (custom_neg_classes.size() > 0) {
-      PADDLE_ENFORCE_EQ(custom_neg_classes.size(),
-                        static_cast<size_t>(num_neg_samples));
+      PADDLE_ENFORCE_EQ(
+          custom_neg_classes.size(), static_cast<size_t>(num_neg_samples),
+          "ShapeError: the size of Attr(custom_neg_classes) should be equal "
+          "to the number of negative samples. But received: "
+          "custom_neg_classes.size() = %d, num_neg_samples = %d.",
+          custom_neg_classes.size(), num_neg_samples);
     }
     // set dims of output(Out)
     std::vector<int64_t> out_dims;
diff --git a/paddle/fluid/operators/nce_op.h b/paddle/fluid/operators/nce_op.h
index 1f2f778bcd75d083e33ae43ed66f5ba345356003..f66d046402cbacea690fe96c2877aec9371b6bd6 100644
--- a/paddle/fluid/operators/nce_op.h
+++ b/paddle/fluid/operators/nce_op.h
@@ -102,9 +102,27 @@ class NCEKernel : public framework::OpKernel<T> {
         auto dist_alias = context.Input<Tensor>("CustomDistAlias");
         auto dist_alias_probs = context.Input<Tensor>("CustomDistAliasProbs");
 
-        PADDLE_ENFORCE_EQ(dist_probs->numel(), num_total_classes);
-        PADDLE_ENFORCE_EQ(dist_alias->numel(), num_total_classes);
-        PADDLE_ENFORCE_EQ(dist_alias_probs->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_probs->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistProbs) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistProbs).numel() = %d, Attr(num_total_classes) "
+            "= %d.",
+            dist_probs->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_alias->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistAlias) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistAlias).numel() = %d, Attr(num_total_classes) "
+            "= %d.",
+            dist_alias->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_alias_probs->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistAliasProbs) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistAliasProbs).numel() = %d, "
+            "Attr(num_total_classes) = %d.",
+            dist_alias_probs->numel(), num_total_classes);
 
         const float *probs_data = dist_probs->data<float>();
         const int *alias_data = dist_alias->data<int>();
@@ -121,7 +139,11 @@ class NCEKernel : public framework::OpKernel<T> {
     const int64_t *sample_labels_data = sample_labels->data<int64_t>();
 
     for (int x = 0; x < sample_labels->numel(); x++) {
-      PADDLE_ENFORCE_GE(sample_labels_data[x], 0, "nce sample label %d", x);
+      PADDLE_ENFORCE_GE(sample_labels_data[x], 0,
+                        "ValueError: Every sample label should be "
+                        "non-negative. But received: "
+                        "Input(SampleLabels)[%d] = %d",
+                        x, sample_labels_data[x]);
     }
 
     auto sample_out = context.Output<Tensor>("SampleLogits");
@@ -289,9 +311,27 @@ class NCEGradKernel : public framework::OpKernel<T> {
         auto dist_alias = context.Input<Tensor>("CustomDistAlias");
         auto dist_alias_probs = context.Input<Tensor>("CustomDistAliasProbs");
 
-        PADDLE_ENFORCE_EQ(dist_probs->numel(), num_total_classes);
-        PADDLE_ENFORCE_EQ(dist_alias->numel(), num_total_classes);
-        PADDLE_ENFORCE_EQ(dist_alias_probs->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_probs->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistProbs) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistProbs).numel() = %d, Attr(num_total_classes) "
+            "= %d.",
+            dist_probs->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_alias->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistAlias) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistAlias).numel() = %d, Attr(num_total_classes) "
+            "= %d.",
+            dist_alias->numel(), num_total_classes);
+        PADDLE_ENFORCE_EQ(
+            dist_alias_probs->numel(), num_total_classes,
+            "ShapeError: The number of elements in Input(CustomDistAliasProbs) "
+            "should be equal to the number of total classes. But Received: "
+            "Input(CustomDistAliasProbs).numel() = %d, "
+            "Attr(num_total_classes) = %d.",
+            dist_alias_probs->numel(), num_total_classes);
 
         const float *probs_data = dist_probs->data<float>();
         const int *alias_data = dist_alias->data<int>();
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index 6f1943d6c0e348822076cb91bfe50040e16cb931..97d8e3c47a536f286498c3e74bbfe34e3af7f3f5 100755
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -6827,8 +6827,23 @@ def nce(input,
             custom_dist=dist)
     """
     helper = LayerHelper('nce', **locals())
-    assert isinstance(input, Variable)
-    assert isinstance(label, Variable)
+
+    if not isinstance(input, Variable):
+        raise TypeError(
+            "The type of 'input' in nce layer must be Variable, but received %s"
+            % (type(input)))
+    if not isinstance(label, Variable):
+        raise TypeError(
+            "The type of 'label' in nce layer must be Variable, but received %s"
+            % (type(label)))
+    if convert_dtype(input.dtype) not in ['float32', 'float64']:
+        raise TypeError(
+            "The data type of 'input' in nce layer must be float32 or float64, but received %s."
+            % (convert_dtype(input.dtype)))
+    if convert_dtype(label.dtype) not in ['int64']:
+        raise TypeError(
+            "The data type of 'label' in nce layer must be int64, but received %s."
+            % (convert_dtype(label.dtype)))
 
     dim = input.shape[1]
     num_true_class = label.shape[1]
diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py
index 1e462d13d0755f48fd73a9eae335584858ecb17f..e950c47fcb0a276ad041eb2c94cd453487b8102e 100644
--- a/python/paddle/fluid/tests/unittests/test_nce.py
+++ b/python/paddle/fluid/tests/unittests/test_nce.py
@@ -19,6 +19,7 @@
 import unittest
 
 import paddle.fluid as fluid
 import paddle.fluid.initializer as initializer
+from paddle.fluid import Program, program_guard
 from op_test import OpTest
 
@@ -219,5 +220,37 @@ class TestNCECase1SelectedRows(unittest.TestCase):
         self.assertEqual(rets[0], rets[1])
 
 
+class TestNCE_OpError(OpTest):
+    def test_errors(self):
+        with program_guard(Program(), Program()):
+            input1 = fluid.create_lod_tensor(
+                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace())
+            label1 = fluid.layers.data(
+                name='label1', shape=[-1, 4], dtype="int64")
+            # the input(input) of nce layer must be Variable.
+            self.assertRaises(TypeError, fluid.layers.nce, input1, label1, 5)
+
+            input2 = fluid.layers.data(
+                name='input2', shape=[-1, 4], dtype="float32")
+            label2 = fluid.create_lod_tensor(
+                np.array([0.0, 3.0, 2.0, 4.0]), [[1, 1, 2]], fluid.CPUPlace())
+            # the input(label) of nce layer must be Variable.
+            self.assertRaises(TypeError, fluid.layers.nce, input2, label2, 5)
+
+            input3 = fluid.layers.data(
+                name='input3', shape=[-1, 4], dtype="float16")
+            label3 = fluid.layers.data(
+                name='label3', shape=[-1, 1], dtype="int64")
+            # the data type of input(input) must be float32 or float64.
+            self.assertRaises(TypeError, fluid.layers.nce, input3, label3, 5)
+
+            input4 = fluid.layers.data(
+                name='input4', shape=[-1, 4], dtype="float32")
+            label4 = fluid.layers.data(
+                name='label4', shape=[-1, 1], dtype="int32")
+            # the data type of input(label) must be int64.
+            self.assertRaises(TypeError, fluid.layers.nce, input4, label4, 5)
+
+
 if __name__ == '__main__':
     unittest.main()