Unverified commit 0650ae08, authored by liu zhengxi, committed by GitHub

[cherry-pick] error message enhancement (#24631)

* API(dynamic_gru, chunk_eval, BeamSearchDecoder) error message enhancement (#24513)

* dynamic_gru err_msg enhancement, test=develop

* chunk_eval err_msg enhancement and fix crf_decoding output type, test=develop

* BeamSearchDecoder err msg enhancement, test=develop

* API/OP(sequence_expand_as) error message enhancement (#23712) 
Co-authored-by: Guo Sheng <whucsgs@163.com>
Parent 47bd526b
......@@ -24,45 +24,48 @@ class ChunkEvalOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("Inference"), true,
"Input(Inference) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasInput("Label"), true,
"Input(Label) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Precision"), true,
"Output(Precision) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("Recall"), true,
"Output(Recall) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(ctx->HasOutput("F1-Score"), true,
"Output(F1-Score) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("NumInferChunks"), true,
"Output(NumInferChunks) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("NumLabelChunks"), true,
"Output(NumLabelChunks) of ChunkEvalOp should not be null.");
PADDLE_ENFORCE_EQ(
ctx->HasOutput("NumCorrectChunks"), true,
"Output(NumCorrectChunks) of ChunkEvalOp should not be null.");
OP_INOUT_CHECK(ctx->HasInput("Inference"), "Input", "Inference",
"chunk_eval");
OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("Precision"), "Output", "Precision",
"chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("Recall"), "Output", "Recall", "chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("F1-Score"), "Output", "F1-Score",
"chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("NumInferChunks"), "Output", "NumInferChunks",
"chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("NumLabelChunks"), "Output", "NumLabelChunks",
"chunk_eval");
OP_INOUT_CHECK(ctx->HasOutput("NumCorrectChunks"), "Output",
"NumCorrectChunks", "chunk_eval");
auto inference_dim = ctx->GetInputDim("Inference");
auto label_dim = ctx->GetInputDim("Label");
PADDLE_ENFORCE_EQ(
inference_dim, label_dim,
"Input(Inference)'s shape must be the same as Input(Label)'s shape.");
platform::errors::InvalidArgument(
"Input(Inference)'s shape must be the same as Input(Label)'s "
"shape, but received [%s] (Inference) vs [%s] (Label).",
inference_dim, label_dim));
bool use_padding = ctx->HasInput("SeqLength");
if (use_padding) {
PADDLE_ENFORCE_EQ((inference_dim.size() == 3 && inference_dim[2] == 1) ||
inference_dim.size() == 2,
true,
"when Input(SeqLength) is provided, Input(Inference) "
"should be of dim 3 (batch_size, bucket, 1) or dim 2 "
"(batch_size, bucket).");
PADDLE_ENFORCE_EQ(
(inference_dim.size() == 3 && inference_dim[2] == 1) ||
inference_dim.size() == 2,
true, platform::errors::InvalidArgument(
"when Input(SeqLength) is provided, Input(Inference) "
"should be of dim 3 (batch_size, bucket, 1) or dim 2 "
"(batch_size, bucket), but received [%s].",
inference_dim));
auto seq_length_dim = ctx->GetInputDim("SeqLength");
PADDLE_ENFORCE_LE(
seq_length_dim.size(), 2,
"Input(SeqLength)'s rank should not be greater than 2.");
PADDLE_ENFORCE_LE(seq_length_dim.size(), 2,
platform::errors::InvalidArgument(
"Input(SeqLength)'s rank should not be greater "
"than 2, but received %d.",
seq_length_dim.size()));
}
ctx->SetOutputDim("Precision", {1});
......
......@@ -51,7 +51,13 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
for (int i = 0; i < length; ++i) {
int prev_tag = tag;
int prev_type = type;
PADDLE_ENFORCE_LE(label[i], num_chunk_types * num_tag_types);
PADDLE_ENFORCE_LE(
label[i], num_chunk_types * num_tag_types,
platform::errors::InvalidArgument(
"The value of Input(Label) should be less than the number of "
"chunk types times the number of tag types, but received %d "
"(Label) vs %d (chunk types) * %d (tag types).",
label[i], num_chunk_types, num_tag_types));
tag = label[i] % num_tag_types;
type = label[i] / num_tag_types;
if (in_chunk && ChunkEnd(prev_tag, prev_type, tag, type, other_chunk_type,
......@@ -191,10 +197,16 @@ class ChunkEvalKernel : public framework::OpKernel<T> {
tag_inside, tag_end, tag_single, excluded_chunk_types);
}
} else {
PADDLE_ENFORCE_EQ(lod.size(), 1UL,
"Only support one level sequence now.");
PADDLE_ENFORCE(lod == inference->lod(),
"LoD must be same between Inference and Label.");
PADDLE_ENFORCE_EQ(
lod.size(), 1UL,
platform::errors::InvalidArgument(
"Only support one level LoD sequence now, but received %d.",
lod.size()));
PADDLE_ENFORCE_EQ(
lod, inference->lod(),
platform::errors::InvalidArgument(
"Input(Inference) and Input(Label) of Op(chunk_eval) should have "
"same LoD information."));
num_sequences = lod[0].size() - 1;
for (int i = 0; i < num_sequences; ++i) {
......
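For reference, a minimal sketch of a graph that satisfies the checks above (Paddle fluid 1.x API as used elsewhere in this change; tensor names are illustrative, and this only builds the program without running it): Inference and Label must both be int64, share the same shape, and carry the same 1-level LoD, which is exactly what ChunkEvalOp now reports in detail when violated.

import paddle.fluid as fluid

# Sketch only: identically shaped int64 tensors with 1-level LoD, matching
# the shape/LoD requirements checked by ChunkEvalOp above.
pred = fluid.data(name='pred', shape=[None, 1], lod_level=1, dtype='int64')
label = fluid.data(name='label', shape=[None, 1], lod_level=1, dtype='int64')
metrics = fluid.layers.chunk_eval(
    input=pred, label=label, chunk_scheme="IOB", num_chunk_types=3)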
......@@ -31,44 +31,58 @@ class GRUOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Input"),
"Input(%s) of GRUOp should not be null.", "Input");
PADDLE_ENFORCE(ctx->HasInput("Weight"),
"Input(%s) of GRUOp should not be null.", "Weight");
PADDLE_ENFORCE(ctx->HasOutput("BatchGate"),
"Output(%s) of GRUOp should not be null.", "BatchGate");
PADDLE_ENFORCE(ctx->HasOutput("BatchResetHiddenPrev"),
"Output(%s) of GRUOp should not be null.",
"BatchResetHiddenPrev");
PADDLE_ENFORCE(ctx->HasOutput("BatchHidden"),
"Output(%s) of GRUOp should not be null.", "BatchHidden");
PADDLE_ENFORCE(ctx->HasOutput("Hidden"),
"Output(%s) of GRUOp should not be null.", "Hidden");
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU");
OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU");
OP_INOUT_CHECK(ctx->HasOutput("BatchGate"), "Output", "BatchGate", "GRU");
OP_INOUT_CHECK(ctx->HasOutput("BatchResetHiddenPrev"), "Output",
"BatchResetHiddenPrev", "GRU");
OP_INOUT_CHECK(ctx->HasOutput("BatchHidden"), "Output", "BatchHidden",
"GRU");
OP_INOUT_CHECK(ctx->HasOutput("Hidden"), "Output", "Hidden", "GRU");
auto input_dims = ctx->GetInputDim("Input");
auto weight_dims = ctx->GetInputDim("Weight");
int input_size = input_dims[1];
int frame_size = weight_dims[0];
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_EQ(
input_size, frame_size * 3,
"The input_size must be 3 times of frame_size in GRUOp.");
PADDLE_ENFORCE_EQ(input_size, frame_size * 3,
platform::errors::InvalidArgument(
"The second dimension of Input(Input) must be 3 "
"times of frame_size in GRUOp, but received %d "
"(Input) vs %d (frame_size).",
input_size, frame_size));
}
PADDLE_ENFORCE_EQ(
weight_dims[1], frame_size * 3,
"The shape of Weight matrix must be [frame_size, frame_size * 3].");
platform::errors::InvalidArgument(
"The shape of Input(Weight) matrix must be [frame_size, frame_size "
"* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
weight_dims[0], weight_dims[1], frame_size, frame_size * 3));
if (ctx->HasInput("H0")) {
auto h0_dims = ctx->GetInputDim("H0");
PADDLE_ENFORCE_EQ(h0_dims[1], frame_size,
"The width of H0 must be equal to frame_size.");
PADDLE_ENFORCE_EQ(
h0_dims[1], frame_size,
platform::errors::InvalidArgument(
"The width of Input(H0) must be equal to frame_size, but "
"received %d (width of H0) vs %d (frame_size).",
h0_dims[1], frame_size));
}
if (ctx->HasInput("Bias")) {
auto bias_dims = ctx->GetInputDim("Bias");
int bias_height = bias_dims[0];
int bias_width = bias_dims[1];
PADDLE_ENFORCE_EQ(bias_height, 1,
"The shape of Bias must be [1, frame_size * 3].");
PADDLE_ENFORCE_EQ(bias_width, frame_size * 3,
"The shape of Bias must be [1, frame_size * 3].");
PADDLE_ENFORCE_EQ(
bias_height, 1,
platform::errors::InvalidArgument(
"The shape of Bias must be [1, frame_size * 3], but received "
"[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
bias_height, bias_width, frame_size * 3));
PADDLE_ENFORCE_EQ(
bias_width, frame_size * 3,
platform::errors::InvalidArgument(
"The shape of Bias must be [1, frame_size * 3], but received "
"[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
bias_height, bias_width, frame_size * 3));
}
ctx->SetOutputDim("BatchGate", input_dims);
ctx->SetOutputDim("BatchResetHiddenPrev", {input_dims[0], frame_size});
......@@ -166,39 +180,50 @@ class GRUGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("Input"),
"Input(%s) of GRUGradOp should not be null.", "Input");
PADDLE_ENFORCE(ctx->HasInput("Weight"),
"Input(%s) of GRUGradOp should not be null.", "Weight");
PADDLE_ENFORCE(ctx->HasInput("BatchGate"),
"Input(%s) of GRUGradOp should not be null.", "BatchGate");
PADDLE_ENFORCE(ctx->HasInput("BatchResetHiddenPrev"),
"Input(%s) of GRUGradOp should not be null.",
"BatchResetHiddenPrev");
PADDLE_ENFORCE(ctx->HasInput("BatchHidden"),
"Input(%s) of GRUOp should not be null.", "BatchHidden");
PADDLE_ENFORCE(ctx->HasInput("Hidden"),
"Input(%s) of GRUGradOp should not be null.", "Hidden");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Hidden")),
"Input(%s@GRAD) of GRUGradOp should not be null.", "Hidden");
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput("Weight"), "Input", "Weight", "GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput("BatchGate"), "Input", "BatchGate",
"GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput("BatchResetHiddenPrev"), "Input",
"BatchResetHiddenPrev", "GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput("BatchHidden"), "Input", "BatchHidden",
"GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput("Hidden"), "Input", "Hidden", "GRU@Grad");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Hidden")), "Input",
framework::GradVarName("Hidden"), "GRU@Grad");
auto input_dims = ctx->GetInputDim("Input");
auto weight_dims = ctx->GetInputDim("Weight");
int input_size = input_dims[1];
int frame_size = weight_dims[0];
int weight_height = weight_dims[0];
int weight_width = weight_dims[1];
PADDLE_ENFORCE_EQ(input_size, frame_size * 3,
"The input_size must be 3 times of frame_size in GRUOp.");
PADDLE_ENFORCE_EQ(
input_size, frame_size * 3,
platform::errors::InvalidArgument(
"The second dimension of Input(Input) must be 3 times of "
"frame_size in GRUOp, but received %d (Input) vs %d (frame_size).",
input_size, frame_size));
PADDLE_ENFORCE_EQ(
weight_height, frame_size,
"The shape of Weight matrix must be [frame_size, frame_size * 3].");
platform::errors::InvalidArgument(
"The shape of Input(Weight) matrix must be [frame_size, frame_size "
"* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
weight_height, weight_width, frame_size, frame_size * 3));
PADDLE_ENFORCE_EQ(
weight_width, frame_size * 3,
"The shape of Weight matrix must be [frame_size, frame_size * 3].");
platform::errors::InvalidArgument(
"The shape of Input(Weight) matrix must be [frame_size, frame_size "
"* 3], but received [%d, %d] (Weight) vs [%d, %d] (frame_size).",
weight_height, weight_width, frame_size, frame_size * 3));
if (ctx->HasInput("H0")) {
auto h0_dims = ctx->GetInputDim("H0");
PADDLE_ENFORCE_EQ(h0_dims[1], frame_size,
"The width of H0 must be equal to frame_size.");
PADDLE_ENFORCE_EQ(
h0_dims[1], frame_size,
platform::errors::InvalidArgument(
"The width of Input(H0) must be equal to frame_size, but "
"received %d (width of H0) vs %d (frame_size).",
h0_dims[1], frame_size));
auto h0_grad_name = framework::GradVarName("H0");
if (ctx->HasOutput(h0_grad_name))
ctx->SetOutputDim(h0_grad_name, h0_dims);
......@@ -207,10 +232,18 @@ class GRUGradOp : public framework::OperatorWithKernel {
auto bias_dims = ctx->GetInputDim("Bias");
int bias_height = bias_dims[0];
int bias_width = bias_dims[1];
PADDLE_ENFORCE_EQ(bias_height, 1,
"The shape of Bias must be [1, frame_size * 3].");
PADDLE_ENFORCE_EQ(bias_width, frame_size * 3,
"The shape of Bias must be [1, frame_size * 3].");
PADDLE_ENFORCE_EQ(
bias_height, 1,
platform::errors::InvalidArgument(
"The shape of Bias must be [1, frame_size * 3], but received "
"[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
bias_height, bias_width, frame_size * 3));
PADDLE_ENFORCE_EQ(
bias_width, frame_size * 3,
platform::errors::InvalidArgument(
"The shape of Bias must be [1, frame_size * 3], but received "
"[%d, %d] (Bias) vs [1, %d] (frame_size * 3).",
bias_height, bias_width, frame_size * 3));
auto bias_grad_name = framework::GradVarName("Bias");
if (ctx->HasOutput(bias_grad_name))
ctx->SetOutputDim(bias_grad_name, bias_dims);
......@@ -298,14 +331,20 @@ class GRUCPUKernel : public framework::OpKernel<T> {
T* packed_gate = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
frame_size * 2 /*width of weight*/,
frame_size /*height of height*/);
PADDLE_ENFORCE(packed_gate);
PADDLE_ENFORCE_NOT_NULL(
packed_gate, platform::errors::NotFound(
"The caculation result of packed_gate by "
"GEMM_ALLOC should not be null when using MKL."));
blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size * 2,
frame_size, T(1.0), gru_value.gate_weight, frame_size * 2,
packed_gate);
T* packed_state = blas.GEMM_ALLOC(CblasBMatrix, 1 /*height of C*/,
frame_size /*width of weight*/,
frame_size /*height of height*/);
PADDLE_ENFORCE(packed_state);
PADDLE_ENFORCE_NOT_NULL(
packed_state, platform::errors::NotFound(
"The caculation result of packed_state by "
"GEMM_ALLOC should not be null when using MKL."));
blas.GEMM_PACK(CblasBMatrix, CblasNoTrans, 1 /*cur bs?*/, frame_size,
frame_size, T(1.0), gru_value.state_weight, frame_size,
packed_state);
......
......@@ -219,7 +219,13 @@ class LSTMGradKernel : public framework::OpKernel<T> {
auto in_dims = input->dims();
auto out_dims = hidden_g->dims();
int frame_size = static_cast<int>(in_dims[1] / 4);
PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);
PADDLE_ENFORCE_EQ(
frame_size, out_dims[1],
platform::errors::InvalidArgument(
"The second dimension of Input(" +
framework::GradVarName("Hidden") +
") should be %d, but received %d in LSTM@Grad operator.",
frame_size, out_dims[1]));
math::LstmMetaValue<T> lstm_value;
if (bias && ctx.Attr<bool>("use_peepholes")) {
......
......@@ -327,7 +327,11 @@ class LSTMPGradKernel : public framework::OpKernel<T> {
auto out_dims = cell_out->dims();
framework::DDim proj_dims({in_dims[0], proj_weight->dims()[1]});
int frame_size = static_cast<int>(in_dims[1] / 4);
PADDLE_ENFORCE_EQ(frame_size, out_dims[1]);
PADDLE_ENFORCE_EQ(frame_size, out_dims[1],
platform::errors::InvalidArgument(
"The second dimension of Input(Cell) should be %d, "
"but received %d in LSTMP@Grad operator.",
frame_size, out_dims[1]));
math::LstmMetaValue<T> lstmp_value;
if (bias && ctx.Attr<bool>("use_peepholes")) {
......
......@@ -27,18 +27,18 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SequenceExpandAsOp should not be null.");
PADDLE_ENFORCE(ctx->HasInput("Y"),
"Input(Y) of SequenceExpandAsOp should not be null.");
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SequenceExpandAsOp should not be null.");
OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SequenceExpandAs");
OP_INOUT_CHECK(ctx->HasInputs("Y"), "Input", "Y", "SequenceExpandAs");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "SequenceExpandAs");
auto x_dims = ctx->GetInputDim("X");
auto out_dims = x_dims;
PADDLE_ENFORCE_GE(x_dims.size(), 2,
"Dimension number of Input(X) should be at least 2.");
platform::errors::InvalidArgument(
"Dimension number of Input(X) should be at least 2. "
"But received X's dimensions = %d, X's shape = [%s].",
x_dims.size(), x_dims));
if (ctx->IsRuntime()) {
framework::Variable* x_var =
......@@ -50,11 +50,17 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel {
auto& y_lod = y_var->Get<LoDTensor>().lod();
PADDLE_ENFORCE_EQ(y_lod.size(), 1,
"Level number of Input(Y)'s lod should be 1.");
platform::errors::InvalidArgument(
"Level number of Input(Y)'s lod should be 1. But "
"received Y's lod level = %d.",
y_lod.size()));
PADDLE_ENFORCE_EQ(static_cast<size_t>(x_dim[0]), y_lod[0].size() - 1,
"The first dimension of Input(X) should be equal "
"to the size of Input(Y)'s 0 level lod.");
platform::errors::InvalidArgument(
"The first dimension of Input(X) should be one "
"less than the size of Input(Y)'s 0 level lod. But "
"received X's shape[0] = %d, Y's lod[0].size = %d.",
x_dim[0], y_lod[0].size()));
int64_t out_first_dim = 0;
if (y_lod[0].size() <= 1) {
......@@ -138,9 +144,9 @@ class SequenceExpandAsOpGrad : public framework::OperatorWithKernel {
protected:
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) should not be null.");
OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "SequenceExpandAsGrad");
OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")), "Input",
"Out@GRAD", "SequenceExpandAsGrad");
auto x_dims = ctx->GetInputDim("X");
auto x_grad_name = framework::GradVarName("X");
......
......@@ -74,13 +74,25 @@ class SequenceExpandAsKernel : public framework::OpKernel<T> {
auto *y = context.Input<framework::LoDTensor>("Y");
auto *out = context.Output<framework::LoDTensor>("Out");
PADDLE_ENFORCE_EQ(y->lod().empty(), false,
"Input(Y) Tensor of SequenceExpandAsOp does not contain "
"LoD information.");
PADDLE_ENFORCE_EQ(
y->lod().empty(), false,
platform::errors::InvalidArgument(
"Input(Y) of SequenceExpandAsOp has wrong LoD information. "
"Expected Y's lod is not empty, but received empty lod."));
auto &y_lod = y->lod();
PADDLE_ENFORCE_EQ(y_lod.size(), 1, "LoD of Y should be 1.");
PADDLE_ENFORCE_GT(y_lod[0].size(), 1, ".");
PADDLE_ENFORCE_EQ(y_lod.size(), 1,
platform::errors::InvalidArgument(
"Input(Y) of SequenceExpandAsOp has wrong LoD "
"information. Expected Y's lod level = 1, but "
"received lod level = %d.",
y_lod.size()));
PADDLE_ENFORCE_GT(y_lod[0].size(), 1,
platform::errors::InvalidArgument(
"Input(Y) of SequenceExpandAsOp has wrong LoD "
"information. Expected the size of Y's lod[0] > 1, "
"but received lod[0].size = %d.",
y_lod[0].size()));
out->mutable_data<T>(context.GetPlace());
......
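As a sketch of the LoD that Input(Y) is expected to carry here (assuming fluid.create_lod_tensor and LoDTensor.lod() behave as in fluid 1.x; data and names are illustrative): the kernel requires a non-empty, 1-level LoD whose offset list has more than one entry.

import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
y_data = np.random.random((5, 4)).astype('float32')
# Sequence lengths [1, 3, 1] over 5 rows -> offsets expected to be [0, 1, 4, 5],
# so y.lod() has one level and lod[0] has 4 (> 1) entries, as required above.
y_lod_tensor = fluid.create_lod_tensor(y_data, [[1, 3, 1]], place)
print(y_lod_tensor.lod())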
......@@ -2241,7 +2241,7 @@ def crf_decoding(input, param_attr, label=None, length=None):
helper = LayerHelper('crf_decoding', **locals())
transition = helper.get_parameter(param_attr.name)
viterbi_path = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
dtype=core.VarDesc.VarType.INT64)
inputs = {"Emission": [input], "Transition": transition, "Label": label}
if length:
inputs['Length'] = length
......@@ -2487,12 +2487,12 @@ def chunk_eval(input,
dict_size = 10000
label_dict_len = 7
sequence = fluid.data(
name='id', shape=[-1, 1], lod_level=1, dtype='int64')
name='id', shape=[None, 1], lod_level=1, dtype='int64')
embedding = fluid.embedding(
input=sequence, size=[dict_size, 512])
hidden = fluid.layers.fc(input=embedding, size=512)
label = fluid.layers.data(
name='label', shape=[1], lod_level=1, dtype='int32')
label = fluid.data(
name='label', shape=[None, 1], lod_level=1, dtype='int64')
crf = fluid.layers.linear_chain_crf(
input=hidden, label=label, param_attr=fluid.ParamAttr(name="crfw"))
crf_decode = fluid.layers.crf_decoding(
......@@ -2501,10 +2501,13 @@ def chunk_eval(input,
input=crf_decode,
label=label,
chunk_scheme="IOB",
num_chunk_types=(label_dict_len - 1) / 2)
num_chunk_types=int((label_dict_len - 1) / 2))
"""
helper = LayerHelper("chunk_eval", **locals())
check_variable_and_dtype(input, 'input', ['int64'], 'chunk_eval')
check_variable_and_dtype(label, 'label', ['int64'], 'chunk_eval')
# prepare output
precision = helper.create_variable_for_type_inference(dtype="float32")
recall = helper.create_variable_for_type_inference(dtype="float32")
......
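The two dtype checks added here fail at graph-construction time; a short sketch of the case they catch (the same one exercised by the new TestChunkEvalOpError further down; tensor names are illustrative):

import paddle.fluid as fluid

pred_i32 = fluid.data(name='pred_i32', shape=[None, 1], lod_level=1, dtype='int32')
label_i64 = fluid.data(name='label_i64', shape=[None, 1], lod_level=1, dtype='int64')
try:
    # chunk_eval only accepts int64 input, so int32 predictions are rejected
    # up front with a TypeError instead of failing later inside the operator.
    fluid.layers.chunk_eval(
        input=pred_i32, label=label_i64, chunk_scheme="IOB", num_chunk_types=3)
except TypeError as e:
    print(e)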
......@@ -756,6 +756,8 @@ class BeamSearchDecoder(Decoder):
Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable),
'BeamSearchDecoder.tile_beam_merge_with_batch')
x = nn.unsqueeze(x, [1]) # [batch_size, 1, ...]
expand_times = [1] * len(x.shape)
expand_times[1] = beam_size
......@@ -784,6 +786,7 @@ class BeamSearchDecoder(Decoder):
Variable: A tensor with shape `[batch_size, beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._split_batch_beams')
# TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
return nn.reshape(x, shape=(-1, self.beam_size) + x.shape[1:])
......@@ -800,6 +803,7 @@ class BeamSearchDecoder(Decoder):
Variable: A tensor with shape `[batch_size * beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._merge_batch_beams')
# TODO: avoid fake shape in compile-time like tile_beam_merge_with_batch
return nn.reshape(x, shape=(-1, ) + x.shape[2:])
......@@ -812,16 +816,14 @@ class BeamSearchDecoder(Decoder):
`beam_size` times.
Parameters:
probs(Variable): A tensor with shape `[batch_size, ...]`, representing
the log probabilities. Its data type should be float32 or float64.
finished(Variable): A tensor with shape `[batch_size, beam_size]`,
representing the finished status for all beams. Its data type
should be bool.
x(Variable): A tensor with shape `[batch_size, ...]`, The data type
should be float32, float64, int32, int64 or bool.
Returns:
Variable: A tensor with shape `[batch_size, beam_size, ...]`, whose \
data type is same as `x`.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._expand_to_beam_size')
x = nn.unsqueeze(x, [1])
expand_times = [1] * len(x.shape)
expand_times[1] = self.beam_size
......@@ -845,6 +847,9 @@ class BeamSearchDecoder(Decoder):
where unfinished beams stay unchanged and finished beams are \
replaced with a tensor with all probability on the EOS token.
"""
check_type(probs, 'probs', (Variable), 'BeamSearchDecoder._mask_probs')
check_type(finished, 'finished', (Variable),
'BeamSearchDecoder._mask_probs')
# TODO: use where_op
finished = tensor.cast(finished, dtype=probs.dtype)
probs = nn.elementwise_mul(
......@@ -869,6 +874,10 @@ class BeamSearchDecoder(Decoder):
Variable: A tensor with the same shape and data type as `x`, \
representing the gathered tensor.
"""
check_type(x, 'x', (Variable), 'BeamSearchDecoder._gather')
check_type(indices, 'indices', (Variable), 'BeamSearchDecoder._gather')
check_type(batch_size, 'batch_size', (Variable),
'BeamSearchDecoder._gather')
# TODO: compatibility of int32 and int64
batch_size = tensor.cast(
batch_size,
......@@ -2601,6 +2610,14 @@ def dynamic_gru(input,
assert in_dygraph_mode(
) is not True, "please use gru instead of dynamic_gru in dygraph mode!"
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'dynamic_gru')
check_type(h_0, 'h_0', (Variable, type(None)), 'dynamic_gru')
if isinstance(h_0, Variable):
check_variable_and_dtype(h_0, 'h_0', ['float32', 'float64'],
'dynamic_gru')
helper = LayerHelper('gru', **locals())
dtype = helper.input_dtype()
......
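A brief sketch of what the new dynamic_gru checks reject (mirroring TestGruOpError near the end of this diff; names are illustrative):

import numpy as np
import paddle.fluid as fluid

# A numpy array instead of a Variable now fails fast with a TypeError.
try:
    fluid.layers.dynamic_gru(
        input=np.random.random((1, 1536)).astype("float32"), size=512)
except TypeError as e:
    print(e)

# h_0 must be a float32/float64 Variable, so an int32 tensor is rejected too.
x = fluid.data(name='x', shape=[None, 1536], dtype='float32', lod_level=1)
h0 = fluid.data(name='h0', shape=[None, 512], dtype='int32')
try:
    fluid.layers.dynamic_gru(input=x, size=512, h_0=h0)
except TypeError as e:
    print(e)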
......@@ -797,7 +797,7 @@ def sequence_expand_as(x, y, name=None):
Args:
x (Variable): The input variable which is a Tensor or LoDTensor, with the \
dims ``[M, K]``. The data type should be float32, float64, int8, int32 \
dims ``[M, K]``. The data type should be float32, float64, int32 \
or int64.
y (Variable): The input variable which is a LoDTensor with 1-level lod.
name (str, optional): For detailed information, please refer \
......@@ -854,6 +854,9 @@ def sequence_expand_as(x, y, name=None):
"""
assert not in_dygraph_mode(), (
"sequence layer is not supported in dygraph mode yet.")
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
'sequence_expand_as')
check_type(y, 'y', Variable, 'sequence_expand_as')
helper = LayerHelper('sequence_expand_as', input=x, **locals())
dtype = helper.input_dtype()
tmp = helper.create_variable_for_type_inference(dtype)
......
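These checks enforce the dtype list documented above at graph-construction time; a minimal sketch of the rejected case (same as the new TestSequenceExpandAsOpError below; names are illustrative):

import paddle.fluid as fluid

x_bool = fluid.data(name='x_bool', shape=[None, 4], dtype='bool')
y = fluid.data(name='y', shape=[None, 4], dtype='float32', lod_level=1)
try:
    # bool is not among float32/float64/int32/int64, so this raises TypeError.
    fluid.layers.sequence_expand_as(x=x_bool, y=y)
except TypeError as e:
    print(e)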
......@@ -18,7 +18,9 @@ import unittest
import numpy as np
import sys
sys.path.append("../")
import paddle.fluid as fluid
from op_test import OpTest
from paddle.fluid import Program, program_guard
class TestSequenceExpandAs(OpTest):
......@@ -84,5 +86,22 @@ class TestSequenceExpandAsCase3(TestSequenceExpandAs):
self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)}
class TestSequenceExpandAsOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
# the input x must be Variable
x1 = np.random.random((2, 4)).astype("float32")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x1)
# the dtype of input x must be float32, float64, int32 or int64
x2 = fluid.data(name='x2', shape=[None, 4], dtype="bool")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x2)
# the input y must be Variable
x3 = fluid.data(name='x3', shape=[None, 4], dtype="float32")
y = np.random.random((2, 4)).astype("float32")
self.assertRaises(TypeError, fluid.layers.sequence_expand_as, x3, y)
if __name__ == '__main__':
unittest.main()
......@@ -17,6 +17,9 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
from paddle.fluid import Program, program_guard
from paddle import fluid
class Segment(object):
......@@ -229,5 +232,45 @@ class TestChunkEvalOpWithTensorInput(TestChunkEvalOp):
}
class TestChunkEvalOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_input():
input_data = np.random.random((1, 1)).astype("int64")
label_data = np.random.random(1).astype("int64")
fluid.layers.chunk_eval(
input=input_data,
label=label_data,
chunk_scheme="IOB",
num_chunk_types=3)
self.assertRaises(TypeError, test_input)
def test_label():
input_ = fluid.data(
name="input", shape=[None, 1], dtype="int64")
label_data = np.random.random(1).astype("int64")
fluid.layers.chunk_eval(
input=input_,
label=label_data,
chunk_scheme="IOB",
num_chunk_types=3)
self.assertRaises(TypeError, test_label)
def test_type():
in_data = fluid.data(
name="input_", shape=[None, 1], dtype="int32")
label = fluid.data(name="label_", shape=[1], dtype="int64")
fluid.layers.chunk_eval(
input=in_data,
label=label,
chunk_scheme="IOB",
num_chunk_types=3)
self.assertRaises(TypeError, test_type)
if __name__ == '__main__':
unittest.main()
......@@ -20,6 +20,8 @@ import math
import functools
from op_test import OpTest
from test_lstm_op import ACTIVATION
from paddle import fluid
from paddle.fluid import Program, program_guard
def gru(
......@@ -227,5 +229,24 @@ class TestGRUOpReverseOriginMode(TestGRUOp):
self.origin_mode = True
class TestGruOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_Variable():
input_data = np.random.random((1, 1536)).astype("float32")
fluid.layers.dynamic_gru(input=input_data, size=512)
self.assertRaises(TypeError, test_Variable)
def test_h_0():
in_data = fluid.data(
name="input", shape=[None, 1536], dtype="float32")
h = fluid.data(name="h", shape=[None, 512], dtype="int32")
fluid.layers.dynamic_gru(input=in_data, size=512, h_0=h)
self.assertRaises(TypeError, test_h_0)
if __name__ == "__main__":
unittest.main()