From 63ce906b088c641c3bf33a2b8aa6324a39310ffe Mon Sep 17 00:00:00 2001
From: guosheng
Date: Tue, 12 Dec 2017 20:33:44 +0800
Subject: [PATCH] Refine ChunkEvalutor by following comments

---
 paddle/operators/chunk_eval_op.cc             | 16 +++++++------
 .../tests/book/test_label_semantic_roles.py   | 23 +++++++++----------
 2 files changed, 20 insertions(+), 19 deletions(-)

diff --git a/paddle/operators/chunk_eval_op.cc b/paddle/operators/chunk_eval_op.cc
index ff2a08ccac9..894f355deb9 100644
--- a/paddle/operators/chunk_eval_op.cc
+++ b/paddle/operators/chunk_eval_op.cc
@@ -80,14 +80,16 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker {
               "sensitivity) of chunks on the given mini-batch.");
     AddOutput("F1-Score",
               "(float). The evaluated F1-Score on the given mini-batch.");
+    AddOutput("NumInferChunks",
+              "(int64_t). The number of chunks in Inference on the given "
+              "mini-batch.");
     AddOutput(
-        "NumInferChunks",
-        "(int). The number of chunks in Inference on the given mini-batch.");
-    AddOutput("NumLabelChunks",
-              "(int). The number of chunks in Label on the given mini-batch.");
-    AddOutput("NumCorrectChunks",
-              "(int). The number of chunks both in Inference and Label on the "
-              "given mini-batch.");
+        "NumLabelChunks",
+        "(int64_t). The number of chunks in Label on the given mini-batch.");
+    AddOutput(
+        "NumCorrectChunks",
+        "(int64_t). The number of chunks both in Inference and Label on the "
+        "given mini-batch.");
     AddAttr<int>("num_chunk_types",
                  "(int). The number of chunk type. See below for details.");
     AddAttr<std::string>(
diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
index caa51b5df4e..c3591a613ac 100644
--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
@@ -178,20 +178,19 @@ def main():
     for pass_id in xrange(PASS_NUM):
         chunk_evaluator.reset(exe)
         for data in train_data():
-            outs = exe.run(fluid.default_main_program(),
-                           feed=feeder.feed(data),
-                           fetch_list=[avg_cost] + chunk_evaluator.metrics)
-            precision, recall, f1_score = chunk_evaluator.eval(exe)
-            avg_cost_val = np.array(outs[0])
-            precision_val = np.array(precision)
-            recall_val = np.array(recall)
-            f1_score_val = np.array(f1_score)
+            cost, precision, recall, f1_score = exe.run(
+                fluid.default_main_program(),
+                feed=feeder.feed(data),
+                fetch_list=[avg_cost] + chunk_evaluator.metrics)
+            pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval(
+                exe)
             if batch_id % 10 == 0:
-                print("avg_cost=" + str(avg_cost_val))
-                print("precision_val=" + str(precision_val))
-                print("recall_val:" + str(recall_val))
-                print("f1_score_val:" + str(f1_score_val))
+                print("avg_cost:" + str(cost) + " precision:" + str(
+                    precision) + " recall:" + str(recall) + " f1_score:" + str(
+                    f1_score) + " pass_precision:" + str(
+                    pass_precision) + " pass_recall:" + str(pass_recall) +
+                    " pass_f1_score:" + str(pass_f1_score))
 
             # exit early for CI
             exit(0)
 
--
GitLab
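
A note on why the patch exposes the three int64 count outputs rather than only
the per-batch float metrics: accumulating NumInferChunks, NumLabelChunks, and
NumCorrectChunks across mini-batches and dividing once at the end of a pass
gives exact pass-level precision/recall/F1, whereas averaging per-batch F1
values would weight batches incorrectly. Below is a minimal sketch (not part
of the patch) of how the counts combine into the metric outputs; the function
and argument names are hypothetical stand-ins for the accumulated
NumInferChunks, NumLabelChunks, and NumCorrectChunks tensors:

    def chunk_metrics(num_infer, num_label, num_correct):
        # precision: fraction of inferred chunks that are correct
        precision = float(num_correct) / num_infer if num_infer else 0.0
        # recall: fraction of labeled chunks that were found
        recall = float(num_correct) / num_label if num_label else 0.0
        # F1: harmonic mean of precision and recall (0 when nothing correct)
        f1 = (2 * precision * recall / (precision + recall)
              if num_correct else 0.0)
        return precision, recall, f1

    # e.g. 8 inferred chunks, 10 labeled chunks, 6 correct:
    # precision = 0.75, recall = 0.6, f1 = 0.9 / 1.35 = 0.667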