From cd94df86793e1380d44a177eecb2cde90cc734e9 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Fri, 28 Dec 2018 07:39:59 +0000
Subject: [PATCH] fix load and refine

---
 paddle/fluid/inference/api/analysis_predictor.cc        | 2 +-
 paddle/fluid/inference/tests/api/analyzer_ner_tester.cc | 5 ++---
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 3f8feaaa1..6e3c0aa1e 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -251,7 +251,7 @@ bool AnalysisPredictor::SetFeed(const std::vector<PaddleTensor> &inputs,
     input.set_lod(lod);
     int idx = -1;
     if (config_.specify_input_name) {
-      idx = feed_names_[inputs[i].name];
+      idx = feed_names_.at(inputs[i].name);
     } else {
       idx = boost::get<int>(feeds_[i]->GetAttr("col"));
     }
diff --git a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
index f8635968c..04f8b3ffe 100644
--- a/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_ner_tester.cc
@@ -60,8 +60,7 @@ struct DataRecord {
   }
 };
 
-void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data,
-                   int batch_size) {
+void PrepareInputs(std::vector<PaddleTensor> *input_slots, DataRecord *data) {
   PaddleTensor lod_word_tensor, lod_mention_tensor;
   lod_word_tensor.name = "word";
   lod_mention_tensor.name = "mention";
@@ -100,7 +99,7 @@ void SetInput(std::vector<std::vector<PaddleTensor>> *inputs) {
   int epoch = FLAGS_test_all_data ? data.num_samples / FLAGS_batch_size : 1;
   LOG(INFO) << "number of samples: " << epoch * FLAGS_batch_size;
   for (int bid = 0; bid < epoch; ++bid) {
-    PrepareInputs(&input_slots, &data, FLAGS_batch_size);
+    PrepareInputs(&input_slots, &data);
     (*inputs).emplace_back(input_slots);
   }
 }
--
GitLab
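Not part of the patch itself: a minimal standalone sketch of the std::map operator[] vs .at() distinction behind the feed_names_ change above, assuming feed_names_ behaves like a const name-to-index map (its exact type is not shown in the diff).

// Illustrative sketch only; the map below is hypothetical and mirrors the
// feed_names_ lookup in AnalysisPredictor::SetFeed.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

int main() {
  const std::map<std::string, int> feed_names = {{"word", 0}, {"mention", 1}};

  // operator[] cannot be called on a const map, and on a non-const map it
  // silently inserts a default-constructed value (index 0) for a missing key.
  // .at() works on const maps and throws std::out_of_range for a missing key.
  std::cout << feed_names.at("word") << "\n";  // prints 0

  try {
    std::cout << feed_names.at("missing") << "\n";
  } catch (const std::out_of_range &e) {
    std::cout << "unknown input name: " << e.what() << "\n";
  }
  return 0;
}

With .at(), a misspelled input name surfaces as an exception at the lookup site instead of silently mapping to index 0.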