diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
index 6ff8a18cdb14b10dc404e91fcbec1bd262269e7f..95cdeb4ad1f9f1524490495abe70bc962f18590e 100644
--- a/paddle/fluid/inference/tests/book/test_inference_nlp.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
@@ -14,7 +14,12 @@ limitations under the License. */
 
 #include <sys/time.h>
 #include <time.h>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
 #include <thread>  // NOLINT
+#include <vector>
 #include "gflags/gflags.h"
 #include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
@@ -31,16 +36,37 @@ inline double get_current_ms() {
   return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
 }
 
+void read_data(
+    std::vector<std::vector<int64_t>>* out,
+    const std::string& filename = "/home/tangjian/paddle-tj/out.ids.txt") {
+  using namespace std;  // NOLINT
+  fstream fin(filename);
+  string line;
+  out->clear();
+  while (getline(fin, line)) {
+    istringstream iss(line);
+    vector<int64_t> ids;
+    string field;
+    while (getline(iss, field, ' ')) {
+      ids.push_back(stoi(field));
+    }
+    out->push_back(ids);
+  }
+}
+
 TEST(inference, understand_sentiment) {
   if (FLAGS_dirname.empty()) {
     LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
   }
-
+  std::vector<std::vector<int64_t>> inputdatas;
+  read_data(&inputdatas);
+  LOG(INFO) << "---------- dataset size: " << inputdatas.size();
   LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
   std::string dirname = FLAGS_dirname;
+  const bool model_combined = false;
 
-  int total_work = 100;
-  int num_threads = 10;
+  int total_work = 10;
+  int num_threads = 2;
   int work_per_thread = total_work / num_threads;
   std::vector<std::unique_ptr<std::thread>> infer_threads;
   for (int i = 0; i < num_threads; ++i) {