// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/tests/api/tester_helper.h"

namespace paddle {
namespace inference {

using paddle::PaddleTensor;

// Read a single value of type T from the stream.
template <typename T>
void GetValueFromStream(std::stringstream *ss, T *t) {
  (*ss) >> (*t);
}

// Specialization for std::string: take the whole buffered content.
template <>
void GetValueFromStream<std::string>(std::stringstream *ss, std::string *t) {
  *t = ss->str();
}

// Split a string into a vector of T on the given separator.
template <typename T>
void Split(const std::string &line, char sep, std::vector<T> *v) {
  std::stringstream ss;
  T t;
  for (auto c : line) {
    if (c != sep) {
      ss << c;
    } else {
      GetValueFromStream<T>(&ss, &t);
      v->push_back(std::move(t));
      ss.str({});
      ss.clear();
    }
  }

  if (!ss.str().empty()) {
    GetValueFromStream<T>(&ss, &t);
    v->push_back(std::move(t));
    ss.str({});
    ss.clear();
  }
}

// Parse a tensor from a string field of the form
// "<space-separated shape>:<space-separated flat data>".
template <typename T>
bool ParseTensor(const std::string &field, paddle::PaddleTensor *tensor) {
  std::vector<std::string> data;
  Split(field, ':', &data);
  if (data.size() < 2) return false;

  std::string shape_str = data[0];

  std::vector<int> shape;
  Split(shape_str, ' ', &shape);

  std::string mat_str = data[1];

  std::vector<T> mat;
  Split(mat_str, ' ', &mat);

  tensor->shape = shape;
  auto size =
      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>()) *
      sizeof(T);
  tensor->data.Resize(size);
  std::copy(mat.begin(), mat.end(), static_cast<T *>(tensor->data.data()));
  tensor->dtype = GetPaddleDType<T>();

  return true;
}

// Parse the input tensors of one sample from a ';'-separated line: three
// int64 tensors followed by the float input_mask.
bool ParseLine(const std::string &line,
               std::vector<paddle::PaddleTensor> *tensors) {
  std::vector<std::string> fields;
  Split(line, ';', &fields);

  tensors->clear();
  tensors->reserve(4);

  int i = 0;
  auto input_name = FLAGS_ernie_large ? "eval_placeholder_" : "placeholder_";
  for (; i < 3; i++) {
    paddle::PaddleTensor temp;
    ParseTensor<int64_t>(fields[i], &temp);
    temp.name = input_name + std::to_string(i);
    tensors->push_back(temp);
  }

  // input_mask
  paddle::PaddleTensor input_mask;
  ParseTensor<float>(fields[i], &input_mask);
  input_mask.name = input_name + std::to_string(i);
  tensors->push_back(input_mask);

  return true;
}

bool LoadInputData(std::vector<std::vector<paddle::PaddleTensor>> *inputs) {
  if (FLAGS_infer_data.empty()) {
    LOG(ERROR) << "please set input data path";
    return false;
  }

  std::ifstream fin(FLAGS_infer_data);
  std::string line;
  int sample = 0;

  // The unit-test dataset only has 10 samples; each sample has 5 feeds.
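  // Read one sample per line. Unless FLAGS_test_all_data is set, stop once
  // FLAGS_batch_size samples have been read.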
  while (std::getline(fin, line)) {
    std::vector<paddle::PaddleTensor> feed_data;
    ParseLine(line, &feed_data);
    inputs->push_back(std::move(feed_data));
    sample++;
    if (!FLAGS_test_all_data && sample == FLAGS_batch_size) break;
  }
  LOG(INFO) << "number of samples: " << sample;

  return true;
}

void SetConfig(AnalysisConfig *cfg, bool use_mkldnn = false,
               bool use_gpu = false) {
  cfg->SetModel(FLAGS_infer_model);
  if (use_mkldnn) {
    cfg->EnableMKLDNN();
  }

  if (use_gpu) {
    cfg->EnableUseGpu(100, 0);
  } else {
    cfg->DisableGpu();
  }
  cfg->SwitchSpecifyInputNames();
  cfg->SwitchIrOptim();
  cfg->SetCpuMathLibraryNumThreads(FLAGS_paddle_num_threads);
}

void profile(bool use_mkldnn = false, bool use_gpu = false) {
  AnalysisConfig config;
  SetConfig(&config, use_mkldnn, use_gpu);

  std::vector<std::vector<PaddleTensor>> outputs;
  std::vector<std::vector<PaddleTensor>> inputs;
  LoadInputData(&inputs);
  TestPrediction(reinterpret_cast<const PaddlePredictor::Config *>(&config),
                 inputs, &outputs, FLAGS_num_threads);
}

TEST(Analyzer_ernie, profile) { profile(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_ernie, profile_mkldnn) { profile(true, false); }
#endif

// Check the model on GPU
#ifdef PADDLE_WITH_CUDA
TEST(Analyzer_ernie, profile_gpu) { profile(false, true); }
#endif

// Check the fuse status
TEST(Analyzer_Ernie, fuse_statis) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  int num_ops;
  auto predictor = CreatePaddlePredictor<AnalysisConfig>(cfg);
  auto fuse_statis = GetFuseStatis(
      static_cast<AnalysisPredictor *>(predictor.get()), &num_ops);
  ASSERT_TRUE(fuse_statis.count("fc_fuse"));
  LOG(INFO) << "num_ops: " << num_ops;
  if (FLAGS_ernie_large) {
    ASSERT_EQ(fuse_statis.at("fc_fuse"), 146);
    EXPECT_EQ(num_ops, 859);
  } else {
    ASSERT_EQ(fuse_statis.at("fc_fuse"), 74);
    EXPECT_EQ(num_ops, 295);
  }
}

// Compare the results of NativeConfig and AnalysisConfig
void compare(bool use_mkldnn = false) {
  AnalysisConfig cfg;
  SetConfig(&cfg, use_mkldnn, false);

  std::vector<std::vector<PaddleTensor>> inputs;
  LoadInputData(&inputs);
  CompareNativeAndAnalysis(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg), inputs);
}

TEST(Analyzer_ernie, compare) { compare(); }
#ifdef PADDLE_WITH_MKLDNN
TEST(Analyzer_ernie, compare_mkldnn) { compare(true /* use_mkldnn */); }
#endif

// Compare deterministic results
TEST(Analyzer_Ernie, compare_determine) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  LoadInputData(&input_slots_all);
  CompareDeterministic(reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
                       input_slots_all);
}

// Compare results against the reference output in FLAGS_refer_result
TEST(Analyzer_Ernie, compare_results) {
  AnalysisConfig cfg;
  SetConfig(&cfg);

  std::vector<std::vector<PaddleTensor>> input_slots_all;
  LoadInputData(&input_slots_all);

  std::ifstream fin(FLAGS_refer_result);
  std::string line;
  std::vector<float> ref;

  while (std::getline(fin, line)) {
    Split(line, ' ', &ref);
  }

  auto predictor = CreateTestPredictor(
      reinterpret_cast<const PaddlePredictor::Config *>(&cfg),
      FLAGS_use_analysis);

  std::vector<PaddleTensor> outputs;
  for (size_t i = 0; i < input_slots_all.size(); i++) {
    outputs.clear();
    predictor->Run(input_slots_all[i], &outputs);

    auto outputs_size = outputs.front().data.length() / (sizeof(float));
    for (size_t j = 0; j < outputs_size; ++j) {
      EXPECT_NEAR(ref[i * outputs_size + j],
                  static_cast<float *>(outputs[0].data.data())[j],
                  FLAGS_accuracy);
    }
  }
}

}  // namespace inference
}  // namespace paddle