From 98fb8e58fd4fb91423d414d67f2a2684b6841020 Mon Sep 17 00:00:00 2001
From: tensor-tang
Date: Fri, 25 May 2018 11:57:44 +0800
Subject: [PATCH] test infer nlp

---
 paddle/fluid/inference/io.cc                  |  2 +-
 .../fluid/inference/tests/book/CMakeLists.txt |  1 +
 .../tests/book/test_inference_nlp.cc          | 85 +++++++++++++++++++
 paddle/fluid/inference/tests/test_helper.h    |  3 +
 4 files changed, 90 insertions(+), 1 deletion(-)
 create mode 100644 paddle/fluid/inference/tests/book/test_inference_nlp.cc

diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index 65db7c7b500..98780b6881c 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -117,7 +117,7 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
   std::string program_desc_str;
   VLOG(3) << "loading model from " << model_filename;
   ReadBinaryFile(model_filename, &program_desc_str);
-
+  // LOG(INFO) << program_desc_str;
   std::unique_ptr<framework::ProgramDesc> main_program(
       new framework::ProgramDesc(program_desc_str));
 
diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt
index dbb81462b82..90357f99d1d 100644
--- a/paddle/fluid/inference/tests/book/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/book/CMakeLists.txt
@@ -35,6 +35,7 @@ inference_test(image_classification ARGS vgg resnet)
 inference_test(label_semantic_roles)
 inference_test(recognize_digits ARGS mlp conv)
 inference_test(recommender_system)
+inference_test(nlp)
 #inference_test(rnn_encoder_decoder)
 #inference_test(understand_sentiment ARGS conv)
 inference_test(word2vec)
diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
new file mode 100644
index 00000000000..0d6d0adfb23
--- /dev/null
+++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
@@ -0,0 +1,85 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "gflags/gflags.h"
+#include "gtest/gtest.h"
+#include "paddle/fluid/inference/tests/test_helper.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+
+TEST(inference, understand_sentiment) {
+  if (FLAGS_dirname.empty()) {
+    LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
+  }
+
+  LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
+  std::string dirname = FLAGS_dirname;
+
+  // 0. Call `paddle::framework::InitDevices()` to initialize all the devices
+  // In unittests, this is done in paddle/testing/paddle_gtest_main.cc
+  paddle::framework::LoDTensor words;
+
+  paddle::framework::LoD lod{{0, 83}};
+  int64_t word_dict_len = 198392;
+  SetupLoDTensor(&words, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  /*
+  std::vector<int64_t> srcdata{
+      784,    784,  1550,   6463, 56,    75693, 6189, 784,  784,  1550,
+      198391, 6463, 42468,  4376, 10251, 10760, 6189, 297,  396,  6463,
+      6463,   1550, 198391, 6463, 22564, 1612,  291,  68,   164,  784,
+      784,    1550, 198391, 6463, 13659, 3362,  42468, 6189, 2209, 198391,
+      6463,   2209, 2209,   198391, 6463, 2209, 1062, 3029, 1831, 3029,
+      1065,   2281, 100,    11216, 1110, 56,    10869, 9811, 100,  198391,
+      6463,   100,  9280,   100,  288,   40031, 1680, 1335, 100,  1550,
+      9280,   7265, 244,    1550, 198391, 6463, 1550, 198391, 6463, 42468,
+      4376,   10251, 10760};
+  paddle::framework::LoD lod{{0, srcdata.size()}};
+  words.set_lod(lod);
+  int64_t* pdata = words.mutable_data<int64_t>(
+      {static_cast<int64_t>(srcdata.size()), 1}, paddle::platform::CPUPlace());
+  memcpy(pdata, srcdata.data(), words.numel() * sizeof(int64_t));
+  */
+  LOG(INFO) << "number of input elements: " << words.numel();
+  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
+  cpu_feeds.push_back(&words);
+
+  paddle::framework::LoDTensor output1;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs1;
+  cpu_fetchs1.push_back(&output1);
+
+  int repeat = 100;
+  // Run inference on CPU
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            repeat);
+  LOG(INFO) << output1.lod();
+  LOG(INFO) << output1.dims();
+
+#ifdef PADDLE_WITH_CUDA
+  paddle::framework::LoDTensor output2;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs2;
+  cpu_fetchs2.push_back(&output2);
+
+  // Run inference on CUDA GPU
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
+  LOG(INFO) << output2.lod();
+  LOG(INFO) << output2.dims();
+
+  CheckError<float>(output1, output2);
+#endif
+}
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index 01b8dc0be66..1f5551567c6 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -182,6 +182,9 @@ void TestInference(const std::string& dirname,
         "init_program",
         paddle::platform::DeviceContextPool::Instance().Get(place));
     inference_program = InitProgram(&executor, scope, dirname, is_combined);
+    // std::string binary_str;
+    // inference_program->Proto()->SerializeToString(&binary_str);
+    // LOG(INFO) << binary_str;
     if (use_mkldnn) {
       EnableMKLDNN(inference_program);
     }
--
GitLab
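
Note on running the new test by hand: following the existing inference_test
naming convention in this CMakeLists.txt, the inference_test(nlp) entry should
build a gtest binary named test_inference_nlp, and the test reads the model
directory from --dirname (per the usage string in the test itself). The binary
location and model path below are illustrative assumptions, not taken from
this patch:

    # illustrative invocation; adjust paths to your build tree and model
    ./paddle/fluid/inference/tests/book/test_inference_nlp \
        --dirname=/path/to/your/nlp/model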