diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index 65db7c7b5008dcb301e741ec17c3623715e10bb8..98780b6881caa96c0356e07a0a3bd040f28efba0 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -117,7 +117,7 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor* executor,
   std::string program_desc_str;
   VLOG(3) << "loading model from " << model_filename;
   ReadBinaryFile(model_filename, &program_desc_str);
-
+  // LOG(INFO) << program_desc_str;
   std::unique_ptr<framework::ProgramDesc> main_program(
       new framework::ProgramDesc(program_desc_str));
diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt
index dbb81462b8273bd701e9c9f530eaf69817abd6a1..90357f99d1defb97eaf7137b947d68443ad316a2 100644
--- a/paddle/fluid/inference/tests/book/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/book/CMakeLists.txt
@@ -35,6 +35,7 @@ inference_test(image_classification ARGS vgg resnet)
 inference_test(label_semantic_roles)
 inference_test(recognize_digits ARGS mlp conv)
 inference_test(recommender_system)
+inference_test(nlp)
 #inference_test(rnn_encoder_decoder)
 #inference_test(understand_sentiment ARGS conv)
 inference_test(word2vec)
diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
new file mode 100644
index 0000000000000000000000000000000000000000..0d6d0adfb235d1cf0d39920878f13e3b6c828750
--- /dev/null
+++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
@@ -0,0 +1,85 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "gflags/gflags.h"
+#include "gtest/gtest.h"
+#include "paddle/fluid/inference/tests/test_helper.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+
+TEST(inference, understand_sentiment) {
+  if (FLAGS_dirname.empty()) {
+    LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
+  }
+
+  LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
+  std::string dirname = FLAGS_dirname;
+
+  // 0. Call `paddle::framework::InitDevices()` to initialize all the devices.
+  // In unittests, this is done in paddle/testing/paddle_gtest_main.cc.
+  paddle::framework::LoDTensor words;
+
+  paddle::framework::LoD lod{{0, 83}};
+  int64_t word_dict_len = 198392;
+  SetupLoDTensor(&words, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  /*
+  std::vector<int64_t> srcdata{
+      784,    784,  1550,   6463,   56,     75693, 6189, 784,    784,  1550,
+      198391, 6463, 42468,  4376,   10251,  10760, 6189, 297,    396,  6463,
+      6463,   1550, 198391, 6463,   22564,  1612,  291,  68,     164,  784,
+      784,    1550, 198391, 6463,   13659,  3362,  42468, 6189,  2209, 198391,
+      6463,   2209, 2209,   198391, 6463,   2209,  1062, 3029,   1831, 3029,
+      1065,   2281, 100,    11216,  1110,   56,    10869, 9811,  100,  198391,
+      6463,   100,  9280,   100,    288,    40031, 1680, 1335,   100,  1550,
+      9280,   7265, 244,    1550,   198391, 6463,  1550, 198391, 6463, 42468,
+      4376,   10251, 10760};
+  paddle::framework::LoD lod{{0, srcdata.size()}};
+  words.set_lod(lod);
+  int64_t* pdata =
+      words.mutable_data<int64_t>({static_cast<int64_t>(srcdata.size()), 1},
+                                  paddle::platform::CPUPlace());
+  memcpy(pdata, srcdata.data(), words.numel() * sizeof(int64_t));
+  */
+  LOG(INFO) << "number of input elements: " << words.numel();
+  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
+  cpu_feeds.push_back(&words);
+
+  paddle::framework::LoDTensor output1;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs1;
+  cpu_fetchs1.push_back(&output1);
+
+  int repeat = 100;
+  // Run inference on CPU
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            repeat);
+  LOG(INFO) << output1.lod();
+  LOG(INFO) << output1.dims();
+
+#ifdef PADDLE_WITH_CUDA
+  paddle::framework::LoDTensor output2;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs2;
+  cpu_fetchs2.push_back(&output2);
+
+  // Run inference on CUDA GPU
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
+  LOG(INFO) << output2.lod();
+  LOG(INFO) << output2.dims();
+
+  CheckError<float>(output1, output2);
+#endif
+}
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index 01b8dc0be662da22fe15a79cd9abfe5fa92c9577..1f5551567c6881775c09bd1e52a713261ee87a16 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -182,6 +182,9 @@ void TestInference(const std::string& dirname,
         "init_program",
         paddle::platform::DeviceContextPool::Instance().Get(place));
     inference_program = InitProgram(&executor, scope, dirname, is_combined);
+    // std::string binary_str;
+    // inference_program->Proto()->SerializeToString(&binary_str);
+    // LOG(INFO) << binary_str;
     if (use_mkldnn) {
       EnableMKLDNN(inference_program);
     }
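
Note on the input setup in the new test: the hard-coded `srcdata` path is commented out, so `words` is filled with random word ids by the `SetupLoDTensor` helper from `test_helper.h`, whose body is not part of this diff. Below is a minimal, standalone C++ sketch of the kind of uniform random-id fill that helper is assumed to perform; the function name `RandomWordIds` is hypothetical and used only for illustration.

#include <cstdint>
#include <random>
#include <vector>

// Hypothetical stand-in for the random fill SetupLoDTensor is assumed to do:
// draw `len` word ids uniformly from [low, high].
std::vector<int64_t> RandomWordIds(size_t len, int64_t low, int64_t high) {
  std::mt19937_64 engine(std::random_device{}());
  std::uniform_int_distribution<int64_t> dist(low, high);
  std::vector<int64_t> ids(len);
  for (auto& id : ids) id = dist(engine);
  return ids;
}

int main() {
  // Mirrors the test: one sequence of 83 ids from a 198392-word dictionary.
  auto ids = RandomWordIds(83, 0, 198392 - 1);
  return ids.empty() ? 1 : 0;
}

To feed a fixed sentence instead of random ids, the commented-out block in the test shows the pattern: set the LoD to {{0, srcdata.size()}}, allocate the buffer with mutable_data<int64_t>, and memcpy the ids into it.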