From f95e05a388d9d9fb541affd9480dc2ae8636d04f Mon Sep 17 00:00:00 2001
From: Liu Yiqun
Date: Mon, 12 Feb 2018 06:07:48 +0000
Subject: [PATCH] Refine the inference unittests.

---
 .../fluid/inference/tests/book/CMakeLists.txt |  2 +-
 .../test_inference_label_semantic_roles.cc    | 46 +++++++++++++++----
 .../test_inference_understand_sentiment.cc    |  7 ++-
 .../tests/book/test_label_semantic_roles.py   |  1 -
 .../tests/book/test_understand_sentiment.py   | 30 +++++++-----
 5 files changed, 61 insertions(+), 25 deletions(-)

diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt
index cddd5a786c..8db3e76e76 100644
--- a/paddle/fluid/inference/tests/book/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/book/CMakeLists.txt
@@ -30,5 +30,5 @@ inference_test(label_semantic_roles)
 inference_test(recognize_digits ARGS mlp)
 inference_test(recommender_system)
 #inference_test(rnn_encoder_decoder)
-inference_test(understand_sentiment)
+inference_test(understand_sentiment ARGS conv lstm)
 inference_test(word2vec)
diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
index 55acd95f50..7b75aea73f 100644
--- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
@@ -32,16 +32,42 @@ TEST(inference, label_semantic_roles) {
   paddle::framework::LoDTensor word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1,
       ctx_p2, mark;
   paddle::framework::LoD lod{{0, 4, 10}};
-
-  SetupLoDTensor(word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      predicate, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(mark, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  int64_t word_dict_len = 44068;
+  int64_t predicate_dict_len = 3162;
+  int64_t mark_dict_len = 2;
+
+  SetupLoDTensor(word,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(predicate,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(predicate_dict_len - 1));
+  SetupLoDTensor(ctx_n2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_n1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_0,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(mark,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word);
diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
index 3b29d52880..0167bc0a51 100644
--- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
@@ -31,7 +31,12 @@ TEST(inference, understand_sentiment) {
 
   paddle::framework::LoDTensor words;
   paddle::framework::LoD lod{{0, 4, 10}};
-  SetupLoDTensor(words, lod, static_cast<int64_t>(0), static_cast<int64_t>(10));
+  int64_t word_dict_len = 5147;
+
+  SetupLoDTensor(words,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&words);
diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
index f5fb3ed36d..9248898fdf 100644
--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
@@ -296,7 +296,6 @@ def infer(use_cuda, save_dirname=None):
     print(results[0].lod())
     np_data = np.array(results[0])
     print("Inference Shape: ", np_data.shape)
-    print("Inference results: ", np_data)
 
 
 def main(use_cuda):
diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
index 6e0206d41d..1776128813 100644
--- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
@@ -93,7 +93,7 @@ def create_random_lodtensor(lod, place, low, high):
     return res
 
 
-def train(word_dict, net_method, use_cuda, save_dirname=None):
+def train(word_dict, nn_type, use_cuda, save_dirname=None):
     BATCH_SIZE = 128
     PASS_NUM = 5
     dict_dim = len(word_dict)
@@ -102,6 +102,11 @@ def train(word_dict, net_method, use_cuda, save_dirname=None):
     data = fluid.layers.data(
         name="words", shape=[1], dtype="int64", lod_level=1)
     label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+
+    if nn_type == "conv":
+        net_method = convolution_net
+    else:
+        net_method = stacked_lstm_net
     cost, acc_out, prediction = net_method(
         data, label, input_dim=dict_dim, class_dim=class_dim)
@@ -132,7 +137,7 @@ def train(word_dict, net_method, use_cuda, save_dirname=None):
             net_method.__name__))
 
 
-def infer(use_cuda, save_dirname=None):
+def infer(word_dict, use_cuda, save_dirname=None):
     if save_dirname is None:
         return
 
@@ -146,10 +151,11 @@ def infer(use_cuda, save_dirname=None):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 
+    word_dict_len = len(word_dict)
+
     lod = [0, 4, 10]
-    word_dict = paddle.dataset.imdb.word_dict()
     tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+        lod, place, low=0, high=word_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -164,15 +170,15 @@ def infer(use_cuda, save_dirname=None):
     print("Inference results: ", np_data)
 
 
-def main(word_dict, net_method, use_cuda):
+def main(word_dict, nn_type, use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
 
     # Directory for saving the trained model
-    save_dirname = "understand_sentiment.inference.model"
+    save_dirname = "understand_sentiment_" + nn_type + ".inference.model"
 
-    train(word_dict, net_method, use_cuda, save_dirname)
-    infer(use_cuda, save_dirname)
+    train(word_dict, nn_type, use_cuda, save_dirname)
+    infer(word_dict, use_cuda, save_dirname)
 
 
 class TestUnderstandSentiment(unittest.TestCase):
@@ -191,19 +197,19 @@ class TestUnderstandSentiment(unittest.TestCase):
 
     def test_conv_cpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=convolution_net, use_cuda=False)
+            main(self.word_dict, nn_type="conv", use_cuda=False)
 
     def test_stacked_lstm_cpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=stacked_lstm_net, use_cuda=False)
+            main(self.word_dict, nn_type="lstm", use_cuda=False)
 
     def test_conv_gpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=convolution_net, use_cuda=True)
+            main(self.word_dict, nn_type="conv", use_cuda=True)
 
     def test_stacked_lstm_gpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=stacked_lstm_net, use_cuda=True)
+            main(self.word_dict, nn_type="lstm", use_cuda=True)
 
 
 if __name__ == '__main__':
-- 
GitLab
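Note on the helper the refactored infer() relies on: create_random_lodtensor appears in the hunks above only as context (its body is not part of this patch). The sketch below is an assumption of what that helper does in this era's paddle.v2.fluid API, shown to make the high=word_dict_len - 1 change concrete; it is not code from the patch.

    # Minimal sketch (assumption, not part of this patch): build a random
    # int64 LoDTensor whose ids stay inside [low, high], with the given LoD.
    import numpy as np
    import paddle.v2.fluid as fluid

    def create_random_lodtensor(lod, place, low, high):
        # lod[-1] is the total number of words, e.g. 10 for lod = [0, 4, 10];
        # high = word_dict_len - 1 keeps every id inside the embedding table.
        data = np.random.randint(low, high + 1, size=[lod[-1], 1]).astype("int64")
        res = fluid.LoDTensor()
        res.set(data, place)   # copy the numpy data onto the target place
        res.set_lod([lod])     # mark the sequence boundaries 0-4 and 4-10
        return res

This mirrors the C++ tests above, where each SetupLoDTensor call now bounds its random ids by the corresponding *_dict_len - 1 instead of a hard-coded constant.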