diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt
index cddd5a786c45c804014d82012ec3a7ef988491a5..8db3e76e7671df401612be1a8f89d94ae9a72d7c 100644
--- a/paddle/fluid/inference/tests/book/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/book/CMakeLists.txt
@@ -30,5 +30,5 @@ inference_test(label_semantic_roles)
 inference_test(recognize_digits ARGS mlp)
 inference_test(recommender_system)
 #inference_test(rnn_encoder_decoder)
-inference_test(understand_sentiment)
+inference_test(understand_sentiment ARGS conv lstm)
 inference_test(word2vec)
diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
index 55acd95f50906b13a5a906e0bcc2e73a0c7f8ef2..7b75aea73f3c8a587e176db0b0070c8fa45926c5 100644
--- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
@@ -32,16 +32,42 @@ TEST(inference, label_semantic_roles) {
   paddle::framework::LoDTensor word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1,
       ctx_p2, mark;
   paddle::framework::LoD lod{{0, 4, 10}};
-
-  SetupLoDTensor(word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      predicate, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(mark, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  int64_t word_dict_len = 44068;
+  int64_t predicate_dict_len = 3162;
+  int64_t mark_dict_len = 2;
+
+  SetupLoDTensor(word,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(predicate,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(predicate_dict_len - 1));
+  SetupLoDTensor(ctx_n2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_n1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_0,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p1,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(ctx_p2,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
+  SetupLoDTensor(mark,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word);
diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
index 3b29d52880cef1710696074ed8b2fdecf4f9fcca..0167bc0a51e07788c8a3f9785a5d5af0251ebaf1 100644
--- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
@@ -31,7 +31,12 @@ TEST(inference, understand_sentiment) {
 
   paddle::framework::LoDTensor words;
   paddle::framework::LoD lod{{0, 4, 10}};
-  SetupLoDTensor(words, lod, static_cast<int64_t>(0), static_cast<int64_t>(10));
+  int64_t word_dict_len = 5147;
+
+  SetupLoDTensor(words,
+                 lod,
+                 static_cast<int64_t>(0),
+                 static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&words);
diff --git a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
index f5fb3ed36d59dc3001b0b106df553bde5a2fa21d..9248898fdff07637d8f5a454ff4e978c472c983e 100644
--- a/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
+++ b/python/paddle/v2/fluid/tests/book/test_label_semantic_roles.py
@@ -296,7 +296,6 @@ def infer(use_cuda, save_dirname=None):
     print(results[0].lod())
     np_data = np.array(results[0])
     print("Inference Shape: ", np_data.shape)
-    print("Inference results: ", np_data)
 
 
 def main(use_cuda):
diff --git a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
index 6e0206d41db6265e926991fe35cd53513bd3417d..17761288138fbf21f96657ae8a0d7f9f4e060a62 100644
--- a/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
+++ b/python/paddle/v2/fluid/tests/book/test_understand_sentiment.py
@@ -93,7 +93,7 @@ def create_random_lodtensor(lod, place, low, high):
     return res
 
 
-def train(word_dict, net_method, use_cuda, save_dirname=None):
+def train(word_dict, nn_type, use_cuda, save_dirname=None):
     BATCH_SIZE = 128
     PASS_NUM = 5
     dict_dim = len(word_dict)
@@ -102,6 +102,11 @@ def train(word_dict, net_method, use_cuda, save_dirname=None):
     data = fluid.layers.data(
         name="words", shape=[1], dtype="int64", lod_level=1)
     label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+
+    if nn_type == "conv":
+        net_method = convolution_net
+    else:
+        net_method = stacked_lstm_net
     cost, acc_out, prediction = net_method(
         data, label, input_dim=dict_dim, class_dim=class_dim)
 
@@ -132,7 +137,7 @@ def train(word_dict, net_method, use_cuda, save_dirname=None):
             net_method.__name__))
 
 
-def infer(use_cuda, save_dirname=None):
+def infer(word_dict, use_cuda, save_dirname=None):
     if save_dirname is None:
         return
 
@@ -146,10 +151,11 @@ def infer(use_cuda, save_dirname=None):
     [inference_program, feed_target_names,
      fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
 
+    word_dict_len = len(word_dict)
+
     lod = [0, 4, 10]
-    word_dict = paddle.dataset.imdb.word_dict()
     tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+        lod, place, low=0, high=word_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
@@ -164,15 +170,15 @@ def infer(use_cuda, save_dirname=None):
     print("Inference results: ", np_data)
 
 
-def main(word_dict, net_method, use_cuda):
+def main(word_dict, nn_type, use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
 
     # Directory for saving the trained model
-    save_dirname = "understand_sentiment.inference.model"
+    save_dirname = "understand_sentiment_" + nn_type + ".inference.model"
 
-    train(word_dict, net_method, use_cuda, save_dirname)
-    infer(use_cuda, save_dirname)
+    train(word_dict, nn_type, use_cuda, save_dirname)
+    infer(word_dict, use_cuda, save_dirname)
 
 
 class TestUnderstandSentiment(unittest.TestCase):
@@ -191,19 +197,19 @@ class TestUnderstandSentiment(unittest.TestCase):
 
     def test_conv_cpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=convolution_net, use_cuda=False)
+            main(self.word_dict, nn_type="conv", use_cuda=False)
 
     def test_stacked_lstm_cpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=stacked_lstm_net, use_cuda=False)
+            main(self.word_dict, nn_type="lstm", use_cuda=False)
 
     def test_conv_gpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=convolution_net, use_cuda=True)
+            main(self.word_dict, nn_type="conv", use_cuda=True)
 
     def test_stacked_lstm_gpu(self):
         with self.new_program_scope():
-            main(self.word_dict, net_method=stacked_lstm_net, use_cuda=True)
+            main(self.word_dict, nn_type="lstm", use_cuda=True)
 
 
 if __name__ == '__main__':
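
Note: the Python hunks above rely on a create_random_lodtensor helper that is already defined earlier in test_understand_sentiment.py; only its signature appears in a hunk header. For reference, a minimal sketch of such a helper, assuming the LoDTensor set/set_lod bindings used by the v2 fluid book tests, could look like this:

import numpy as np
import paddle.v2.fluid as fluid

def create_random_lodtensor(lod, place, low, high):
    # One random int64 id per sequence element; lod[-1] is the total number of elements.
    data = np.random.randint(low, high + 1, [lod[-1], 1]).astype("int64")
    res = fluid.core.LoDTensor()
    res.set(data, place)   # copy the ndarray onto the given place (CPU or GPU)
    res.set_lod([lod])     # single-level LoD, e.g. [0, 4, 10] describes two sequences
    return res

Passing high = word_dict_len - 1 keeps every generated id inside the vocabulary, which is what both the Python and C++ changes above enforce.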
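The hard-coded dictionary sizes in the C++ tests (word_dict_len = 44068, predicate_dict_len = 3162, mark_dict_len = 2 for label_semantic_roles; word_dict_len = 5147 for understand_sentiment) are meant to mirror the Python-side dataset dictionaries, so the random ids stay inside the embedding tables of the saved models. Assuming the paddle.v2 dataset helpers, the values can be sanity-checked like this:

import paddle.v2 as paddle

word_dict, verb_dict, label_dict = paddle.dataset.conll05.get_dict()
print(len(word_dict), len(verb_dict))        # expected to line up with 44068 and 3162
print(len(paddle.dataset.imdb.word_dict()))  # expected to line up with 5147

mark_dict_len = 2 simply reflects the binary predicate-context mark feature.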
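With the nn_type switch in place, the conv and lstm variants each save and reload their own model directory, so the same entry point can also be driven outside the unittest class. A hypothetical standalone driver, mirroring what the test_* methods above do, would be:

import paddle.v2 as paddle

word_dict = paddle.dataset.imdb.word_dict()
main(word_dict, nn_type="conv", use_cuda=False)  # saves/loads understand_sentiment_conv.inference.model
main(word_dict, nn_type="lstm", use_cuda=False)  # saves/loads understand_sentiment_lstm.inference.model

This is also what the CMakeLists.txt change builds on: inference_test(understand_sentiment ARGS conv lstm) is expected to run the C++ inference test once per argument against the correspondingly named model directory.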