From 5a10d833db5f6e2eeab2fb13b79b9e149032fd35 Mon Sep 17 00:00:00 2001
From: Kexin Zhao
Date: Wed, 23 May 2018 16:37:52 -0700
Subject: [PATCH] initial commit

---
 .../test_understand_sentiment_conv.py          | 26 +++++++++++--------
 .../test_understand_sentiment_dynamic_rnn.py   | 26 +++++++++++--------
 .../test_understand_sentiment_stacked_lstm.py  | 26 +++++++++++--------
 .../tests/book/notest_understand_sentiment.py  | 26 +++++++++++--------
 4 files changed, 60 insertions(+), 44 deletions(-)

diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py
index 89179fc586..6e10a8a669 100644
--- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py
+++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py
@@ -121,17 +121,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Set up the input by creating a LoDTensor to represent sequences of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1], since a word is simply an index
+    # used to look up the corresponding word vector.
+    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher-level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # lengths 3, 4 and 2, respectively.
+    # Note that the lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high].
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py
index 7db097b3b3..acb569d9f6 100644
--- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py
+++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py
@@ -136,17 +136,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Set up the input by creating a LoDTensor to represent sequences of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1], since a word is simply an index
+    # used to look up the corresponding word vector.
+    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher-level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # lengths 3, 4 and 2, respectively.
+    # Note that the lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high].
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
index 0d7cbe3874..c92ef2a30b 100644
--- a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
+++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
@@ -128,17 +128,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Set up the input by creating a LoDTensor to represent sequences of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1], since a word is simply an index
+    # used to look up the corresponding word vector.
+    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher-level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # lengths 3, 4 and 2, respectively.
+    # Note that the lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high].
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
diff --git a/python/paddle/fluid/tests/book/notest_understand_sentiment.py b/python/paddle/fluid/tests/book/notest_understand_sentiment.py
index 792ed7368d..beebc15774 100644
--- a/python/paddle/fluid/tests/book/notest_understand_sentiment.py
+++ b/python/paddle/fluid/tests/book/notest_understand_sentiment.py
@@ -125,14 +125,6 @@ def stacked_lstm_net(data,
     return avg_cost, accuracy, prediction
 
 
-def create_random_lodtensor(lod, place, low, high):
-    data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
-    res = fluid.LoDTensor()
-    res.set(data, place)
-    res.set_lod([lod])
-    return res
-
-
 def train(word_dict,
           net_method,
           use_cuda,
@@ -242,9 +234,21 @@ def infer(word_dict, use_cuda, save_dirname=None):
 
     word_dict_len = len(word_dict)
 
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
+    # Set up the input by creating a LoDTensor to represent sequences of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1], since a word is simply an index
+    # used to look up the corresponding word vector.
+    # Suppose the length-based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher-level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # lengths 3, 4 and 2, respectively.
+    # Note that the lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high].
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
--
GitLab
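For readers unfamiliar with the length-based lod format used in the comments above, here is a minimal sketch (not part of the patch) of the tensor the patched tests now build. It uses only the fluid.LoDTensor / set / set_lod calls that appear in the removed helper; the vocabulary size and the explicit offset conversion are illustrative assumptions rather than anything the patch specifies.

    import numpy as np
    import paddle.fluid as fluid

    place = fluid.CPUPlace()

    # Length-based lod [[3, 4, 2]]: one lod level, three sentences of
    # lengths 3, 4 and 2, i.e. 9 words in total.
    seq_lens = [3, 4, 2]
    total_words = sum(seq_lens)
    vocab_size = 5000  # assumed vocabulary size, for illustration only

    # Each word is a single int64 index, so the data shape is
    # [total_words] + base_shape = [9, 1].
    data = np.random.randint(0, vocab_size, size=[total_words, 1]).astype("int64")

    tensor_words = fluid.LoDTensor()
    tensor_words.set(data, place)
    # set_lod() expects offset-based lod, so the length-based [[3, 4, 2]]
    # becomes the cumulative offsets [[0, 3, 7, 9]].
    tensor_words.set_lod([[0, 3, 7, 9]])

    # The patched tests obtain an equivalent random tensor in one call:
    # tensor_words = fluid.create_random_lodtensor(
    #     [[3, 4, 2]], [1], place, low=0, high=vocab_size - 1)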