diff --git a/04.word2vec/train.py b/04.word2vec/train.py
index 02601856e598a5c828ce9a2197efe6d4f320cc8e..3c28c8e6377d785a4e8d329866c2f253cdaa98fe 100644
--- a/04.word2vec/train.py
+++ b/04.word2vec/train.py
@@ -188,11 +188,11 @@ def infer(use_cuda, params_dirname=None):
         # meaning there is only one level of detail and there is only one sequence of
         # one word on this level.
         # Note that recursive_sequence_lengths should be a list of lists.
-        data1 = [[211]]  # 'among'
-        data2 = [[6]]  # 'a'
-        data3 = [[96]]  # 'group'
-        data4 = [[4]]  # 'of'
-        lod = [[1]]
+        data1 = [[211L]]  # 'among'
+        data2 = [[6L]]  # 'a'
+        data3 = [[96L]]  # 'group'
+        data4 = [[4L]]  # 'of'
+        lod = [[1L]]
 
         first_word = fluid.create_lod_tensor(data1, lod, place)
         second_word = fluid.create_lod_tensor(data2, lod, place)
diff --git a/05.recommender_system/train.py b/05.recommender_system/train.py
index 26e80c9be53c159c0783bd57a5dbcdf522b3d971..7b1f971b0a54f380345fc613d01ae5a24f9cc9eb 100644
--- a/05.recommender_system/train.py
+++ b/05.recommender_system/train.py
@@ -271,26 +271,26 @@ def infer(use_cuda, params_dirname):
         # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
         # level of detail info, indicating that `data` consists of two sequences
         # of length 3 and 2, respectively.
-        user_id = fluid.create_lod_tensor([[1]], [[1]], place)
+        user_id = fluid.create_lod_tensor([[1L]], [[1]], place)
 
         assert feed_target_names[1] == "gender_id"
-        gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
+        gender_id = fluid.create_lod_tensor([[1L]], [[1]], place)
 
         assert feed_target_names[2] == "age_id"
-        age_id = fluid.create_lod_tensor([[0]], [[1]], place)
+        age_id = fluid.create_lod_tensor([[0L]], [[1]], place)
 
         assert feed_target_names[3] == "job_id"
-        job_id = fluid.create_lod_tensor([[10]], [[1]], place)
+        job_id = fluid.create_lod_tensor([[10L]], [[1]], place)
 
         assert feed_target_names[4] == "movie_id"
-        movie_id = fluid.create_lod_tensor([[783]], [[1]], place)
+        movie_id = fluid.create_lod_tensor([[783L]], [[1]], place)
 
         assert feed_target_names[5] == "category_id"
-        category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)
+        category_id = fluid.create_lod_tensor([[10L, 8L, 9L]], [[3]], place)
 
         assert feed_target_names[6] == "movie_title"
-        movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]],
-                                              [[5]], place)
+        movie_title = fluid.create_lod_tensor(
+            [[1069L, 4140L, 2923L, 710L, 988L]], [[5]], place)
 
         # Construct feed as a dictionary of {feed_target_name: feed_target_data}
         # and results will contain a list of data corresponding to fetch_targets.
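Note: the `L` suffixes above make the fed ids Python 2 long integers, which `fluid.create_lod_tensor` converts into an int64 LoDTensor. For reference only, the same int64 input can be built with an explicit numpy dtype instead of Python-2-only long literals; a minimal sketch, where the CPU place and the surrounding setup are assumptions and only the word id 211 ('among') and the lod [[1]] come from the hunk above:

    import numpy as np
    import paddle.fluid as fluid

    # Assumed for the sketch; the real script picks CPU/GPU from use_cuda.
    place = fluid.CPUPlace()

    # Word id 211 ('among') as an explicit int64 array; recursive sequence
    # lengths [[1]] mark a single sequence holding one word.
    data1 = np.array([[211]], dtype='int64')
    first_word = fluid.create_lod_tensor(data1, [[1]], place)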
diff --git a/06.understand_sentiment/train_conv.py b/06.understand_sentiment/train_conv.py
index 7c203feb805e262cc5d027a9720ce555bd9e94db..d8d5c40f7ffef0d7c29913b73052ba25617605d6 100644
--- a/06.understand_sentiment/train_conv.py
+++ b/06.understand_sentiment/train_conv.py
@@ -185,7 +185,7 @@ def infer(use_cuda, params_dirname=None):
         UNK = word_dict['<unk>']
         lod = []
         for c in reviews:
-            lod.append([word_dict.get(words, UNK) for words in c])
+            lod.append([np.int64(word_dict.get(words, UNK)) for words in c])
 
         base_shape = [[len(c) for c in lod]]
 
diff --git a/06.understand_sentiment/train_dyn_rnn.py b/06.understand_sentiment/train_dyn_rnn.py
index 368f4e626d95c0cca47fa005d131598edb1e5fb0..7eaf860ea21f861935f6fcca7af6c1130098ef49 100644
--- a/06.understand_sentiment/train_dyn_rnn.py
+++ b/06.understand_sentiment/train_dyn_rnn.py
@@ -197,7 +197,7 @@ def infer(use_cuda, params_dirname=None):
         UNK = word_dict['<unk>']
         lod = []
         for c in reviews:
-            lod.append([word_dict.get(words, UNK) for words in c])
+            lod.append([np.int64(word_dict.get(words, UNK)) for words in c])
 
         base_shape = [[len(c) for c in lod]]
 
diff --git a/06.understand_sentiment/train_stacked_lstm.py b/06.understand_sentiment/train_stacked_lstm.py
index 66fcdf0933bfb8319e9406a05cfc30c36462d0b7..a3665e548d4851bf77e8eb3e948ac94f46d0aff9 100644
--- a/06.understand_sentiment/train_stacked_lstm.py
+++ b/06.understand_sentiment/train_stacked_lstm.py
@@ -195,7 +195,7 @@ def infer(use_cuda, params_dirname=None):
         UNK = word_dict['<unk>']
         lod = []
         for c in reviews:
-            lod.append([word_dict.get(words, UNK) for words in c])
+            lod.append([np.int64(word_dict.get(words, UNK)) for words in c])
 
         base_shape = [[len(c) for c in lod]]
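All three sentiment scripts build their inference input with the same pattern, now casting each looked-up word id to int64 before the LoDTensor is created. A minimal, self-contained sketch of that pattern; the toy word_dict, the review strings, and the CPU place are placeholders for this sketch, not the real IMDB vocabulary or the scripts' setup:

    import numpy as np
    import paddle.fluid as fluid

    # Assumed for the sketch; the real scripts pick CPU/GPU from use_cuda.
    place = fluid.CPUPlace()

    # Toy stand-in for the IMDB word_dict; '<unk>' is the out-of-vocabulary id.
    word_dict = {'<unk>': 0, 'this': 1, 'is': 2, 'a': 3, 'great': 4, 'movie': 5}
    UNK = word_dict['<unk>']

    reviews = [c.split() for c in ['this is a great movie', 'this is bad']]

    # Same pattern as the patched files: cast every word id to int64 so the
    # resulting LoDTensor matches the int64 word-id input the model expects.
    lod = []
    for c in reviews:
        lod.append([np.int64(word_dict.get(words, UNK)) for words in c])

    base_shape = [[len(c) for c in lod]]
    tensor_words = fluid.create_lod_tensor(lod, base_shape, place)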