Commit 5a10d833 authored by Kexin Zhao

initial commit

Parent: 224bd8f0
@@ -121,17 +121,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    # Note that lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high]
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
...
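This hunk (and the matching ones in the other test files below) replaces the local create_random_lodtensor helper, which consumed offset-based LoD such as [0, 4, 10], with a call to fluid.create_random_lodtensor that takes length-based LoD ([[3, 4, 2]]) plus a base_shape. The sketch below is not part of the commit; it only illustrates how a length-based LoD maps onto the offset-based LoDTensor API the removed helper used (lengths [3, 4, 2] correspond to offsets [0, 3, 7, 9]). The helper name and the vocabulary size are hypothetical.

import numpy as np
import paddle.fluid as fluid


def build_random_word_tensor(lod, base_shape, place, low, high):
    # Hypothetical helper (not part of this commit) that mimics what the new
    # fluid.create_random_lodtensor call is expected to build, using only the
    # offset-based LoDTensor calls visible in the removed code.
    lengths = lod[0]  # length-based info, e.g. [3, 4, 2]
    offsets = [0]
    for length in lengths:
        offsets.append(offsets[-1] + length)  # [3, 4, 2] -> [0, 3, 7, 9]
    shape = [offsets[-1]] + base_shape  # 9 words in total, each of shape [1]
    data = np.random.random_integers(low, high, shape).astype("int64")
    res = fluid.LoDTensor()
    res.set(data, place)
    res.set_lod([offsets])  # one LoD level: sentences of 3, 4 and 2 words
    return res


place = fluid.CPUPlace()
word_dict_len = 100  # illustrative vocabulary size, not from the commit
tensor_words = build_random_word_tensor(
    [[3, 4, 2]], [1], place, low=0, high=word_dict_len - 1)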
@@ -136,17 +136,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    # Note that lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high]
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
...
@@ -128,17 +128,21 @@ def infer(use_cuda, inference_program, save_dirname=None):
         param_path=save_dirname,
         place=place)
 
-    def create_random_lodtensor(lod, place, low, high):
-        data = np.random.random_integers(low, high,
-                                         [lod[-1], 1]).astype("int64")
-        res = fluid.LoDTensor()
-        res.set(data, place)
-        res.set_lod([lod])
-        return res
-
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=len(word_dict) - 1)
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    # Note that lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high]
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=len(word_dict) - 1)
 
     results = inferencer.infer({'words': tensor_words})
     print("infer results: ", results)
...
@@ -125,14 +125,6 @@ def stacked_lstm_net(data,
     return avg_cost, accuracy, prediction
 
 
-def create_random_lodtensor(lod, place, low, high):
-    data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64")
-    res = fluid.LoDTensor()
-    res.set(data, place)
-    res.set_lod([lod])
-    return res
-
-
 def train(word_dict,
           net_method,
           use_cuda,
@@ -242,9 +234,21 @@ def infer(word_dict, use_cuda, save_dirname=None):
     word_dict_len = len(word_dict)
 
-    lod = [0, 4, 10]
-    tensor_words = create_random_lodtensor(
-        lod, place, low=0, high=word_dict_len - 1)
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]],
+    # which has only one lod level. Then the created LoDTensor will have only
+    # one higher level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    # Note that lod info should be a list of lists.
+    lod = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high]
+    tensor_words = fluid.create_random_lodtensor(
+        lod, base_shape, place, low=0, high=word_dict_len - 1)
 
     # Construct feed as a dictionary of {feed_target_name: feed_target_data}
     # and results will contain a list of data corresponding to fetch_targets.
...
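The trailing context in the hunk above refers to constructing the feed as a dictionary of {feed_target_name: feed_target_data} and reading results aligned with fetch_targets. As a minimal sketch of that pattern (not taken from this commit), the random tensor_words would typically be fed to a Fluid executor like this, assuming save_dirname holds a model saved with fluid.io.save_inference_model:

import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
exe = fluid.Executor(place)

# save_dirname and tensor_words are assumed to exist as in the test code above:
# save_dirname holds a saved inference model, and tensor_words is the random
# LoDTensor built for the 'words' input.
[inference_program, feed_target_names,
 fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)

# Feed is a dictionary of {feed_target_name: feed_target_data}; results is a
# list of tensors aligned one-to-one with fetch_targets.
results = exe.run(inference_program,
                  feed={feed_target_names[0]: tensor_words},
                  fetch_list=fetch_targets,
                  return_numpy=False)
print("infer results: ", np.array(results[0]))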