Commit e8ad56e5 authored by Yibing Liu

Enable DAM to run on Python 3

Parent be80e25e
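This commit makes the Deep Attention Matching network (DAM) scripts run under both Python 2 and Python 3: bare `xrange`/`range` calls move to `six.moves.xrange`, integer divisions that feed loop bounds become floor divisions (`//`), the `cPickle` import falls back to `pickle`, pickles written by Python 2 are loaded with `encoding="bytes"` (so dict keys are accessed via `six.b(...)`), and a small `mkdir` helper replaces the hard failure on a missing save path.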
@@ -88,7 +88,7 @@ class Net(object):
                 initializer=fluid.initializer.Normal(scale=0.1)))
         Hu_stack = [Hu]
-        for index in six.moves.range(self._stack_num):
+        for index in six.moves.xrange(self._stack_num):
             # share parameters
             Hu = layers.block(
                 name="turn_self_stack" + str(index),
@@ -104,7 +104,7 @@ class Net(object):
         # cross attention
         r_a_t_stack = []
         t_a_r_stack = []
-        for index in six.moves.range(self._stack_num + 1):
+        for index in six.moves.xrange(self._stack_num + 1):
            t_a_r = layers.block(
                name="t_attend_r_" + str(index),
                query=Hu_stack[index],
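Note: `xrange` no longer exists in Python 3. `six.moves.xrange` resolves to the built-in `xrange` on Python 2 and to `range` on Python 3, so the loop stays lazy on both. A minimal sketch:

    import six

    for index in six.moves.xrange(5):
        # behaves like xrange(5) on Python 2 and range(5) on Python 3
        print(index)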
......
@@ -7,8 +7,12 @@ import multiprocessing
 import paddle
 import paddle.fluid as fluid
 import utils.reader as reader
-import cPickle as pickle
-from utils.util import print_arguments
+from utils.util import print_arguments, mkdir
+try:
+    import cPickle as pickle  #python 2
+except ImportError as e:
+    import pickle  #python 3
 from model import Net
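Note: Python 3 dropped the separate `cPickle` module; its `pickle` picks up the C accelerator automatically, so the try/except import above is the usual compatibility idiom. Since `six` is already a dependency of these scripts, an equivalent one-liner would be (a sketch, not what the commit uses):

    from six.moves import cPickle as pickle  # cPickle on py2, pickle on py3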
@@ -108,7 +112,7 @@ def parse_args():
 def test(args):
     if not os.path.exists(args.save_path):
-        raise ValueError("Invalid save path %s" % args.save_path)
+        mkdir(args.save_path)
     if not os.path.exists(args.model_path):
         raise ValueError("Invalid model init path %s" % args.model_path)
     # data data_config
@@ -159,7 +163,11 @@ def test(args):
         use_cuda=args.use_cuda, main_program=test_program)
     print("start loading data ...")
-    train_data, val_data, test_data = pickle.load(open(args.data_path, 'rb'))
+    with open(args.data_path, 'rb') as f:
+        if six.PY2:
+            train_data, val_data, test_data = pickle.load(f)
+        else:
+            train_data, val_data, test_data = pickle.load(f, encoding="bytes")
     print("finish loading data ...")
     if args.ext_eval:
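Note: a pickle written by Python 2 stores `str` objects, which Python 3 can only unpickle if told how to decode them; `encoding="bytes"` hands them back as raw `bytes`, which is why the readers below switch to byte keys. A minimal loader sketch (the helper name is illustrative, not from this commit):

    import six
    import pickle

    def load_py2_pickle(path):
        with open(path, 'rb') as f:
            if six.PY2:
                return pickle.load(f)
            # py2 str keys/values come back as bytes under encoding="bytes"
            return pickle.load(f, encoding="bytes")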
@@ -181,7 +189,7 @@ def test(args):
     for it in six.moves.xrange(test_batch_num // dev_count):
         feed_list = []
-        for dev in xrange(dev_count):
+        for dev in six.moves.xrange(dev_count):
             index = it * dev_count + dev
             feed_dict = reader.make_one_batch_input(test_batches, index)
             feed_list.append(feed_dict)
@@ -191,9 +199,9 @@ def test(args):
         scores = np.array(predicts[0])
         print("step = %d" % it)
-        for dev in xrange(dev_count):
+        for dev in six.moves.xrange(dev_count):
             index = it * dev_count + dev
-            for i in xrange(args.batch_size):
+            for i in six.moves.xrange(args.batch_size):
                 score_file.write(
                     str(scores[args.batch_size * dev + i][0]) + '\t' + str(
                         test_batches["label"][index][i]) + '\n')
......
@@ -169,22 +169,32 @@ def train(args):
     if args.word_emb_init is not None:
         print("start loading word embedding init ...")
-        word_emb = np.array(pickle.load(open(args.word_emb_init, 'rb'))).astype(
-            'float32')
+        if six.PY2:
+            word_emb = np.array(pickle.load(open(args.word_emb_init,
+                                                 'rb'))).astype('float32')
+        else:
+            word_emb = np.array(
+                pickle.load(
+                    open(args.word_emb_init, 'rb'), encoding="bytes")).astype(
+                        'float32')
         dam.set_word_embedding(word_emb, place)
         print("finish init word embedding ...")
     print("start loading data ...")
-    train_data, val_data, test_data = pickle.load(open(args.data_path, 'rb'))
+    with open(args.data_path, 'rb') as f:
+        if six.PY2:
+            train_data, val_data, test_data = pickle.load(f)
+        else:
+            train_data, val_data, test_data = pickle.load(f, encoding="bytes")
     print("finish loading data ...")
     val_batches = reader.build_batches(val_data, data_conf)
-    batch_num = len(train_data['y']) / args.batch_size
+    batch_num = len(train_data[six.b('y')]) // args.batch_size
     val_batch_num = len(val_batches["response"])
-    print_step = max(1, batch_num / (dev_count * 100))
-    save_step = max(1, batch_num / (dev_count * 10))
+    print_step = max(1, batch_num // (dev_count * 100))
+    save_step = max(1, batch_num // (dev_count * 10))
     print("begin model training ...")
     print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
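Note: under Python 3, `/` is true division and returns a float even for two ints, so `batch_num` and the step counters would become floats and later `range()` calls and indexing would fail; `//` restores the Python 2 integer semantics on both versions, e.g. `7 // 2 == 3` everywhere, while `7 / 2` is `3` on Python 2 but `3.5` on Python 3.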
@@ -197,7 +207,7 @@ def train(args):
             ave_cost = 0.0
             for it in six.moves.xrange(batch_num // dev_count):
                 feed_list = []
-                for dev in xrange(dev_count):
+                for dev in six.moves.xrange(dev_count):
                     index = it * dev_count + dev
                     feed_dict = reader.make_one_batch_input(train_batches, index)
                     feed_list.append(feed_dict)
@@ -214,16 +224,15 @@ def train(args):
                 if (args.save_path is not None) and (step % save_step == 0):
                     save_path = os.path.join(args.save_path, "step_" + str(step))
                     print("Save model at step %d ... " % step)
-                    print(
-                        time.strftime('%Y-%m-%d %H:%M:%S',
-                                      time.localtime(time.time())))
+                    print(time.strftime('%Y-%m-%d %H:%M:%S',
+                                        time.localtime(time.time())))
                     fluid.io.save_persistables(exe, save_path)
                     score_path = os.path.join(args.save_path, 'score.' + str(step))
                     score_file = open(score_path, 'w')
-                    for it in xrange(val_batch_num // dev_count):
+                    for it in six.moves.xrange(val_batch_num // dev_count):
                         feed_list = []
-                        for dev in xrange(dev_count):
+                        for dev in six.moves.xrange(dev_count):
                             val_index = it * dev_count + dev
                             feed_dict = reader.make_one_batch_input(val_batches,
                                                                     val_index)
@@ -233,9 +242,9 @@ def train(args):
                                 fetch_list=[logits.name])
                             scores = np.array(predicts[0])
-                            for dev in xrange(dev_count):
+                            for dev in six.moves.xrange(dev_count):
                                 val_index = it * dev_count + dev
-                                for i in xrange(args.batch_size):
+                                for i in six.moves.xrange(args.batch_size):
                                     score_file.write(
                                         str(scores[args.batch_size * dev + i][0]) + '\t'
                                         + str(val_batches["label"][val_index][
@@ -250,9 +259,8 @@ def train(args):
                     for p_at in result:
                         out_file.write(str(p_at) + '\n')
                     print('finish evaluation')
-                    print(
-                        time.strftime('%Y-%m-%d %H:%M:%S',
-                                      time.localtime(time.time())))
+                    print(time.strftime('%Y-%m-%d %H:%M:%S',
+                                        time.localtime(time.time())))
 if __name__ == '__main__':
......
 import sys
+import six
 import numpy as np
 from sklearn.metrics import average_precision_score
@@ -7,7 +8,7 @@ def mean_average_precision(sort_data):
     #to do
     count_1 = 0
     sum_precision = 0
-    for index in range(len(sort_data)):
+    for index in six.moves.xrange(len(sort_data)):
         if sort_data[index][1] == 1:
             count_1 += 1
             sum_precision += 1.0 * count_1 / (index + 1)
......
 import sys
+import six
 def get_p_at_n_in_m(data, n, m, ind):
@@ -30,9 +31,9 @@ def evaluate(file_path):
     p_at_2_in_10 = 0.0
     p_at_5_in_10 = 0.0
-    length = len(data) / 10
+    length = len(data) // 10
-    for i in xrange(0, length):
+    for i in six.moves.xrange(0, length):
         ind = i * 10
         assert data[ind][1] == 1
......
+import six
 import numpy as np
 try:
@@ -10,13 +11,13 @@ def unison_shuffle(data, seed=None):
     if seed is not None:
         np.random.seed(seed)
-    y = np.array(data['y'])
-    c = np.array(data['c'])
-    r = np.array(data['r'])
+    y = np.array(data[six.b('y')])
+    c = np.array(data[six.b('c')])
+    r = np.array(data[six.b('r')])
     assert len(y) == len(c) == len(r)
     p = np.random.permutation(len(y))
-    shuffle_data = {'y': y[p], 'c': c[p], 'r': r[p]}
+    shuffle_data = {six.b('y'): y[p], six.b('c'): c[p], six.b('r'): r[p]}
     return shuffle_data
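Note: because the dataset is unpickled with `encoding="bytes"` on Python 3, its dictionary keys arrive as `bytes` rather than `str`. `six.b('y')` produces `b'y'` on Python 3 and a plain `'y'` on Python 2, so a single spelling indexes the dict correctly on both. A small sketch:

    import six

    data = {b'y': [1, 0, 1]}      # keys as unpickled on Python 3
    labels = data[six.b('y')]     # b'y' on py3, 'y' on py2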
@@ -69,9 +70,9 @@ def produce_one_sample(data,
        max_turn_len=50
        return y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len
     '''
-    c = data['c'][index]
-    r = data['r'][index][:]
-    y = data['y'][index]
+    c = data[six.b('c')][index]
+    r = data[six.b('r')][index][:]
+    y = data[six.b('y')][index]
     turns = split_c(c, split_id)
     #normalize turns_c length, nor_turns length is max_turn_num
@@ -105,7 +106,7 @@ def build_one_batch(data,
     _label = []
-    for i in range(conf['batch_size']):
+    for i in six.moves.xrange(conf['batch_size']):
         index = batch_index * conf['batch_size'] + i
         y, nor_turns_nor_c, nor_r, turn_len, term_len, r_len = produce_one_sample(
             data, index, conf['_EOS_'], conf['max_turn_num'],
@@ -149,8 +150,8 @@ def build_batches(data, conf, turn_cut_type='tail', term_cut_type='tail'):
     _label_batches = []
-    batch_len = len(data['y']) / conf['batch_size']
-    for batch_index in range(batch_len):
+    batch_len = len(data[six.b('y')]) // conf['batch_size']
+    for batch_index in six.moves.range(batch_len):
         _turns, _tt_turns_len, _every_turn_len, _response, _response_len, _label = build_one_batch(
             data, batch_index, conf, turn_cut_type='tail', term_cut_type='tail')
@@ -196,8 +197,10 @@ def make_one_batch_input(data_batches, index):
     max_turn_num = turns.shape[1]
     max_turn_len = turns.shape[2]
-    turns_list = [turns[:, i, :] for i in xrange(max_turn_num)]
-    every_turn_len_list = [every_turn_len[:, i] for i in xrange(max_turn_num)]
+    turns_list = [turns[:, i, :] for i in six.moves.xrange(max_turn_num)]
+    every_turn_len_list = [
+        every_turn_len[:, i] for i in six.moves.xrange(max_turn_num)
+    ]
     feed_dict = {}
     for i, turn in enumerate(turns_list):
@@ -208,7 +211,7 @@ def make_one_batch_input(data_batches, index):
     for i, turn_len in enumerate(every_turn_len_list):
         feed_dict["turn_mask_%d" % i] = np.ones(
             (batch_size, max_turn_len, 1)).astype("float32")
-        for row in xrange(batch_size):
+        for row in six.moves.xrange(batch_size):
             feed_dict["turn_mask_%d" % i][row, turn_len[row]:, 0] = 0
     feed_dict["response"] = response
@@ -216,7 +219,7 @@ def make_one_batch_input(data_batches, index):
     feed_dict["response_mask"] = np.ones(
         (batch_size, max_turn_len, 1)).astype("float32")
-    for row in xrange(batch_size):
+    for row in six.moves.xrange(batch_size):
         feed_dict["response_mask"][row, response_len[row]:, 0] = 0
     feed_dict["label"] = np.array([data_batches["label"][index]]).reshape(
@@ -232,14 +235,14 @@ if __name__ == '__main__':
         "max_turn_len": 50,
         "_EOS_": 28270,
     }
-    train, val, test = pickle.load(open('../data/ubuntu/data_small.pkl', 'rb'))
+    with open('../ubuntu/data/data_small.pkl', 'rb') as f:
+        if six.PY2:
+            train, val, test = pickle.load(f)
+        else:
+            train, val, test = pickle.load(f, encoding="bytes")
     print('load data success')
     train_batches = build_batches(train, conf)
     val_batches = build_batches(val, conf)
     test_batches = build_batches(test, conf)
     print('build batches success')
     pickle.dump([train_batches, val_batches, test_batches],
                 open('../data/ubuntu/data_small_xxx.pkl', 'wb'))
     print('dump success')
 import six
+import os
 def print_arguments(args):
@@ -8,6 +9,14 @@ def print_arguments(args):
     print('------------------------------------------------')
+def mkdir(path):
+    if not os.path.isdir(path):
+        mkdir(os.path.split(path)[0])
+    else:
+        return
+    os.mkdir(path)
 def pos_encoding_init():
     pass
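Note: this `mkdir` recursively creates any missing parent directories before the leaf, so it behaves like `os.makedirs` with already-existing paths tolerated. If only Python 3 mattered, a standard-library sketch would be (`exist_ok` requires Python >= 3.2, so it would not cover the py2 path this commit keeps):

    import os

    def mkdir(path):
        # same effect as the recursive helper above on Python 3
        os.makedirs(path, exist_ok=True)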
......