Commit d47f10fe authored by Z zhengya01

add gru4rec ce

Parent ecfc61a3
#!/bin/bash
# Continuous-evaluation (CE) runs for gru4rec: train, then pipe the kpi log into _ce.py.
export MKL_NUM_THREADS=1
export OMP_NUM_THREADS=1

# Single-GPU CE run.
cudaid=${gru4rec:=0} # use 0-th card as default
export CUDA_VISIBLE_DEVICES=$cudaid
FLAGS_benchmark=true python train.py --train_dir train_big_data --vocab_path vocab_big.txt --use_cuda 1 --batch_size 500 --model_dir model_output --pass_num 2 --enable_ce | python _ce.py

# Multi-GPU CE run.
cudaid=${gru4rec_4:=0,1,2,3} # use cards 0-3 as default
export CUDA_VISIBLE_DEVICES=$cudaid
FLAGS_benchmark=true python train.py --train_dir train_big_data --vocab_path vocab_big.txt --use_cuda 1 --parallel 1 --num_devices 2 --batch_size 500 --model_dir model_output --pass_num 2 --enable_ce | python _ce.py
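The two commands above pipe the training log straight into _ce.py. For reference only (not part of the commit), the same single-GPU pipe could be reproduced from Python; the flags and paths below simply mirror the first command:

import os
import subprocess

# Sketch: reproduce "FLAGS_benchmark=true python train.py ... --enable_ce | python _ce.py"
# without the shell. Flags and paths mirror the single-GPU command above.
env = dict(os.environ, FLAGS_benchmark="true", CUDA_VISIBLE_DEVICES="0")
train = subprocess.Popen(
    ["python", "train.py",
     "--train_dir", "train_big_data",
     "--vocab_path", "vocab_big.txt",
     "--use_cuda", "1",
     "--batch_size", "500",
     "--model_dir", "model_output",
     "--pass_num", "2",
     "--enable_ce"],
    stdout=subprocess.PIPE, env=env)
subprocess.run(["python", "_ce.py"], stdin=train.stdout)  # Python 3.5+
train.stdout.close()
train.wait()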
_ce.py

# this file is only used for continuous evaluation test!

import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi
from kpi import DurationKpi
from kpi import AccKpi

each_pass_duration_gpu1_kpi = DurationKpi(
    'each_pass_duration_gpu1', 0.08, 0, actived=True)
train_ppl_gpu1_kpi = CostKpi('train_ppl_gpu1', 0.08, 0)
each_pass_duration_gpu4_kpi = DurationKpi(
    'each_pass_duration_gpu4', 0.08, 0, actived=True)
train_ppl_gpu4_kpi = CostKpi('train_ppl_gpu4', 0.08, 0)

tracking_kpis = [
    each_pass_duration_gpu1_kpi,
    train_ppl_gpu1_kpi,
    each_pass_duration_gpu4_kpi,
    train_ppl_gpu4_kpi,
]


def parse_log(log):
    '''
    This method should be implemented by model developers.

    The suggestion:

    each line in the log should be key, value, for example:

    "
    train_cost\t1.0
    test_cost\t1.0
    train_cost\t1.0
    train_cost\t1.0
    train_acc\t1.2
    "
    '''
    for line in log.split('\n'):
        fs = line.strip().split('\t')
        print(fs)
        if len(fs) == 3 and fs[0] == 'kpis':
            kpi_name = fs[1]
            kpi_value = float(fs[2])
            yield kpi_name, kpi_value


def log_to_ce(log):
    kpi_tracker = {}
    for kpi in tracking_kpis:
        kpi_tracker[kpi.name] = kpi

    for (kpi_name, kpi_value) in parse_log(log):
        print(kpi_name, kpi_value)
        kpi_tracker[kpi_name].add_record(kpi_value)
        kpi_tracker[kpi_name].persist()


if __name__ == '__main__':
    log = sys.stdin.read()
    log_to_ce(log)
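The contract between train.py and _ce.py is simply the tab-separated lines of the form kpis<TAB><kpi_name><TAB><value> in the training log; anything else is ignored. The following is a minimal, self-contained sketch of that parsing (it mirrors parse_log above but avoids the kpi package and the ceroot environment variable; the sample values are invented):

# Sketch of the log contract consumed by _ce.py; sample values are illustrative only.
sample_log = "\n".join([
    "step:0 ppl:10000.000",                  # ordinary training output, ignored
    "kpis\ttrain_ppl_gpu1\t45.6",            # picked up by the parser
    "kpis\teach_pass_duration_gpu1\t123.4",
])

def parse_kpi_lines(log):
    # Mirror of parse_log: keep only "kpis\t<name>\t<value>" lines.
    for line in log.split("\n"):
        fields = line.strip().split("\t")
        if len(fields) == 3 and fields[0] == "kpis":
            yield fields[1], float(fields[2])

print(list(parse_kpi_lines(sample_log)))
# [('train_ppl_gpu1', 45.6), ('each_pass_duration_gpu1', 123.4)]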
train.py

@@ -40,6 +40,10 @@ def parse_args():
         '--base_lr', type=float, default=0.01, help='learning rate')
     parser.add_argument(
         '--num_devices', type=int, default=1, help='Number of GPU devices')
+    parser.add_argument(
+        '--enable_ce',
+        action='store_true',
+        help='If set, run the task with continuous evaluation logs.')
     args = parser.parse_args()
     return args
@@ -51,6 +55,9 @@ def get_cards(args):
 def train():
     """ do training """
     args = parse_args()
+    if args.enable_ce:
+        fluid.default_startup_program().random_seed = SEED
+        fluid.default_main_program().random_seed = SEED
     hid_size = args.hid_size
     train_dir = args.train_dir
     vocab_path = args.vocab_path
@@ -84,6 +91,7 @@ def train():
     model_dir = args.model_dir
     fetch_list = [avg_cost.name]
+    ce_info = []
     total_time = 0.0
     for pass_idx in six.moves.xrange(pass_num):
         epoch_idx = pass_idx + 1
@@ -105,8 +113,11 @@ def train():
                 fetch_list=fetch_list)
             avg_ppl = np.exp(ret_avg_cost[0])
             newest_ppl = np.mean(avg_ppl)
+            ce_info.append(newest_ppl)
             if i % args.print_batch == 0:
                 print("step:%d ppl:%.3f" % (i, newest_ppl))
+            if args.enable_ce and i > 1000:
+                break
         t1 = time.time()
         total_time += t1 - t0
@@ -117,8 +128,43 @@ def train():
     fetch_vars = [avg_cost, acc]
     fluid.io.save_inference_model(save_dir, feed_var_names, fetch_vars, exe)
     print("model saved in %s" % save_dir)
+
+    # only for ce
+    if args.enable_ce:
+        ce_ppl = 0
+        try:
+            ce_ppl = ce_info[-2]
+        except:
+            print("ce info error")
+        epoch_idx = args.pass_num
+        device = get_device(args)
+        if args.use_cuda:
+            gpu_num = device[1]
+            print("kpis\teach_pass_duration_gpu%s\t%s" %
+                  (gpu_num, total_time / epoch_idx))
+            print("kpis\ttrain_ppl_gpu%s\t%s" %
+                  (gpu_num, ce_ppl))
+        else:
+            cpu_num = device[1]
+            threads_num = device[2]
+            print("kpis\teach_pass_duration_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, total_time / epoch_idx))
+            print("kpis\ttrain_ppl_cpu%s_thread%s\t%s" %
+                  (cpu_num, threads_num, ce_ppl))
+
     print("finish training")
 
 
+def get_device(args):
+    if args.use_cuda:
+        gpus = os.environ.get("CUDA_VISIBLE_DEVICES", "1")
+        gpu_num = len(gpus.split(','))
+        return "gpu", gpu_num
+    else:
+        threads_num = os.environ.get('NUM_THREADS', 1)
+        cpu_num = os.environ.get('CPU_NUM', 1)
+        return "cpu", int(cpu_num), int(threads_num)
+
+
 if __name__ == "__main__":
     train()
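For illustration (not part of the diff): the kpi name suffixes registered in _ce.py, _gpu1 and _gpu4, follow from get_device counting the comma-separated entries of CUDA_VISIBLE_DEVICES as set by the CE script. A standalone sketch of that counting rule:

import os

def gpu_count_from_env(default="1"):
    # Same counting rule as get_device: number of comma-separated visible devices.
    gpus = os.environ.get("CUDA_VISIBLE_DEVICES", default)
    return len(gpus.split(","))

os.environ["CUDA_VISIBLE_DEVICES"] = "0"        # first run in the CE script
print("each_pass_duration_gpu%s" % gpu_count_from_env())  # -> each_pass_duration_gpu1
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"  # second run in the CE script
print("each_pass_duration_gpu%s" % gpu_count_from_env())  # -> each_pass_duration_gpu4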