Unverified commit 6d3433e3 authored by Yibing Liu, committed by GitHub

Merge pull request #1114 from kuke/text_cls_ce

Enable model ce for text_classification
...
@@ -53,7 +53,7 @@ def main(train_data_file, test_data_file, vocab_file, target_file, emb_file,
         chunk_scheme="IOB",
         num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0)))
-    inference_program = fluid.default_main_program().clone()
+    inference_program = fluid.default_main_program().clone(for_test=True)
     with fluid.program_guard(inference_program):
         test_target = chunk_evaluator.metrics + chunk_evaluator.states
         inference_program = fluid.io.get_inference_program(test_target)
...
#!/bin/bash
#### This file is only used for continuous evaluation.
export CE_MODE_X=1
python train.py cnn | python _ce.py
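
The wrapper exports CE_MODE_X=1 so that train.py and utils.py take their deterministic code paths, then pipes the training log into _ce.py below, which extracts the tab-separated "kpis" lines. For illustration only, a rough Python equivalent of this driver (the shell script above is what CE actually runs; nothing in this sketch is part of the repo):

# Hypothetical driver: run train.py with CE_MODE_X set, feed its stdout to _ce.py.
import os
import subprocess

env = dict(os.environ, CE_MODE_X='1')
train = subprocess.Popen(['python', 'train.py', 'cnn'],
                         stdout=subprocess.PIPE, env=env)
subprocess.check_call(['python', '_ce.py'], stdin=train.stdout, env=env)
train.stdout.close()
train.wait()
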
#### This file is only used for the continuous evaluation test!
import os
import sys
sys.path.append(os.environ['ceroot'])
from kpi import CostKpi, DurationKpi, AccKpi
#### NOTE: kpi.py should be shared across models in some way!
train_acc_kpi = AccKpi('train_acc', 0.005, actived=True)
train_cost_kpi = CostKpi('train_cost', 0.005, actived=True)
train_duration_kpi = DurationKpi('train_duration', 0.05, actived=True)
tracking_kpis = [
train_acc_kpi,
train_cost_kpi,
train_duration_kpi,
]

def parse_log(log):
    """Yield (kpi_name, kpi_value) pairs from tab-separated 'kpis' lines."""
    for line in log.split('\n'):
        fs = line.strip().split('\t')
        print(fs)
        if len(fs) == 3 and fs[0] == 'kpis':
            print("-----%s" % fs)
            kpi_name = fs[1]
            kpi_value = float(fs[2])
            yield kpi_name, kpi_value

def log_to_ce(log):
    """Record every parsed KPI value into its tracker and persist it."""
    kpi_tracker = {}
    for kpi in tracking_kpis:
        kpi_tracker[kpi.name] = kpi

    for (kpi_name, kpi_value) in parse_log(log):
        print(kpi_name, kpi_value)
        kpi_tracker[kpi_name].add_record(kpi_value)
        kpi_tracker[kpi_name].persist()

if __name__ == '__main__':
    log = sys.stdin.read()
    print("*****")
    print(log)
    print("****")
    log_to_ce(log)
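
As a quick sanity check of the parsing logic above, here is a hypothetical log fragment run through parse_log (the numbers are made up; only the tab-separated "kpis" lines are matched, everything else is ignored):

# Hypothetical log fragment; values are illustrative only.
sample_log = ("pass_id: 0, avg_acc: 0.861000, avg_cost: 0.352000\n"
              "kpis\ttrain_acc\t0.861000\n"
              "kpis\ttrain_cost\t0.352000\n"
              "kpis\ttrain_duration\t95.300000\n")

for kpi_name, kpi_value in parse_log(sample_log):
    print(kpi_name, kpi_value)
# yields ('train_acc', 0.861), ('train_cost', 0.352), ('train_duration', 95.3)
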
+import os
 import sys
 import time
 import unittest
...
@@ -53,8 +54,12 @@ def train(train_reader,
     exe = fluid.Executor(place)
     feeder = fluid.DataFeeder(feed_list=[data, label], place=place)
 
+    # For internal continuous evaluation
+    if 'CE_MODE_X' in os.environ:
+        fluid.default_startup_program().random_seed = 110
     exe.run(fluid.default_startup_program())
 
     for pass_id in xrange(pass_num):
+        pass_start = time.time()
         data_size, data_count, total_acc, total_cost = 0, 0, 0.0, 0.0
         for data in train_reader():
             avg_cost_np, avg_acc_np = exe.run(fluid.default_main_program(),
...
@@ -73,6 +78,13 @@ def train(train_reader,
         epoch_model = save_dirname + "/" + "epoch" + str(pass_id)
         fluid.io.save_inference_model(epoch_model, ["words", "label"], acc, exe)
 
+        pass_end = time.time()
+        # For internal continuous evaluation
+        if 'CE_MODE_X' in os.environ:
+            print("kpis\ttrain_acc\t%f" % avg_acc)
+            print("kpis\ttrain_cost\t%f" % avg_cost)
+            print("kpis\ttrain_duration\t%f" % (pass_end - pass_start))
 
 def train_net():
     word_dict, train_reader, test_reader = utils.prepare_data(
...
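
The seed pinned under CE_MODE_X makes parameter initialization identical across runs, so KPI drift reflects code changes rather than initialization noise. A toy, non-fluid illustration of that idea (numpy stands in for the startup program; nothing here is part of the repo):

# Toy illustration: a fixed seed gives identical "initial parameters" every run.
import os
import numpy as np

seed = 110 if 'CE_MODE_X' in os.environ else None
rng = np.random.RandomState(seed)
print(rng.uniform(-0.1, 0.1, size=3))  # same values on every CE run
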
+import os
 import sys
 import time
 import numpy as np
...
@@ -64,15 +65,22 @@ def prepare_data(data_type="imdb",
         raise RuntimeError("No such dataset")
 
     if data_type == "imdb":
-        train_reader = paddle.batch(
-            paddle.reader.shuffle(
-                paddle.dataset.imdb.train(word_dict), buf_size=buf_size),
-            batch_size=batch_size)
-
-        test_reader = paddle.batch(
-            paddle.reader.shuffle(
-                paddle.dataset.imdb.test(word_dict), buf_size=buf_size),
-            batch_size=batch_size)
+        if 'CE_MODE_X' in os.environ:
+            train_reader = paddle.batch(
+                paddle.dataset.imdb.train(word_dict), batch_size=batch_size)
+
+            test_reader = paddle.batch(
+                paddle.dataset.imdb.test(word_dict), batch_size=batch_size)
+        else:
+            train_reader = paddle.batch(
+                paddle.reader.shuffle(
+                    paddle.dataset.imdb.train(word_dict), buf_size=buf_size),
+                batch_size=batch_size)
+
+            test_reader = paddle.batch(
+                paddle.reader.shuffle(
+                    paddle.dataset.imdb.test(word_dict), buf_size=buf_size),
+                batch_size=batch_size)
     else:
         raise RuntimeError("no such dataset")
...
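
Shuffling is skipped under CE_MODE_X so the batch order is identical on every run and the tracked KPIs stay reproducible. A toy sketch of the difference, assuming the same 1.x-era paddle.batch / paddle.reader.shuffle APIs used above (the toy reader stands in for the imdb dataset):

import paddle

def toy_reader():
    # Stand-in for paddle.dataset.imdb.train(word_dict); yields (ids, label).
    for i in range(8):
        yield [i], i % 2

# Deterministic order (the CE_MODE_X branch): same batches every run.
deterministic = paddle.batch(toy_reader, batch_size=4)
# Shuffled order (normal training): batch contents vary from run to run.
shuffled = paddle.batch(paddle.reader.shuffle(toy_reader, buf_size=8), batch_size=4)

print(list(deterministic()))
print(list(shuffled()))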