Unverified commit aa0cb2e9, authored by pkpk, committed by GitHub

Refine api of emotion detection and dialogue domain classification (#4308)

* Update README.md (#4267)

* test=develop (#4269)

* 3d use new api (#4275)

* PointNet++ and PointRCNN use new API

* Update Readme of Dygraph BERT (#4277)

Fix some typos.

* Update run_classifier_multi_gpu.sh (#4279)

remove the CUDA_VISIBLE_DEVICES

* Update README.md (#4280)

* 17 update api (#4294)

* update1.7 save/load & fluid.data

* update datafeed to dataloader

* Update resnet_acnet.py (#4297)

Bias attr of the square conv should be "False" rather than None in training mode.

* test=develop
Co-authored-by: Kaipeng Deng <dengkaipeng@baidu.com>
Co-authored-by: zhang wenhui <frankwhzhang@126.com>
Co-authored-by: parap1uie-s <parap1uie-s@users.noreply.github.com>
Parent 872f494e
@@ -172,7 +172,7 @@ class ResNetACNet(object):
             act=act if self.deploy else None,
             param_attr=ParamAttr(name=name + "_acsquare_weights"),
             bias_attr=ParamAttr(name=name + "_acsquare_bias")
-            if self.deploy else None,
+            if self.deploy else False,
             name=name + '.acsquare.conv2d.output.1')
         if self.deploy:
...
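The one-word change above is the bug fix named in the commit message: passing `bias_attr=None` to `fluid.layers.conv2d` selects a default bias parameter, whereas `bias_attr=False` creates no bias at all, which is what the training-mode square conv needs here. A minimal sketch of the distinction (input shape and parameter names are illustrative, not taken from the model):

import paddle.fluid as fluid

x = fluid.data(name="img", shape=[None, 3, 32, 32], dtype="float32")

# bias_attr=False: the conv owns no bias parameter (training-mode branch).
conv_no_bias = fluid.layers.conv2d(
    input=x, num_filters=16, filter_size=3, bias_attr=False)

# bias_attr=ParamAttr(...): the conv creates a named, trainable bias
# (deploy mode, where the fused conv carries the merged bias).
conv_with_bias = fluid.layers.conv2d(
    input=x, num_filters=16, filter_size=3,
    bias_attr=fluid.ParamAttr(name="acsquare_bias"))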
@@ -16,7 +16,6 @@
 # limitations under the License.
 """
 from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function
@@ -40,43 +39,55 @@ import math
 np.random.seed(0)
 random.seed(0)
 
 parser = argparse.ArgumentParser(__doc__)
 DEV_COUNT = 1
 model_g = ArgumentGroup(parser, "model", "model configuration and paths.")
-model_g.add_arg("init_checkpoint", str, None, "Init checkpoint to resume training from.")
-model_g.add_arg("checkpoints", str, "./checkpoints", "Path to save checkpoints.")
+model_g.add_arg("init_checkpoint", str, None,
+                "Init checkpoint to resume training from.")
+model_g.add_arg("checkpoints", str, "./checkpoints",
+                "Path to save checkpoints.")
 model_g.add_arg("config_path", str, "./data/input/model.conf", "Model conf.")
 model_g.add_arg("build_dict", bool, False, "Build dict.")
 
 train_g = ArgumentGroup(parser, "training", "training options.")
 train_g.add_arg("cpu_num", int, 3, "Number of Threads.")
 train_g.add_arg("epoch", int, 100, "Number of epoches for training.")
-train_g.add_arg("learning_rate", float, 0.1, "Learning rate used to train with warmup.")
-train_g.add_arg("save_steps", int, 1000, "The steps interval to save checkpoints.")
-train_g.add_arg("validation_steps", int, 100, "The steps interval to evaluate model performance.")
+train_g.add_arg("learning_rate", float, 0.1,
+                "Learning rate used to train with warmup.")
+train_g.add_arg("save_steps", int, 1000,
+                "The steps interval to save checkpoints.")
+train_g.add_arg("validation_steps", int, 100,
+                "The steps interval to evaluate model performance.")
 train_g.add_arg("random_seed", int, 7, "random seed")
-train_g.add_arg("threshold", float, 0.1, "When the confidence exceeds the threshold, the corresponding label is given.")
+train_g.add_arg(
+    "threshold", float, 0.1,
+    "When the confidence exceeds the threshold, the corresponding label is given."
+)
 
 log_g = ArgumentGroup(parser, "logging", "logging related.")
 log_g.add_arg("skip_steps", int, 10, "The steps interval to print loss.")
 
-data_g = ArgumentGroup(parser, "data", "Data paths, vocab paths and data processing options")
+data_g = ArgumentGroup(parser, "data",
                        "Data paths, vocab paths and data processing options")
 data_g.add_arg("data_dir", str, "./data/input/", "Path to training data.")
 data_g.add_arg("save_dir", str, "./data/output/", "Path to save.")
-data_g.add_arg("max_seq_len", int, 50, "Tokens' number of the longest seqence allowed.")
-data_g.add_arg("batch_size", int, 64, "The total number of examples in one batch for training.")
+data_g.add_arg("max_seq_len", int, 50,
+               "Tokens' number of the longest seqence allowed.")
+data_g.add_arg("batch_size", int, 64,
+               "The total number of examples in one batch for training.")
 
 run_type_g = ArgumentGroup(parser, "run_type", "running type options.")
 run_type_g.add_arg("use_cuda", bool, False, "If set, use GPU for training.")
 # run_type_g.add_arg("use_fast_executor", bool, False, "If set, use fast parallel executor (in experiment).")
-run_type_g.add_arg("do_train", bool, True, "Whether to perform evaluation on test data set.")
-run_type_g.add_arg("do_eval", bool, True, "Whether to perform evaluation on test data set.")
-run_type_g.add_arg("do_test", bool, True, "Whether to perform evaluation on test data set.")
+run_type_g.add_arg("do_train", bool, True,
+                   "Whether to perform evaluation on test data set.")
+run_type_g.add_arg("do_eval", bool, True,
+                   "Whether to perform evaluation on test data set.")
+run_type_g.add_arg("do_test", bool, True,
+                   "Whether to perform evaluation on test data set.")
 
 args = parser.parse_args()
 
 
 def get_score(pred_result, label, eval_phase):
     """[get precision recall and f-score]
@@ -139,7 +150,7 @@ def train(args, train_exe, build_res, place):
     pred_label = build_res["pred_label"]
     label = build_res["label"]
     fetch_list = [cost.name, prediction.name, pred_label.name, label.name]
-    train_pyreader = build_res["train_pyreader"]
+    train_data_loader = build_res["train_data_loader"]
     train_prog = build_res["train_prog"]
     steps = 0
     time_begin = time.time()
@@ -147,22 +158,24 @@ def train(args, train_exe, build_res, place):
     logger.info("Begin training")
     for i in range(args.epoch):
         try:
-            for data in train_pyreader():
+            for data in train_data_loader():
                 avg_cost_np, avg_pred_np, pred_label, label = train_exe.run(feed=data, program=compiled_prog, \
                     fetch_list=fetch_list)
                 steps += 1
                 if steps % int(args.skip_steps) == 0:
                     time_end = time.time()
                     used_time = time_end - time_begin
-                    get_score(pred_label, label, eval_phase = "Train")
+                    get_score(pred_label, label, eval_phase="Train")
                     logger.info('loss is {}'.format(avg_cost_np))
-                    logger.info("epoch: %d, step: %d, speed: %f steps/s" % (i, steps, args.skip_steps / used_time))
+                    logger.info("epoch: %d, step: %d, speed: %f steps/s" %
+                                (i, steps, args.skip_steps / used_time))
                     time_begin = time.time()
                 if steps % args.save_steps == 0:
                     save_path = os.path.join(args.checkpoints,
                                              "step_" + str(steps))
-                    fluid.io.save_persistables(train_exe, save_path, train_prog)
-                    logger.info("[save]step %d : save at %s" % (steps, save_path))
+                    fluid.io.save(train_prog, save_path)
+                    logger.info("[save]step %d : save at %s" %
+                                (steps, save_path))
                 if steps % args.validation_steps == 0:
                     if args.do_eval:
                         evaluate(args, test_exe, build_res, "eval")
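Beyond the loader rename, this hunk carries one of the commit's two substantive migrations: `fluid.io.save_persistables(exe, path, program)` becomes the 1.7-style `fluid.io.save(program, path)`, which writes the program's persistables (parameters and optimizer state) under a single prefix and pairs with `fluid.load` for resuming. A minimal round-trip sketch, with an illustrative network and path:

import os
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[None, 4], dtype="float32")
fluid.layers.fc(input=x, size=2)  # give the program something to checkpoint

train_prog = fluid.default_main_program()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())

os.makedirs("./checkpoints", exist_ok=True)
save_path = "./checkpoints/step_1000"  # illustrative prefix
fluid.io.save(train_prog, save_path)   # new-style checkpoint

# Resuming mirrors the save call (cf. the rewritten init_checkpoint in utils below).
fluid.load(train_prog, save_path, exe)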
@@ -173,11 +186,16 @@ def train(args, train_exe, build_res, place):
             logger.error("Train error : %s" % str(e))
             exit(1)
     save_path = os.path.join(args.checkpoints, "step_" + str(steps))
-    fluid.io.save_persistables(train_exe, save_path, train_prog)
+    fluid.io.save(train_prog, save_path)
     logger.info("[save]step %d : save at %s" % (steps, save_path))
 
 
-def evaluate(args, test_exe, build_res, eval_phase, save_result=False, id2intent=None):
+def evaluate(args,
+             test_exe,
+             build_res,
+             eval_phase,
+             save_result=False,
+             id2intent=None):
     """[evaluate on dev/test dataset]
 
     Arguments:
@@ -203,14 +221,14 @@ def evaluate(args, test_exe, build_res, eval_phase, save_result=False, id2intent
     total_cost, total_acc, pred_prob_list, pred_label_list, label_list = [], [], [], [], []
     if eval_phase == "eval":
         test_prog = build_res["eval_compiled_prog"]
-        test_pyreader = build_res["eval_pyreader"]
+        test_data_loader = build_res["eval_data_loader"]
     elif eval_phase == "test":
         test_prog = build_res["test_compiled_prog"]
-        test_pyreader = build_res["test_pyreader"]
+        test_data_loader = build_res["test_data_loader"]
     else:
         exit(1)
     logger.info("-----------------------------------------------------------")
-    for data in test_pyreader():
+    for data in test_data_loader():
         avg_cost_np, avg_pred_np, pred_label, label= test_exe.run(program=test_prog, fetch_list=fetch_list, feed=data, \
             return_numpy=True)
         total_cost.append(avg_cost_np)
@@ -219,13 +237,18 @@ def evaluate(args, test_exe, build_res, eval_phase, save_result=False, id2intent
         label_list.extend(label)
 
     if save_result:
-        logger.info("save result at : %s" % args.save_dir + "/" + eval_phase + ".rst")
+        logger.info("save result at : %s" % args.save_dir + "/" + eval_phase +
+                    ".rst")
         save_dir = args.save_dir
         if not os.path.exists(save_dir):
             logger.warning("save dir not exists, and create it")
             os.makedirs(save_dir)
-        fin = codecs.open(os.path.join(args.data_dir, eval_phase + ".txt"), "r", encoding="utf8")
-        fout = codecs.open(args.save_dir + "/" + eval_phase + ".rst", "w", encoding="utf8")
+        fin = codecs.open(
+            os.path.join(args.data_dir, eval_phase + ".txt"),
+            "r",
+            encoding="utf8")
+        fout = codecs.open(
+            args.save_dir + "/" + eval_phase + ".rst", "w", encoding="utf8")
         for line in pred_prob_list:
             query = fin.readline().rsplit("\t", 1)[0]
             res = []
@@ -245,9 +268,14 @@ def evaluate(args, test_exe, build_res, eval_phase, save_result=False, id2intent
     logger.info("-----------------------------------------------------------")
 
 
-def create_net(args, flow_data, class_dim, dict_dim, place, model_name="textcnn_net", is_infer=False):
-    """[create network and pyreader]
+def create_net(args,
+               flow_data,
+               class_dim,
+               dict_dim,
+               place,
+               model_name="textcnn_net",
+               is_infer=False):
+    """[create network and loader]
 
     Arguments:
         flow_data {[type]} -- [description]
@@ -266,11 +294,23 @@ def create_net(args, flow_data, class_dim, dict_dim, place, model_name="textcnn_
         model = textcnn_net_multi_label
     else:
         return
-    char_list = fluid.data(name="char", shape=[None, args.max_seq_len, 1], dtype="int64", lod_level=0)
-    label = fluid.data(name="label", shape=[None, class_dim], dtype="float32", lod_level=0)  # label data
-    reader = fluid.io.PyReader(feed_list=[char_list, label], capacity=args.batch_size * 10, iterable=True, \
-        return_list=False)
-    output = model(char_list, label, dict_dim,
+    char_list = fluid.data(
+        name="char",
+        shape=[None, args.max_seq_len, 1],
+        dtype="int64",
+        lod_level=0)
+    label = fluid.data(
+        name="label", shape=[None, class_dim], dtype="float32",
+        lod_level=0)  # label data
+    data_loader = fluid.io.DataLoader.from_generator(
+        feed_list=[char_list, label],
+        capacity=args.batch_size * 10,
+        iterable=True,
+        return_list=False)
+    output = model(
+        char_list,
+        label,
+        dict_dim,
         emb_dim=flow_data["model"]["emb_dim"],
         hid_dim=flow_data["model"]["hid_dim"],
         hid_dim2=flow_data["model"]["hid_dim2"],
@@ -281,14 +321,15 @@ def create_net(args, flow_data, class_dim, dict_dim, place, model_name="textcnn_
         max_seq_len=args.max_seq_len)
     if is_infer:
         prediction = output
-        return [reader, prediction]
+        return [data_loader, prediction]
     else:
-        avg_cost, prediction, pred_label, label = output[0], output[1], output[2], output[3]
-        return [reader, avg_cost, prediction, pred_label, label]
+        avg_cost, prediction, pred_label, label = output[0], output[1], output[
+            2], output[3]
+        return [data_loader, avg_cost, prediction, pred_label, label]
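This is the other substantive migration: `fluid.io.PyReader` becomes `fluid.io.DataLoader.from_generator` with the same `feed_list`/`capacity`/`iterable`/`return_list` arguments, fed via `set_sample_list_generator` (see `build_graph` below) instead of `decorate_sample_list_generator`. A self-contained sketch of the pattern with a toy generator; shapes and names are illustrative rather than the model's:

import numpy as np
import paddle.fluid as fluid

max_seq_len, class_dim, batch_size = 50, 10, 4
char_list = fluid.data(
    name="char", shape=[None, max_seq_len, 1], dtype="int64", lod_level=0)
label = fluid.data(name="label", shape=[None, class_dim], dtype="float32")

data_loader = fluid.io.DataLoader.from_generator(
    feed_list=[char_list, label],
    capacity=batch_size * 10,
    iterable=True,
    return_list=False)


def sample_generator():
    # One sample per yield; batching happens in fluid.io.batch below.
    for _ in range(100):
        yield (np.random.randint(0, 5, [max_seq_len, 1]),
               np.random.rand(class_dim).astype("float32"))


place = fluid.CPUPlace()
data_loader.set_sample_list_generator(
    fluid.io.batch(sample_generator, batch_size), places=place)

exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for data in data_loader():  # iterable mode yields feed-ready batches
    pass  # exe.run(feed=data, fetch_list=[...]) would go here, as in train()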
-def build_data_reader(args, char_dict, intent_dict):
-    """[decorate samples for pyreader]
+def build_data_loader(args, char_dict, intent_dict):
+    """[decorate samples for dataloader]
 
     Arguments:
         args {[type]} -- [description]
@@ -298,20 +339,22 @@ def build_data_loader(args, char_dict, intent_dict):
     Returns:
         [type] -- [description]
     """
-    reader_res = {}
+    loader_res = {}
     if args.do_train:
         train_processor = DataReader(char_dict, intent_dict, args.max_seq_len)
         train_data_generator = train_processor.prepare_data(
             data_path=args.data_dir + "train.txt",
             batch_size=args.batch_size,
             mode='train')
-        reader_res["train_data_generator"] = train_data_generator
+        loader_res["train_data_generator"] = train_data_generator
         num_train_examples = train_processor._get_num_examples()
         logger.info("Num train examples: %d" % num_train_examples)
         logger.info("Num train steps: %d" % (math.ceil(num_train_examples * 1.0 / args.batch_size) * \
                     args.epoch // DEV_COUNT))
-        if math.ceil(num_train_examples * 1.0 / args.batch_size) // DEV_COUNT <= 0:
-            logger.error("Num of train steps is less than 0 or equals to 0, exit")
+        if math.ceil(num_train_examples * 1.0 /
+                     args.batch_size) // DEV_COUNT <= 0:
+            logger.error(
+                "Num of train steps is less than 0 or equals to 0, exit")
             exit(1)
     if args.do_eval:
         eval_processor = DataReader(char_dict, intent_dict, args.max_seq_len)
@@ -319,7 +362,7 @@ def build_data_loader(args, char_dict, intent_dict):
             data_path=args.data_dir + "eval.txt",
             batch_size=args.batch_size,
             mode='eval')
-        reader_res["eval_data_generator"] = eval_data_generator
+        loader_res["eval_data_generator"] = eval_data_generator
         num_eval_examples = eval_processor._get_num_examples()
         logger.info("Num eval examples: %d" % num_eval_examples)
     if args.do_test:
@@ -328,11 +371,12 @@ def build_data_loader(args, char_dict, intent_dict):
             data_path=args.data_dir + "test.txt",
             batch_size=args.batch_size,
             mode='test')
-        reader_res["test_data_generator"] = test_data_generator
-    return reader_res
+        loader_res["test_data_generator"] = test_data_generator
+    return loader_res
 
 
-def build_graph(args, model_config, num_labels, dict_dim, place, test_place, reader_res):
+def build_graph(args, model_config, num_labels, dict_dim, place, test_place,
+                loader_res):
     """[build paddle graph]
 
     Arguments:
@@ -341,7 +385,7 @@ def build_graph(args, model_config, num_labels, dict_dim, place, test_place, rea
         num_labels {[type]} -- [description]
         dict_dim {[type]} -- [description]
         place {[type]} -- [description]
-        reader_res {[type]} -- [description]
+        loader_res {[type]} -- [description]
 
     Returns:
         [type] -- [description]
@@ -358,36 +402,42 @@ def build_graph(args, model_config, num_labels, dict_dim, place, test_place, rea
     if args.do_train:
         with fluid.program_guard(train_prog, startup_prog):
             with fluid.unique_name.guard():
-                train_pyreader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
-                    dict_dim, place, model_name="textcnn_net")
-                train_pyreader.decorate_sample_list_generator(reader_res['train_data_generator'], places=place)
-                res["train_pyreader"] = train_pyreader
-                sgd_optimizer = fluid.optimizer.SGD(learning_rate=fluid.layers.exponential_decay(
-                    learning_rate=args.learning_rate, decay_steps=1000, decay_rate=0.5, staircase=True))
+                train_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
+                    dict_dim, place, model_name="textcnn_net")
+                train_data_loader.set_sample_list_generator(
+                    loader_res['train_data_generator'], places=place)
+                res["train_data_loader"] = train_data_loader
+                sgd_optimizer = fluid.optimizer.SGD(
+                    learning_rate=fluid.layers.exponential_decay(
+                        learning_rate=args.learning_rate,
+                        decay_steps=1000,
+                        decay_rate=0.5,
+                        staircase=True))
                 sgd_optimizer.minimize(cost)
     if args.do_eval:
         with fluid.program_guard(eval_prog, startup_prog):
             with fluid.unique_name.guard():
-                eval_pyreader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
-                    dict_dim, test_place, model_name="textcnn_net")
-                eval_pyreader.decorate_sample_list_generator(reader_res['eval_data_generator'], places=test_place)
-                res["eval_pyreader"] = eval_pyreader
+                eval_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
+                    dict_dim, test_place, model_name="textcnn_net")
+                eval_data_loader.set_sample_list_generator(
+                    loader_res['eval_data_generator'], places=test_place)
+                res["eval_data_loader"] = eval_data_loader
     if args.do_test:
         with fluid.program_guard(test_prog, startup_prog):
             with fluid.unique_name.guard():
-                test_pyreader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
-                    dict_dim, test_place, model_name="textcnn_net")
-                test_pyreader.decorate_sample_list_generator(reader_res['test_data_generator'], places=test_place)
-                res["test_pyreader"] = test_pyreader
+                test_data_loader, cost, prediction, pred_label, label = create_net(args, model_config, num_labels, \
+                    dict_dim, test_place, model_name="textcnn_net")
+                test_data_loader.set_sample_list_generator(
+                    loader_res['test_data_generator'], places=test_place)
+                res["test_data_loader"] = test_data_loader
     res["cost"] = cost
     res["prediction"] = prediction
     res["label"] = label
     res["pred_label"] = pred_label
-    res["train_prog"] =train_prog
+    res["train_prog"] = train_prog
     res["eval_prog"] = eval_prog
     res["test_prog"] = test_prog
     return res
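Besides the rename to `set_sample_list_generator`, the optimizer block above is only reflowed; the schedule itself is unchanged. With `staircase=True`, `fluid.layers.exponential_decay` halves the learning rate in discrete jumps: `lr = base_lr * 0.5 ** floor(step / 1000)`. The same construction in isolation, using the flag's default of 0.1:

import paddle.fluid as fluid

base_lr = 0.1  # mirrors the --learning_rate default above
# staircase=True: lr = base_lr * 0.5 ** floor(global_step / 1000)
decayed_lr = fluid.layers.exponential_decay(
    learning_rate=base_lr, decay_steps=1000, decay_rate=0.5, staircase=True)
sgd_optimizer = fluid.optimizer.SGD(learning_rate=decayed_lr)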
@@ -421,8 +471,9 @@ def main(args):
             id2intent[int(value)] = key
     num_labels = len(intent_dict)
     # build model
-    reader_res = build_data_reader(args, char_dict, intent_dict)
-    build_res = build_graph(args, model_config, num_labels, dict_dim, place, test_place, reader_res)
+    loader_res = build_data_loader(args, char_dict, intent_dict)
+    build_res = build_graph(args, model_config, num_labels, dict_dim, place,
+                            test_place, loader_res)
     build_res["place"] = place
     build_res["test_place"] = test_place
     if not (args.do_train or args.do_eval or args.do_test):
@@ -432,11 +483,13 @@ def main(args):
     exe.run(startup_prog)
     if args.init_checkpoint and args.init_checkpoint != "None":
         try:
-            init_checkpoint(exe, args.init_checkpoint, main_program=startup_prog)
+            init_checkpoint(
+                exe, args.init_checkpoint, main_program=startup_prog)
             logger.info("Load model from %s" % args.init_checkpoint)
         except Exception as e:
             logger.exception(str(e))
-            logger.error("Faild load model from %s [%s]" % (args.init_checkpoint, str(e)))
+            logger.error("Faild load model from %s [%s]" %
+                         (args.init_checkpoint, str(e)))
     build_strategy = fluid.compiler.BuildStrategy()
     build_strategy.fuse_all_reduce_ops = False
     exec_strategy = fluid.ExecutionStrategy()
@@ -449,10 +502,12 @@ def main(args):
         exec_strategy=exec_strategy)
     build_res["compiled_prog"] = compiled_prog
     if args.do_test:
-        test_compiled_prog = fluid.compiler.CompiledProgram(build_res["test_prog"])
+        test_compiled_prog = fluid.compiler.CompiledProgram(build_res[
+            "test_prog"])
         build_res["test_compiled_prog"] = test_compiled_prog
     if args.do_eval:
-        eval_compiled_prog = fluid.compiler.CompiledProgram(build_res["eval_prog"])
+        eval_compiled_prog = fluid.compiler.CompiledProgram(build_res[
+            "eval_prog"])
         build_res["eval_compiled_prog"] = eval_compiled_prog
     if args.do_train:
@@ -465,7 +520,6 @@ def main(args):
                  save_result=True, id2intent=id2intent)
 
 if __name__ == "__main__":
     logger.info("the paddle version is %s" % paddle.__version__)
     check_version('1.6.0')
...
@@ -32,7 +32,6 @@ try:
 except ImportError:
     import ConfigParser as cp
 
 random_seed = 7
 logger = logging.getLogger()
 format = "%(asctime)s - %(name)s - %(levelname)s -%(filename)s-%(lineno)4d -%(message)s"
@@ -77,6 +76,7 @@ class ArgumentGroup(object):
     Arguments:
         object {[type]} -- [description]
     """
+
     def __init__(self, parser, title, des):
         self._group = parser.add_argument_group(title=title, description=des)
@@ -107,6 +107,7 @@ class DataReader(object):
     Returns:
         [type] -- [description]
     """
+
     def __init__(self, char_vocab, intent_dict, max_len):
         self._char_vocab = char_vocab
         self._intent_dict = intent_dict
@@ -128,12 +129,17 @@ class DataReader(object):
         #     word_dict_path), "The given word dictionary dose not exist."
         assert os.path.exists(data_path), "The given data file does not exist."
         if mode == "train":
-            train_reader = fluid.io.batch(paddle.reader.shuffle(self.data_reader(data_path, self.max_len, shuffle=True),
-                                          buf_size=batch_size * 100), batch_size)
+            train_reader = fluid.io.batch(
+                paddle.reader.shuffle(
+                    self.data_reader(
+                        data_path, self.max_len, shuffle=True),
+                    buf_size=batch_size * 100),
+                batch_size)
             # train_reader = fluid.io.batch(self.data_reader(data_path), batch_size)
             return train_reader
         else:
-            test_reader = fluid.io.batch(self.data_reader(data_path, self.max_len), batch_size)
+            test_reader = fluid.io.batch(
+                self.data_reader(data_path, self.max_len), batch_size)
             return test_reader
 
     def data_reader(self, file_path, max_len, shuffle=False):
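`prepare_data` composes three generic reader transforms: the raw sample generator from `data_reader`, `paddle.reader.shuffle` over a buffer of `batch_size * 100` samples, and `fluid.io.batch`. The same composition on a toy generator, independent of the class:

import paddle
import paddle.fluid as fluid


def toy_samples():
    # Stand-in for DataReader.data_reader: yields one sample per iteration.
    for i in range(1000):
        yield [i], [i % 2]


batch_size = 64
train_reader = fluid.io.batch(
    paddle.reader.shuffle(
        toy_samples, buf_size=batch_size * 100),
    batch_size)

for batch in train_reader():
    print(len(batch))  # up to 64 (char_ids, label) samples per batch
    break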
@@ -150,7 +156,8 @@ class DataReader(object):
                 char_id_list = list(map(lambda x: 0 if x not in self._char_vocab else int(self._char_vocab[x]), \
                     list(query)))
                 if len(char_id_list) < max_len:
-                    char_id_list.extend([self.padding_id] * (max_len - len(char_id_list)))
+                    char_id_list.extend([self.padding_id] *
+                                        (max_len - len(char_id_list)))
                 char_id_list = char_id_list[:max_len]
                 intent_id_list = [self.padding_id] * self.intent_size
                 for item in intent.split('\2'):
@@ -159,6 +166,7 @@ class DataReader(object):
         if shuffle:
             random.seed(random_seed)
             random.shuffle(self.all_data)
+
         def reader():
             """
             reader
@@ -166,6 +174,7 @@ class DataReader(object):
             for char_id_list, intent_id_list in self.all_data:
                 # print char_id_list, intent_id
                 yield char_id_list, intent_id_list
+
         return reader
@@ -178,6 +187,7 @@ class DataProcesser(object):
     Returns:
         [type] -- [description]
     """
+
     @staticmethod
     def read_dict(filename):
         """
@@ -227,7 +237,8 @@ class DataProcesser(object):
                 intent_dict[intent] = 0
             intent_dict[intent] += 1
         # save char dict
-        with codecs.open("%s/char.dict" % save_dir, "w", encoding="utf8") as f_out:
+        with codecs.open(
+                "%s/char.dict" % save_dir, "w", encoding="utf8") as f_out:
             f_out.write("PAD\0020\n")
             f_out.write("OOV\0021\n")
             char_id = 2
@@ -238,7 +249,8 @@ class DataProcesser(object):
                 f_out.write("%s\002%d\n" % (key, char_id))
                 char_id += 1
         # save intent dict
-        with codecs.open("%s/domain.dict" % save_dir, "w", encoding="utf8") as f_out:
+        with codecs.open(
+                "%s/domain.dict" % save_dir, "w", encoding="utf8") as f_out:
             f_out.write("SYS_OTHER\0020\n")
             intent_id = 1
             for key, value in intent_dict.items():
@@ -249,7 +261,6 @@ class DataProcesser(object):
                 intent_id += 1
 
 
 class ConfigReader(object):
     """[read model config file]
@@ -282,49 +293,13 @@ class ConfigReader(object):
         return flow_data
 
 
-def init_pretraining_params(exe,
-                            pretraining_params_path,
-                            main_program,
-                            use_fp16=False):
-    """load params of pretrained model, NOT including moment, learning_rate"""
-    assert os.path.exists(pretraining_params_path
-                          ), "[%s] cann't be found." % pretraining_params_path
-
-    def _existed_params(var):
-        if not isinstance(var, fluid.framework.Parameter):
-            return False
-        return os.path.exists(os.path.join(pretraining_params_path, var.name))
-
-    fluid.io.load_vars(
-        exe,
-        pretraining_params_path,
-        main_program=main_program,
-        predicate=_existed_params)
-    print("Load pretraining parameters from {}.".format(
-        pretraining_params_path))
-
-
 def init_checkpoint(exe, init_checkpoint_path, main_program):
     """
     Init CheckPoint
     """
-    assert os.path.exists(
-        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path
-
-    def existed_persitables(var):
-        """
-        If existed presitabels
-        """
-        if not fluid.io.is_persistable(var):
-            return False
-        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
-
-    fluid.io.load_vars(
-        exe,
-        init_checkpoint_path,
-        main_program=main_program,
-        predicate=existed_persitables)
-    print ("Load model from {}".format(init_checkpoint_path))
+    fluid.load(main_program, init_checkpoint_path, exe)
+    print("Load model from {}".format(init_checkpoint_path))
 
 
 def print_arguments(args):
     """
@@ -350,5 +325,3 @@ def check_version(version='1.6.0'):
     except Exception as e:
         logger.error(err)
         sys.exit(1)
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
 Emotion Detection Task
 """
@@ -38,9 +37,7 @@ import reader
 import utils
 
 
-def create_model(args,
-                 num_labels,
-                 is_prediction=False):
+def create_model(args, num_labels, is_prediction=False):
     """
     Create Model for Emotion Detection
     """
@@ -77,10 +74,17 @@ def create_model(args, num_labels, is_prediction=False):
         raise ValueError("Unknown network type!")
 
     if is_prediction:
-        probs = network(data, seq_len, None, args.vocab_size, class_dim=num_labels, is_prediction=True)
+        probs = network(
+            data,
+            seq_len,
+            None,
+            args.vocab_size,
+            class_dim=num_labels,
+            is_prediction=True)
         return loader, probs, [data.name, seq_len.name]
 
-    avg_loss, probs = network(data, seq_len, label, args.vocab_size, class_dim=num_labels)
+    avg_loss, probs = network(
+        data, seq_len, label, args.vocab_size, class_dim=num_labels)
     num_seqs = fluid.layers.create_tensor(dtype='int64')
     accuracy = fluid.layers.accuracy(input=probs, label=label, total=num_seqs)
     return loader, avg_loss, accuracy, num_seqs
@@ -142,7 +146,8 @@ def main(args):
     exe = fluid.Executor(place)
 
     task_name = args.task_name.lower()
-    processor = reader.EmoTectProcessor(data_dir=args.data_dir,
-                                        vocab_path=args.vocab_path,
-                                        random_seed=args.random_seed)
+    processor = reader.EmoTectProcessor(
+        data_dir=args.data_dir,
+        vocab_path=args.vocab_path,
+        random_seed=args.random_seed)
     #num_labels = len(processor.get_labels())
@@ -173,9 +178,7 @@ def main(args):
         with fluid.program_guard(train_program, startup_prog):
             with fluid.unique_name.guard():
                 train_loader, loss, accuracy, num_seqs = create_model(
-                    args,
-                    num_labels=num_labels,
-                    is_prediction=False)
+                    args, num_labels=num_labels, is_prediction=False)
                 sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=args.lr)
                 sgd_optimizer.minimize(loss)
@@ -189,37 +192,27 @@ def main(args):
     if args.do_val:
         if args.do_train:
             test_data_generator = processor.data_generator(
-                batch_size=args.batch_size,
-                phase='dev',
-                epoch=1)
+                batch_size=args.batch_size, phase='dev', epoch=1)
         else:
             test_data_generator = processor.data_generator(
-                batch_size=args.batch_size,
-                phase='test',
-                epoch=1)
+                batch_size=args.batch_size, phase='test', epoch=1)
 
         test_prog = fluid.Program()
         with fluid.program_guard(test_prog, startup_prog):
             with fluid.unique_name.guard():
                 test_loader, loss, accuracy, num_seqs = create_model(
-                    args,
-                    num_labels=num_labels,
-                    is_prediction=False)
+                    args, num_labels=num_labels, is_prediction=False)
         test_prog = test_prog.clone(for_test=True)
 
     if args.do_infer:
         infer_data_generator = processor.data_generator(
-            batch_size=args.batch_size,
-            phase='infer',
-            epoch=1)
+            batch_size=args.batch_size, phase='infer', epoch=1)
 
         test_prog = fluid.Program()
         with fluid.program_guard(test_prog, startup_prog):
             with fluid.unique_name.guard():
                 infer_loader, probs, _ = create_model(
-                    args,
-                    num_labels=num_labels,
-                    is_prediction=True)
+                    args, num_labels=num_labels, is_prediction=True)
         test_prog = test_prog.clone(for_test=True)
 
     exe.run(startup_prog)
@@ -292,8 +285,9 @@ def main(args):
                     time_begin = time.time()
 
                 if steps % args.save_steps == 0:
-                    save_path = os.path.join(args.save_checkpoint_dir, "step_" + str(steps))
-                    fluid.io.save_persistables(exe, save_path, train_program)
+                    save_path = os.path.join(args.save_checkpoint_dir,
+                                             "step_" + str(steps))
+                    fluid.save(train_program, save_path)
 
                 if steps % args.validation_steps == 0:
                     # evaluate on dev set
@@ -306,11 +300,11 @@ def main(args):
             print("final step: %d " % steps)
             if args.do_val:
                 evaluate(test_exe, test_prog, test_loader,
-                         [loss.name, accuracy.name, num_seqs.name],
-                         "dev")
+                         [loss.name, accuracy.name, num_seqs.name], "dev")
 
-            save_path = os.path.join(args.save_checkpoint_dir, "step_" + str(steps))
-            fluid.io.save_persistables(exe, save_path, train_program)
+            save_path = os.path.join(args.save_checkpoint_dir,
+                                     "step_" + str(steps))
+            fluid.save(train_program, save_path)
             train_loader.reset()
             break
@@ -334,15 +328,12 @@ def main(args):
     if not args.do_train and args.do_val:
         print("Final test result:")
         evaluate(test_exe, test_prog, test_loader,
-                 [loss.name, accuracy.name, num_seqs.name],
-                 "test")
+                 [loss.name, accuracy.name, num_seqs.name], "test")
 
     # infer
     if args.do_infer:
         print("Final infer result:")
-        infer(test_exe, test_prog, infer_loader,
-              [probs.name],
-              "infer")
+        infer(test_exe, test_prog, infer_loader, [probs.name], "infer")
 
 
 def get_cards():
...
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
 Emotion Detection Task, based on ERNIE
 """
@@ -350,7 +349,7 @@ def main(args):
                 if steps % args.save_steps == 0:
                     save_path = os.path.join(args.save_checkpoint_dir, "step_" + str(steps))
-                    fluid.io.save_persistables(exe, save_path, train_program)
+                    fluid.save(train_program, save_path)
 
                 if steps % args.validation_steps == 0:
                     # evaluate dev set
@@ -369,7 +368,7 @@ def main(args):
         except fluid.core.EOFException:
             save_path = os.path.join(args.save_checkpoint_dir, "step_" + str(steps))
-            fluid.io.save_persistables(exe, save_path, train_program)
+            fluid.save(train_program, save_path)
             train_pyreader.reset()
             break
...
@@ -11,7 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 """
 EmoTect utilities.
 """
@@ -29,27 +28,13 @@ import paddle
 import paddle.fluid as fluid
 import numpy as np
 
 
 def init_checkpoint(exe, init_checkpoint_path, main_program):
     """
     Init CheckPoint
     """
-    assert os.path.exists(
-        init_checkpoint_path), "[%s] cann't be found." % init_checkpoint_path
-
-    def existed_persitables(var):
-        """
-        If existed presitabels
-        """
-        if not fluid.io.is_persistable(var):
-            return False
-        return os.path.exists(os.path.join(init_checkpoint_path, var.name))
-
-    fluid.io.load_vars(
-        exe,
-        init_checkpoint_path,
-        main_program=main_program,
-        predicate=existed_persitables)
+    fluid.load(main_program, init_checkpoint_path, exe)
     print("Load model from {}".format(init_checkpoint_path))
 
 
 def word2id(word_dict, query):
@@ -57,8 +42,10 @@ def word2id(word_dict, query):
     Convert word sequence into id list
     """
     unk_id = len(word_dict)
-    wids = [word_dict[w] if w in word_dict else unk_id
-            for w in query.strip().split(" ")]
+    wids = [
+        word_dict[w] if w in word_dict else unk_id
+        for w in query.strip().split(" ")
+    ]
     return wids
...
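The reformatted `word2id` is behavior-identical: unknown tokens map to `len(word_dict)`, one past the largest known id. A quick usage sketch with a made-up vocabulary:

word_dict = {"happy": 0, "sad": 1}  # illustrative two-word vocabulary


def word2id(word_dict, query):
    unk_id = len(word_dict)
    wids = [
        word_dict[w] if w in word_dict else unk_id
        for w in query.strip().split(" ")
    ]
    return wids


print(word2id(word_dict, "happy but sad"))  # -> [0, 2, 1]; "but" is OOV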
@@ -114,7 +114,6 @@ loss, data_list = model(dict_dim, emb_dim)
 sgd = fluid.optimizer.SGD(learning_rate=args.base_lr)
 sgd.minimize(loss)
 place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
-feeder = fluid.DataFeeder(feed_list=data_list, place=place)
 exe = fluid.Executor(place)
 exe.run(fluid.default_startup_program())
 for batch_id in range(100):
...
@@ -136,17 +136,19 @@ def start_train(args):
     startup_program = fluid.default_startup_program()
     loop_program = fluid.default_main_program()
 
-    feeder = fluid.DataFeeder(feed_list=all_slots, place=place)
     exe = fluid.Executor(place)
     exe.run(startup_program)
+    loader = fluid.io.DataLoader.from_generator(
+        feed_list=all_slots, capacity=10000, iterable=True)
+    loader.set_sample_list_generator(train_reader, places=place)
 
     total_time = 0
     ce_info = []
     for pass_id in range(args.epochs):
         start_time = time.time()
-        for batch_id, data in enumerate(train_reader()):
+        for batch_id, data in enumerate(loader()):
             loss_val, correct_val = exe.run(loop_program,
-                                            feed=feeder.feed(data),
+                                            feed=data,
                                             fetch_list=[avg_cost, correct])
             logger.info("TRAIN --> pass: {} batch_id: {} avg_cost: {}, acc: {}"
                         .format(pass_id, batch_id, loss_val,
...
@@ -87,9 +87,12 @@ def train(args):
     optimizer.minimize(avg_cost)
 
     data_list = [var.name for var in train_input_data]
-    feeder = fluid.DataFeeder(feed_list=data_list, place=place)
+    print(data_list)
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
+    loader = fluid.io.DataLoader.from_generator(
+        feed_list=train_input_data, capacity=10000, iterable=True)
+    loader.set_sample_list_generator(train_reader, places=place)
     if parallel:
         train_exe = fluid.ParallelExecutor(
             use_cuda=use_cuda, loss_name=avg_cost.name)
@@ -103,10 +106,10 @@ def train(args):
         print("epoch_%d start" % epoch_idx)
         t0 = time.time()
         i = 0
-        for batch_id, data in enumerate(train_reader()):
+        for batch_id, data in enumerate(loader()):
             i += 1
             loss_val, correct_val = train_exe.run(
-                feed=feeder.feed(data), fetch_list=[avg_cost.name, acc.name])
+                feed=data, fetch_list=[avg_cost.name, acc.name])
             ce_info.append(float(np.mean(correct_val)) / args.batch_size)
             if i % args.print_batch == 0:
                 logger.info(
...
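The last three hunks apply the same recipe: drop `fluid.DataFeeder` and let an iterable `DataLoader` hand back feed-ready batches, so `exe.run(feed=data, ...)` consumes the loader's output directly (and the loader can prefetch up to `capacity` batches in the background). A self-contained sketch of the before/after loop on a toy network; every name here is illustrative:

import numpy as np
import paddle.fluid as fluid

x = fluid.data(name="x", shape=[None, 4], dtype="float32")
y = fluid.layers.fc(input=x, size=1)
place = fluid.CPUPlace()
exe = fluid.Executor(place)


def train_reader():
    # Sample-list generator: each yield is a list of (feature,) tuples.
    for _ in range(10):
        yield [(np.random.rand(4).astype("float32"), ) for _ in range(8)]


# Before this commit: feeder = fluid.DataFeeder(feed_list=[x], place=place)
# and each step called exe.run(feed=feeder.feed(data), ...).
loader = fluid.io.DataLoader.from_generator(
    feed_list=[x], capacity=10000, iterable=True)
loader.set_sample_list_generator(train_reader, places=place)

exe.run(fluid.default_startup_program())
for data in loader():  # batches arrive feed-ready
    out = exe.run(fluid.default_main_program(), feed=data, fetch_list=[y])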