diff --git a/fluid/recommendation/ctr/infer.py b/fluid/recommendation/ctr/infer.py
index b09128cdd1afedcdcf2ee0e103e971749f9fa1ee..804498de5356c3cbd5ac84cfb35a00e8ae045095 100644
--- a/fluid/recommendation/ctr/infer.py
+++ b/fluid/recommendation/ctr/infer.py
@@ -1,4 +1,5 @@
 import argparse
+import time
 
 import numpy as np
 import paddle
@@ -8,6 +9,11 @@ import reader
 from network_conf import ctr_dnn_model
 
 
+def print_log(log_str):
+    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    print(str(time_stamp) + " " + log_str)
+
+
 def parse_args():
     parser = argparse.ArgumentParser(description="PaddlePaddle DeepFM example")
     parser.add_argument(
@@ -49,7 +55,6 @@ def infer():
     with fluid.scope_guard(inference_scope):
         [inference_program, _, fetch_targets] = fluid.io.load_inference_model(
             args.model_path, exe)
-        print(fetch_targets)
 
         def set_zero(var_name):
             param = inference_scope.var(var_name).get_tensor()
@@ -60,14 +65,12 @@ def infer():
         for name in auc_states_names:
             set_zero(name)
 
-        batch_id = 0
-        for data in test_reader():
+        for batch_id, data in enumerate(test_reader()):
             loss_val, auc_val = exe.run(inference_program,
                                         feed=feeder.feed(data),
                                         fetch_list=fetch_targets)
             if batch_id % 100 == 0:
-                print("loss: " + str(loss_val) + " auc_val:" + str(auc_val))
-            batch_id += 1
+                print_log("TEST --> batch: {} loss: {} auc: {}".format(batch_id, loss_val, auc_val))
 
 
 if __name__ == '__main__':
diff --git a/fluid/recommendation/ctr/train.py b/fluid/recommendation/ctr/train.py
index c44345341db37fc4af6cc6a28a664bbb8f6cf095..1711be148c889526d2738dfff67bc579542d0096 100644
--- a/fluid/recommendation/ctr/train.py
+++ b/fluid/recommendation/ctr/train.py
@@ -1,16 +1,17 @@
-import os
-import logging
 import argparse
+import os
+import time
 
+import paddle
 import paddle.fluid as fluid
-from network_conf import ctr_dnn_model
 import reader
-import paddle
+from network_conf import ctr_dnn_model
+
 
 
-logging.basicConfig()
-logger = logging.getLogger("paddle")
-logger.setLevel(logging.INFO)
+def print_log(log_str):
+    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
+    print(str(time_stamp) + " " + log_str)
 
 
 def parse_args():
@@ -73,17 +74,14 @@ def train():
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
     for pass_id in range(args.num_passes):
-        batch_id = 0
-        for data in train_reader():
+        for batch_id, data in enumerate(train_reader()):
             loss_val, auc_val, batch_auc_val = exe.run(
                 fluid.default_main_program(),
                 feed=feeder.feed(data),
                 fetch_list=[loss, auc_var, batch_auc_var]
             )
-            print('pass:' + str(pass_id) + ' batch:' + str(batch_id) +
-                  ' loss: ' + str(loss_val) + " auc: " + str(auc_val) +
-                  " batch_auc: " + str(batch_auc_val))
-            batch_id += 1
+            print_log("TRAIN --> pass: {} batch: {} loss: {} auc: {} batch_auc: {}"
+                      .format(pass_id, batch_id, loss_val, auc_val, batch_auc_val))
             if batch_id % 1000 == 0 and batch_id != 0:
                 model_dir = args.model_output_dir + '/batch-' + str(batch_id)
                 fluid.io.save_inference_model(model_dir, data_name_list, [loss, auc_var], exe)