From 0d4693bd9c4102062abfd003b770b05e41a0ee76 Mon Sep 17 00:00:00 2001
From: Qiao Longfei
Date: Wed, 24 Oct 2018 15:52:18 +0800
Subject: [PATCH] use logging

---
 fluid/recommendation/ctr/infer.py | 11 ++++++-----
 fluid/recommendation/ctr/train.py | 20 ++++++++++----------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/fluid/recommendation/ctr/infer.py b/fluid/recommendation/ctr/infer.py
index 804498de..8a21e28f 100644
--- a/fluid/recommendation/ctr/infer.py
+++ b/fluid/recommendation/ctr/infer.py
@@ -1,5 +1,5 @@
 import argparse
-import time
+import logging
 
 import numpy as np
 import paddle
@@ -9,9 +9,10 @@ import reader
 from network_conf import ctr_dnn_model
 
 
-def print_log(log_str):
-    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    print(str(time_stamp) + " " + log_str)
+logging.basicConfig(
+    format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger("fluid")
+logger.setLevel(logging.INFO)
 
 
 def parse_args():
@@ -70,7 +71,7 @@ def infer():
                                         feed=feeder.feed(data),
                                         fetch_list=fetch_targets)
             if batch_id % 100 == 0:
-                print_log("TEST --> batch: {} loss: {} auc: {}".format(batch_id, loss_val, auc_val))
+                logger.info("TEST --> batch: {} loss: {} auc: {}".format(batch_id, loss_val, auc_val))
 
 
 if __name__ == '__main__':
diff --git a/fluid/recommendation/ctr/train.py b/fluid/recommendation/ctr/train.py
index 2030971f..1cffff9b 100644
--- a/fluid/recommendation/ctr/train.py
+++ b/fluid/recommendation/ctr/train.py
@@ -1,8 +1,8 @@
 from __future__ import print_function
 
 import argparse
+import logging
 import os
-import time
 
 import paddle
 import paddle.fluid as fluid
@@ -10,10 +10,10 @@ import paddle.fluid as fluid
 import reader
 from network_conf import ctr_dnn_model
 
-
-def print_log(log_str):
-    time_stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
-    print(str(time_stamp) + " " + log_str)
+logging.basicConfig(
+    format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger("fluid")
+logger.setLevel(logging.INFO)
 
 
 def parse_args():
@@ -105,7 +105,7 @@ def train_loop(args, train_program, data_list, loss, auc_var, batch_auc_var):
                 feed=feeder.feed(data),
                 fetch_list=[loss, auc_var, batch_auc_var]
             )
-            print_log("TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}"
+            logger.info("TRAIN --> pass: {} batch: {} loss: {} auc: {}, batch_auc: {}"
                       .format(pass_id, batch_id, loss_val/args.batch_size, auc_val, batch_auc_val))
             if batch_id % 1000 == 0 and batch_id != 0:
                 model_dir = args.model_output_dir + '/batch-' + str(batch_id)
@@ -127,22 +127,22 @@ def train():
     optimizer.minimize(loss)
 
     if args.is_local:
-        print_log("run local training")
+        logger.info("run local training")
         main_program = fluid.default_main_program()
         train_loop(args, main_program, data_list, loss, auc_var, batch_auc_var)
     else:
-        print_log("run dist training")
+        logger.info("run dist training")
         t = fluid.DistributeTranspiler()
         t.transpile(args.trainer_id, pservers=args.endpoints, trainers=args.trainers)
         if args.role == "pserver":
-            print_log("run pserver")
+            logger.info("run pserver")
             prog = t.get_pserver_program(args.current_endpoint)
             startup = t.get_startup_program(args.current_endpoint, pserver_program=prog)
             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(startup)
             exe.run(prog)
        elif args.role == "trainer":
-            print_log("run trainer")
+            logger.info("run trainer")
             train_prog = t.get_trainer_program()
             train_loop(args, train_prog, data_list, loss, auc_var, batch_auc_var)
 
-- 
GitLab
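
For reference, below is a minimal standalone sketch of the logging setup this patch applies to both infer.py and train.py. The "fluid" logger name and the format string are taken verbatim from the diff above; the sample batch/loss/auc values are purely illustrative.

    import logging

    # Same configuration as in the patched scripts: basicConfig() installs a
    # stderr handler on the root logger with a timestamped record format.
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(message)s')
    logger = logging.getLogger("fluid")
    # Setting INFO on the named logger is enough: records propagate to the
    # root handler, whose own level is unset, so logger.info(...) is emitted.
    logger.setLevel(logging.INFO)

    # Illustrative values only.
    batch_id, loss_val, auc_val = 100, 0.47, 0.79
    logger.info("TEST --> batch: {} loss: {} auc: {}".format(batch_id, loss_val, auc_val))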