From d72fb677c967d8fd58bfd49605658fef1b0169f0 Mon Sep 17 00:00:00 2001
From: chenguowei01
Date: Tue, 18 Aug 2020 17:11:29 +0800
Subject: [PATCH] change logging to logger

---
 dygraph/core/infer.py                   |  4 ++--
 dygraph/core/train.py                   |  6 +++---
 dygraph/core/val.py                     | 14 +++++++-------
 dygraph/utils/__init__.py               |  2 +-
 dygraph/utils/{logging.py => logger.py} |  0
 dygraph/utils/utils.py                  | 16 ++++++++--------
 6 files changed, 21 insertions(+), 21 deletions(-)
 rename dygraph/utils/{logging.py => logger.py} (100%)

diff --git a/dygraph/core/infer.py b/dygraph/core/infer.py
index 23890e8e..f86823bc 100644
--- a/dygraph/core/infer.py
+++ b/dygraph/core/infer.py
@@ -21,7 +21,7 @@ import cv2
 import tqdm
 
 from dygraph import utils
-import dygraph.utils.logging as logging
+import dygraph.utils.logger as logger
 
 
 def mkdir(path):
@@ -39,7 +39,7 @@ def infer(model, test_dataset=None, model_dir=None, save_dir='output'):
     added_saved_dir = os.path.join(save_dir, 'added')
     pred_saved_dir = os.path.join(save_dir, 'prediction')
 
-    logging.info("Start to predict...")
+    logger.info("Start to predict...")
     for im, im_info, im_path in tqdm.tqdm(test_dataset):
         im = to_variable(im)
         pred, _ = model(im)
diff --git a/dygraph/core/train.py b/dygraph/core/train.py
index a823265f..94a3ee4e 100644
--- a/dygraph/core/train.py
+++ b/dygraph/core/train.py
@@ -19,7 +19,7 @@ from paddle.fluid.dygraph.parallel import ParallelEnv
 from paddle.fluid.io import DataLoader
 from paddle.incubate.hapi.distributed import DistributedBatchSampler
 
-import dygraph.utils.logging as logging
+import dygraph.utils.logger as logger
 from dygraph.utils import load_pretrained_model
 from dygraph.utils import resume
 from dygraph.utils import Timer, calculate_eta
@@ -111,7 +111,7 @@ def train(model,
                 train_batch_cost = 0.0
                 remain_steps = total_steps - num_steps
                 eta = calculate_eta(remain_steps, avg_train_batch_cost)
-                logging.info(
+                logger.info(
                     "[TRAIN] Epoch={}/{}, Step={}/{}, loss={:.4f}, lr={:.6f}, batch_cost={:.4f}, reader_cost={:.4f} | ETA {}"
                     .format(epoch + 1, num_epochs, step + 1, steps_per_epoch,
                             avg_loss * nranks, lr, avg_train_batch_cost,
@@ -152,7 +152,7 @@ def train(model,
                     best_model_dir = os.path.join(save_dir, "best_model")
                     fluid.save_dygraph(model.state_dict(),
                                        os.path.join(best_model_dir, 'model'))
-                    logging.info(
+                    logger.info(
                         'Current evaluated best model in eval_dataset is epoch_{}, miou={:4f}'
                         .format(best_model_epoch, best_mean_iou))
 
diff --git a/dygraph/core/val.py b/dygraph/core/val.py
index 0623b617..a35f0709 100644
--- a/dygraph/core/val.py
+++ b/dygraph/core/val.py
@@ -20,7 +20,7 @@ import cv2
 from paddle.fluid.dygraph.base import to_variable
 import paddle.fluid as fluid
 
-import dygraph.utils.logging as logging
+import dygraph.utils.logger as logger
 from dygraph.utils import ConfusionMatrix
 from dygraph.utils import Timer, calculate_eta
 
@@ -39,7 +39,7 @@ def evaluate(model,
     total_steps = len(eval_dataset)
     conf_mat = ConfusionMatrix(num_classes, streaming=True)
 
-    logging.info(
+    logger.info(
         "Start to evaluating(total_samples={}, total_steps={})...".format(
             len(eval_dataset), total_steps))
     timer = Timer()
@@ -69,7 +69,7 @@ def evaluate(model,
 
         time_step = timer.elapsed_time()
         remain_step = total_steps - step - 1
-        logging.debug(
+        logger.debug(
             "[EVAL] Epoch={}, Step={}/{}, iou={:4f}, sec/step={:.4f} | ETA {}".
             format(epoch_id, step + 1, total_steps, iou, time_step,
                    calculate_eta(remain_step, time_step)))
@@ -77,9 +77,9 @@ def evaluate(model,
     category_iou, miou = conf_mat.mean_iou()
     category_acc, macc = conf_mat.accuracy()
 
-    logging.info("[EVAL] #Images={} mAcc={:.4f} mIoU={:.4f}".format(
+    logger.info("[EVAL] #Images={} mAcc={:.4f} mIoU={:.4f}".format(
         len(eval_dataset), macc, miou))
-    logging.info("[EVAL] Category IoU: " + str(category_iou))
-    logging.info("[EVAL] Category Acc: " + str(category_acc))
-    logging.info("[EVAL] Kappa:{:.4f} ".format(conf_mat.kappa()))
+    logger.info("[EVAL] Category IoU: " + str(category_iou))
+    logger.info("[EVAL] Category Acc: " + str(category_acc))
+    logger.info("[EVAL] Kappa: {:.4f}".format(conf_mat.kappa()))
     return miou, macc
diff --git a/dygraph/utils/__init__.py b/dygraph/utils/__init__.py
index 68a8136a..071649bb 100644
--- a/dygraph/utils/__init__.py
+++ b/dygraph/utils/__init__.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from . import logging
+from . import logger
 from . import download
 from .metrics import ConfusionMatrix
 from .utils import *
diff --git a/dygraph/utils/logging.py b/dygraph/utils/logger.py
similarity index 100%
rename from dygraph/utils/logging.py
rename to dygraph/utils/logger.py
diff --git a/dygraph/utils/utils.py b/dygraph/utils/utils.py
index 3d4fc62e..50059378 100644
--- a/dygraph/utils/utils.py
+++ b/dygraph/utils/utils.py
@@ -18,7 +18,7 @@ import math
 import cv2
 import paddle.fluid as fluid
 
-from . import logging
+from . import logger
 
 
 def seconds_to_hms(seconds):
@@ -49,7 +49,7 @@ def get_environ_info():
 
 def load_pretrained_model(model, pretrained_model):
     if pretrained_model is not None:
-        logging.info('Load pretrained model from {}'.format(pretrained_model))
+        logger.info('Load pretrained model from {}'.format(pretrained_model))
         if os.path.exists(pretrained_model):
             ckpt_path = os.path.join(pretrained_model, 'model')
             try:
@@ -62,10 +62,10 @@ def load_pretrained_model(model, pretrained_model):
             num_params_loaded = 0
             for k in keys:
                 if k not in para_state_dict:
-                    logging.warning("{} is not in pretrained model".format(k))
+                    logger.warning("{} is not in pretrained model".format(k))
                 elif list(para_state_dict[k].shape) != list(
                         model_state_dict[k].shape):
-                    logging.warning(
+                    logger.warning(
                         "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
                         .format(k, para_state_dict[k].shape,
                                 model_state_dict[k].shape))
@@ -73,7 +73,7 @@ def load_pretrained_model(model, pretrained_model):
                     model_state_dict[k] = para_state_dict[k]
                     num_params_loaded += 1
             model.set_dict(model_state_dict)
-            logging.info("There are {}/{} varaibles are loaded.".format(
+            logger.info("There are {}/{} variables loaded.".format(
                 num_params_loaded, len(model_state_dict)))
 
         else:
@@ -81,12 +81,12 @@ def load_pretrained_model(model, pretrained_model):
             raise ValueError(
                 'The pretrained model directory is not Found: {}'.format(
                     pretrained_model))
     else:
-        logging.info('No pretrained model to load, train from scratch')
+        logger.info('No pretrained model to load, train from scratch')
 
 def resume(model, optimizer, resume_model):
     if resume_model is not None:
-        logging.info('Resume model from {}'.format(resume_model))
+        logger.info('Resume model from {}'.format(resume_model))
         if os.path.exists(resume_model):
             resume_model = os.path.normpath(resume_model)
             ckpt_path = os.path.join(resume_model, 'model')
@@ -102,7 +102,7 @@ def resume(model, optimizer, resume_model):
                 'The resume model directory is not Found: {}'.format(
                     resume_model))
     else:
-        logging.info('No model need to resume')
+        logger.info('No model to resume')
 
 
 def visualize(image, result, save_dir=None, weight=0.6):
-- 
GitLab