From 7d9f4dcb593b994fcb4e5ec3757d0b0ae21da02f Mon Sep 17 00:00:00 2001 From: HydrogenSulfate <490868991@qq.com> Date: Tue, 1 Nov 2022 04:01:01 +0000 Subject: [PATCH] change Tensor.numpy()[0] to float(Tensor) for 0-D tensor case --- ppcls/arch/backbone/model_zoo/gvt.py | 6 ++---- ppcls/engine/evaluation/classification.py | 3 +-- ppcls/engine/evaluation/retrieval.py | 3 ++- ppcls/engine/train/utils.py | 20 ++++++++++---------- ppcls/metric/metrics.py | 8 ++++---- ppcls/optimizer/__init__.py | 5 +++-- ppcls/utils/misc.py | 2 +- 7 files changed, 23 insertions(+), 24 deletions(-) diff --git a/ppcls/arch/backbone/model_zoo/gvt.py b/ppcls/arch/backbone/model_zoo/gvt.py index 2e1ae0fe..d1afbeca 100644 --- a/ppcls/arch/backbone/model_zoo/gvt.py +++ b/ppcls/arch/backbone/model_zoo/gvt.py @@ -324,8 +324,7 @@ class PyramidVisionTransformer(nn.Layer): self.pos_drops.append(nn.Dropout(p=drop_rate)) dpr = [ - x.numpy()[0] - for x in paddle.linspace(0, drop_path_rate, sum(depths)) + float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths)) ] # stochastic depth decay rule cur = 0 @@ -551,8 +550,7 @@ class ALTGVT(PCPVT): self.wss = wss # transformer encoder dpr = [ - x.numpy()[0] - for x in paddle.linspace(0, drop_path_rate, sum(depths)) + float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths)) ] # stochastic depth decay rule cur = 0 self.blocks = nn.LayerList() diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py index da2b817d..637b54f8 100644 --- a/ppcls/engine/evaluation/classification.py +++ b/ppcls/engine/evaluation/classification.py @@ -126,8 +126,7 @@ def classification_eval(engine, epoch_id=0): for key in loss_dict: if key not in output_info: output_info[key] = AverageMeter(key, '7.5f') - output_info[key].update(loss_dict[key].numpy()[0], - current_samples) + output_info[key].update(float(loss_dict[key]), current_samples) # calc metric if engine.eval_metric_func is not None: diff --git a/ppcls/engine/evaluation/retrieval.py b/ppcls/engine/evaluation/retrieval.py index 753a5dcb..4ec7355b 100644 --- a/ppcls/engine/evaluation/retrieval.py +++ b/ppcls/engine/evaluation/retrieval.py @@ -20,6 +20,7 @@ from typing import Optional import numpy as np import paddle +from ppcls.engine.train.utils import type_name from ppcls.utils import logger @@ -65,7 +66,7 @@ def retrieval_eval(engine, epoch_id=0): engine.eval_metric_func.metric_func_list[ i].descending = False logger.warning( - f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False" + f"re_ranking=True,{type_name(engine.eval_metric_func.metric_func_list[i])}.descending has been set to False" ) # compute distance matrix(The smaller the value, the more similar) diff --git a/ppcls/engine/train/utils.py b/ppcls/engine/train/utils.py index b649c8e8..091a64d8 100644 --- a/ppcls/engine/train/utils.py +++ b/ppcls/engine/train/utils.py @@ -25,8 +25,8 @@ def update_metric(trainer, out, batch, batch_size): for key in metric_dict: if key not in trainer.output_info: trainer.output_info[key] = AverageMeter(key, '7.5f') - trainer.output_info[key].update(metric_dict[key].numpy()[0], - batch_size) + trainer.output_info[key].update( + float(metric_dict[key]), batch_size) def update_loss(trainer, loss_dict, batch_size): @@ -34,12 +34,12 @@ def update_loss(trainer, loss_dict, batch_size): for key in loss_dict: if key not in trainer.output_info: trainer.output_info[key] = AverageMeter(key, '7.5f') - 
trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size) + trainer.output_info[key].update(float(loss_dict[key]), batch_size) def log_info(trainer, batch_size, epoch_id, iter_id): lr_msg = ", ".join([ - "lr({}): {:.8f}".format(lr.__class__.__name__, lr.get_lr()) + "lr({}): {:.8f}".format(type_name(lr), lr.get_lr()) for i, lr in enumerate(trainer.lr_sch) ]) metric_msg = ", ".join([ @@ -54,17 +54,17 @@ def log_info(trainer, batch_size, epoch_id, iter_id): ips_msg = "ips: {:.5f} samples/s".format( batch_size / trainer.time_info["batch_cost"].avg) - eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1) * - trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg + eta_sec = ( + (trainer.config["Global"]["epochs"] - epoch_id + 1) * + trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec)))) logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format( - epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer.iter_per_epoch, - lr_msg, metric_msg, time_msg, ips_msg, eta_msg)) - + epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer. + iter_per_epoch, lr_msg, metric_msg, time_msg, ips_msg, eta_msg)) for i, lr in enumerate(trainer.lr_sch): logger.scaler( - name="lr({})".format(lr.__class__.__name__), + name="lr({})".format(type_name(lr)), value=lr.get_lr(), step=trainer.global_step, writer=trainer.vdl_writer) diff --git a/ppcls/metric/metrics.py b/ppcls/metric/metrics.py index b6dc934f..192ecb06 100644 --- a/ppcls/metric/metrics.py +++ b/ppcls/metric/metrics.py @@ -113,7 +113,7 @@ class mAP(nn.Layer): precision_mask = paddle.multiply(equal_flag, precision) ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag, axis=1) - metric_dict["mAP"] = paddle.mean(ap).numpy()[0] + metric_dict["mAP"] = float(paddle.mean(ap)) return metric_dict @@ -157,7 +157,7 @@ class mINP(nn.Layer): hard_index = paddle.argmax(auxilary, axis=1).astype("float32") all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index) mINP = paddle.mean(all_INP) - metric_dict["mINP"] = mINP.numpy()[0] + metric_dict["mINP"] = float(mINP) return metric_dict @@ -360,7 +360,7 @@ class HammingDistance(MultiLabelMetric): metric_dict["HammingDistance"] = paddle.to_tensor( hamming_loss(target, preds)) self.avg_meters["HammingDistance"].update( - metric_dict["HammingDistance"].numpy()[0], output.shape[0]) + float(metric_dict["HammingDistance"]), output.shape[0]) return metric_dict @@ -400,7 +400,7 @@ class AccuracyScore(MultiLabelMetric): sum(tps) + sum(tns) + sum(fns) + sum(fps)) metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy) self.avg_meters["AccuracyScore"].update( - metric_dict["AccuracyScore"].numpy()[0], output.shape[0]) + float(metric_dict["AccuracyScore"]), output.shape[0]) return metric_dict diff --git a/ppcls/optimizer/__init__.py b/ppcls/optimizer/__init__.py index bdee9f9b..72475ce6 100644 --- a/ppcls/optimizer/__init__.py +++ b/ppcls/optimizer/__init__.py @@ -20,6 +20,7 @@ import copy import paddle from typing import Dict, List +from ppcls.engine.train.utils import type_name from ppcls.utils import logger from . 
import optimizer @@ -111,11 +112,11 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None): if optim_scope.endswith("Loss"): # optimizer for loss for m in model_list[i].sublayers(True): - if m.__class__.__name__ == optim_scope: + if type_name(m) == optim_scope: optim_model.append(m) else: # opmizer for module in model, such as backbone, neck, head... - if optim_scope == model_list[i].__class__.__name__: + if optim_scope == type_name(model_list[i]): optim_model.append(model_list[i]) elif hasattr(model_list[i], optim_scope): optim_model.append(getattr(model_list[i], optim_scope)) diff --git a/ppcls/utils/misc.py b/ppcls/utils/misc.py index 80155524..b63da7c5 100644 --- a/ppcls/utils/misc.py +++ b/ppcls/utils/misc.py @@ -47,7 +47,7 @@ class AverageMeter(object): @property def avg_info(self): if isinstance(self.avg, paddle.Tensor): - self.avg = self.avg.numpy()[0] + self.avg = float(self.avg) return "{}: {:.5f}".format(self.name, self.avg) @property -- GitLab
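
Note on the core change: the motivation is PaddlePaddle's move to true 0-D
Tensors. Once reductions such as paddle.mean() return a 0-D Tensor (shape
[]), Tensor.numpy() yields a 0-d ndarray that can no longer be indexed with
[0], while float(Tensor) extracts the scalar from both 0-D and
single-element 1-D tensors. A minimal sketch of the failure mode, assuming
a PaddlePaddle build with 0-D Tensor semantics (2.5 or later):

    import paddle

    # With 0-D semantics, reductions return a Tensor of shape [].
    loss = paddle.mean(paddle.to_tensor([1.0, 2.0, 3.0]))

    # Old pattern: loss.numpy() is a 0-d ndarray, so indexing it raises
    # "IndexError: too many indices for array".
    # value = loss.numpy()[0]

    # New pattern used throughout this patch: float() works for 0-D
    # tensors and for the older single-element [1]-shaped results alike.
    value = float(loss)
    print(value)  # 2.0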
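
Note on type_name: retrieval.py and optimizer/__init__.py now import
type_name from ppcls.engine.train.utils rather than spelling out
__class__.__name__ inline. The helper itself is not part of this diff;
presumably it is a one-line wrapper along these lines (a sketch, not the
verified PaddleClas source):

    def type_name(obj) -> str:
        """Return the class name of obj, e.g. 'Momentum' for an optimizer."""
        return obj.__class__.__name__

One design point worth flagging in review: ppcls/optimizer/__init__.py now
depends on ppcls.engine.train.utils, an unusual direction for an optimizer
package to point; moving the helper into ppcls/utils would avoid that
coupling.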