diff --git a/ppcls/arch/backbone/model_zoo/gvt.py b/ppcls/arch/backbone/model_zoo/gvt.py
index 2e1ae0fedb3e594026da1735645822f8b0d9651a..d1afbecaac6f27d95a9780209cdb5ac33ce911b0 100644
--- a/ppcls/arch/backbone/model_zoo/gvt.py
+++ b/ppcls/arch/backbone/model_zoo/gvt.py
@@ -324,8 +324,7 @@ class PyramidVisionTransformer(nn.Layer):
             self.pos_drops.append(nn.Dropout(p=drop_rate))
 
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
 
@@ -551,8 +550,7 @@ class ALTGVT(PCPVT):
         self.wss = wss
         # transformer encoder
         dpr = [
-            x.numpy()[0]
-            for x in paddle.linspace(0, drop_path_rate, sum(depths))
+            float(x) for x in paddle.linspace(0, drop_path_rate, sum(depths))
         ]  # stochastic depth decay rule
         cur = 0
         self.blocks = nn.LayerList()
diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py
index da2b817d8de17663feccffb0bb26483032747ae2..637b54f8cb7844d3dcb7e4d73231b35123b9c2bc 100644
--- a/ppcls/engine/evaluation/classification.py
+++ b/ppcls/engine/evaluation/classification.py
@@ -126,8 +126,7 @@ def classification_eval(engine, epoch_id=0):
             for key in loss_dict:
                 if key not in output_info:
                     output_info[key] = AverageMeter(key, '7.5f')
-                output_info[key].update(loss_dict[key].numpy()[0],
-                                        current_samples)
+                output_info[key].update(float(loss_dict[key]), current_samples)
 
         #  calc metric
         if engine.eval_metric_func is not None:
diff --git a/ppcls/engine/evaluation/retrieval.py b/ppcls/engine/evaluation/retrieval.py
index 753a5dcb48dcf5335492e962db38975a925900ab..4ec7355bf035c8586587687ca37a58e45d1a3c80 100644
--- a/ppcls/engine/evaluation/retrieval.py
+++ b/ppcls/engine/evaluation/retrieval.py
@@ -20,6 +20,7 @@ from typing import Optional
 
 import numpy as np
 import paddle
 
+from ppcls.engine.train.utils import type_name
 from ppcls.utils import logger
 
@@ -65,7 +66,7 @@ def retrieval_eval(engine, epoch_id=0):
                 engine.eval_metric_func.metric_func_list[
                     i].descending = False
                 logger.warning(
-                    f"re_ranking=True,{engine.eval_metric_func.metric_func_list[i].__class__.__name__}.descending has been set to False"
+                    f"re_ranking=True,{type_name(engine.eval_metric_func.metric_func_list[i])}.descending has been set to False"
                 )
 
     # compute distance matrix(The smaller the value, the more similar)
diff --git a/ppcls/engine/train/utils.py b/ppcls/engine/train/utils.py
index b649c8e8bc5592b50eef3ffb5a6cbbb57ef315f9..091a64d8326699100538ceb238ca454e3192b1af 100644
--- a/ppcls/engine/train/utils.py
+++ b/ppcls/engine/train/utils.py
@@ -25,8 +25,8 @@ def update_metric(trainer, out, batch, batch_size):
     for key in metric_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(metric_dict[key].numpy()[0],
-                                        batch_size)
+        trainer.output_info[key].update(
+            float(metric_dict[key]), batch_size)
 
 
 def update_loss(trainer, loss_dict, batch_size):
@@ -34,12 +34,12 @@ def update_loss(trainer, loss_dict, batch_size):
     for key in loss_dict:
         if key not in trainer.output_info:
             trainer.output_info[key] = AverageMeter(key, '7.5f')
-        trainer.output_info[key].update(loss_dict[key].numpy()[0], batch_size)
+        trainer.output_info[key].update(float(loss_dict[key]), batch_size)
 
 
 def log_info(trainer, batch_size, epoch_id, iter_id):
     lr_msg = ", ".join([
-        "lr({}): {:.8f}".format(lr.__class__.__name__, lr.get_lr())
+        "lr({}): {:.8f}".format(type_name(lr), lr.get_lr())
         for i, lr in enumerate(trainer.lr_sch)
     ])
     metric_msg = ", ".join([
@@ -54,17 +54,17 @@ def log_info(trainer, batch_size, epoch_id, iter_id):
 
     ips_msg = "ips: {:.5f} samples/s".format(
         batch_size / trainer.time_info["batch_cost"].avg)
-    eta_sec = ((trainer.config["Global"]["epochs"] - epoch_id + 1) *
-               trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg
+    eta_sec = (
+        (trainer.config["Global"]["epochs"] - epoch_id + 1) *
+        trainer.iter_per_epoch - iter_id) * trainer.time_info["batch_cost"].avg
     eta_msg = "eta: {:s}".format(str(datetime.timedelta(seconds=int(eta_sec))))
     logger.info("[Train][Epoch {}/{}][Iter: {}/{}]{}, {}, {}, {}, {}".format(
-        epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer.iter_per_epoch,
-        lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
-
+        epoch_id, trainer.config["Global"]["epochs"], iter_id, trainer.
+        iter_per_epoch, lr_msg, metric_msg, time_msg, ips_msg, eta_msg))
     for i, lr in enumerate(trainer.lr_sch):
         logger.scaler(
-            name="lr({})".format(lr.__class__.__name__),
+            name="lr({})".format(type_name(lr)),
             value=lr.get_lr(),
             step=trainer.global_step,
             writer=trainer.vdl_writer)
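Note on the recurring change: every hunk above swaps tensor.numpy()[0] for float(tensor) when pulling a Python scalar out of a paddle.Tensor. A minimal standalone sketch of the difference (illustrative values; the exact result shape depends on the Paddle release): float() accepts any single-element tensor, including the 0-D tensors that reductions such as paddle.mean return in recent Paddle versions, where .numpy()[0] fails because a 0-D array cannot be indexed.

import paddle

# A reduction produces a scalar tensor; in recent Paddle it is 0-D.
loss = paddle.mean(paddle.to_tensor([0.1, 0.2, 0.3]))

val = float(loss)        # works for 0-D and shape-[1] tensors alike
# val = loss.numpy()[0]  # IndexError once the result is a 0-D array

# The same pattern as the stochastic depth decay rule in gvt.py above:
dpr = [float(x) for x in paddle.linspace(0, 0.1, 4)]
print(val, dpr)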
diff --git a/ppcls/metric/metrics.py b/ppcls/metric/metrics.py
index b6dc934f31c04b0df2a90e63fed48973dddff1ca..192ecb06ac16c9d8153bba34a05e381f1cab82a0 100644
--- a/ppcls/metric/metrics.py
+++ b/ppcls/metric/metrics.py
@@ -113,7 +113,7 @@ class mAP(nn.Layer):
         precision_mask = paddle.multiply(equal_flag, precision)
         ap = paddle.sum(precision_mask, axis=1) / paddle.sum(equal_flag,
                                                              axis=1)
-        metric_dict["mAP"] = paddle.mean(ap).numpy()[0]
+        metric_dict["mAP"] = float(paddle.mean(ap))
 
         return metric_dict
 
@@ -157,7 +157,7 @@ class mINP(nn.Layer):
         hard_index = paddle.argmax(auxilary, axis=1).astype("float32")
         all_INP = paddle.divide(paddle.sum(equal_flag, axis=1), hard_index)
         mINP = paddle.mean(all_INP)
-        metric_dict["mINP"] = mINP.numpy()[0]
+        metric_dict["mINP"] = float(mINP)
 
         return metric_dict
 
@@ -360,7 +360,7 @@ class HammingDistance(MultiLabelMetric):
         metric_dict["HammingDistance"] = paddle.to_tensor(
             hamming_loss(target, preds))
         self.avg_meters["HammingDistance"].update(
-            metric_dict["HammingDistance"].numpy()[0], output.shape[0])
+            float(metric_dict["HammingDistance"]), output.shape[0])
 
         return metric_dict
 
@@ -400,7 +400,7 @@ class AccuracyScore(MultiLabelMetric):
                            sum(tps) + sum(tns) + sum(fns) + sum(fps))
         metric_dict["AccuracyScore"] = paddle.to_tensor(accuracy)
         self.avg_meters["AccuracyScore"].update(
-            metric_dict["AccuracyScore"].numpy()[0], output.shape[0])
+            float(metric_dict["AccuracyScore"]), output.shape[0])
 
         return metric_dict
 
diff --git a/ppcls/optimizer/__init__.py b/ppcls/optimizer/__init__.py
index bdee9f9b6c4b605a85b635f6a12de5eda6165c90..72475ce60f9d9ae9f0740155d13ef324c9f7395e 100644
--- a/ppcls/optimizer/__init__.py
+++ b/ppcls/optimizer/__init__.py
@@ -20,6 +20,7 @@ import copy
 import paddle
 from typing import Dict, List
 
+from ppcls.engine.train.utils import type_name
 from ppcls.utils import logger
 
 from . import optimizer
@@ -111,11 +112,11 @@ def build_optimizer(config, epochs, step_each_epoch, model_list=None):
         if optim_scope.endswith("Loss"):
             # optimizer for loss
             for m in model_list[i].sublayers(True):
-                if m.__class__.__name__ == optim_scope:
+                if type_name(m) == optim_scope:
                     optim_model.append(m)
         else:
             # opmizer for module in model, such as backbone, neck, head...
-            if optim_scope == model_list[i].__class__.__name__:
+            if optim_scope == type_name(model_list[i]):
                 optim_model.append(model_list[i])
             elif hasattr(model_list[i], optim_scope):
                 optim_model.append(getattr(model_list[i], optim_scope))
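type_name is imported from ppcls.engine.train.utils in retrieval.py and optimizer/__init__.py above, but its definition is not part of this diff. Since every call site replaces obj.__class__.__name__ with type_name(obj), a thin wrapper along these lines is the presumed shape (a sketch consistent with the usage, not the verbatim helper):

def type_name(v):
    """Return the class name of an object, e.g. 'CosineAnnealingDecay'."""
    return type(v).__name__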
diff --git a/ppcls/utils/misc.py b/ppcls/utils/misc.py
index 8015552437998264322661518ba3ce40c7cd7db5..b63da7c5f6930f3a5935b3d1163a8f32bf2484b1 100644
--- a/ppcls/utils/misc.py
+++ b/ppcls/utils/misc.py
@@ -47,7 +47,7 @@ class AverageMeter(object):
     @property
     def avg_info(self):
         if isinstance(self.avg, paddle.Tensor):
-            self.avg = self.avg.numpy()[0]
+            self.avg = float(self.avg)
         return "{}: {:.5f}".format(self.name, self.avg)
 
     @property
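The misc.py hunk keeps AverageMeter.avg_info working when self.avg still holds a tensor. Assuming the usual AverageMeter bookkeeping (update(val, n) folds val into a running average over n samples; only the constructor, update(), and avg_info appear in this patch), the call sites that now pass plain floats produce the same formatted output:

import paddle
from ppcls.utils.misc import AverageMeter

meter = AverageMeter("CELoss", '7.5f')
# Call sites touched by this patch now pass floats rather than tensors:
meter.update(float(paddle.to_tensor(0.25)), 8)
print(meter.avg_info)  # "CELoss: 0.25000", assuming avg is the running mean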