diff --git a/ppcls/configs/Attr/StrongBaselineAttr.yaml b/ppcls/configs/Attr/StrongBaselineAttr.yaml
index 7f90e74567d1b026a066f9b46dfa1bbeb6de73b6..7501669bc5707fa2577c7d0b573a3b23cd2a0213 100644
--- a/ppcls/configs/Attr/StrongBaselineAttr.yaml
+++ b/ppcls/configs/Attr/StrongBaselineAttr.yaml
@@ -5,7 +5,7 @@ Global:
   output_dir: "./output/"
   device: "gpu"
   save_interval: 5
-  eval_during_train: False
+  eval_during_train: True
   eval_interval: 1
   epochs: 30
   print_batch_step: 20
diff --git a/ppcls/engine/evaluation/classification.py b/ppcls/engine/evaluation/classification.py
index c7ace9f101baae13c7bf49782db81a6d117e7c38..1f9b55fc33ff6b49e9e7f7bd3e9bcebdbf3e0093 100644
--- a/ppcls/engine/evaluation/classification.py
+++ b/ppcls/engine/evaluation/classification.py
@@ -18,7 +18,7 @@ import time
 import platform
 import paddle
 
-from ppcls.utils.misc import AverageMeter, AttrMeter
+from ppcls.utils.misc import AverageMeter
 from ppcls.utils import logger
 
 
@@ -34,10 +34,6 @@ def classification_eval(engine, epoch_id=0):
     }
     print_batch_step = engine.config["Global"]["print_batch_step"]
 
-    if engine.eval_metric_func is not None and "ATTRMetric" in engine.config[
-            "Metric"]["Eval"][0]:
-        output_info["attr"] = AttrMeter(threshold=0.5)
-
     metric_key = None
     tic = time.time()
     accum_samples = 0
@@ -162,7 +158,7 @@ def classification_eval(engine, epoch_id=0):
     if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
         metric_msg = ", ".join([
             "evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} instance_prec: {:.5f} instance_recall: {:.5f}".
-            format(*output_info["attr"].res())
+            format(*engine.eval_metric_func.attr_res())
         ])
         logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))
 
@@ -170,7 +166,7 @@ def classification_eval(engine, epoch_id=0):
         if engine.eval_metric_func is None:
             return -1
         # return 1st metric in the dict
-        return output_info["attr"].res()[0]
+        return engine.eval_metric_func.attr_res()[0]
     else:
         metric_msg = ", ".join([
             "{}: {:.5f}".format(key, output_info[key].avg)
diff --git a/ppcls/metric/__init__.py b/ppcls/metric/__init__.py
index 43a92b83138fc98154c9e3464d828f90520793f9..1f49cc2d9c4e8a70287b416447c0d1d98a582113 100644
--- a/ppcls/metric/__init__.py
+++ b/ppcls/metric/__init__.py
@@ -56,6 +56,9 @@ class CombinedMetrics(AvgMetrics):
     def avg(self):
         return self.metric_func_list[0].avg
 
+    def attr_res(self):
+        return self.metric_func_list[0].attrmeter.res()
+
     def reset(self):
         for metric in self.metric_func_list:
             if hasattr(metric, "reset"):
diff --git a/ppcls/metric/metrics.py b/ppcls/metric/metrics.py
index 7b8cd024c4aa29435450f91c19d2f0cff2a626ff..fb087db10a96cf42a0d86ac5b8d9ca0485ba33cb 100644
--- a/ppcls/metric/metrics.py
+++ b/ppcls/metric/metrics.py
@@ -25,7 +25,7 @@ from sklearn.preprocessing import binarize
 from easydict import EasyDict
 
 from ppcls.metric.avg_metrics import AvgMetrics
-from ppcls.utils.misc import AverageMeter
+from ppcls.utils.misc import AverageMeter, AttrMeter
 
 
 class TopkAcc(AvgMetrics):
@@ -438,7 +438,11 @@ class ATTRMetric(nn.Layer):
         super().__init__()
         self.threshold = threshold
 
+    def reset(self):
+        self.attrmeter = AttrMeter(threshold=0.5)
+
     def forward(self, output, target):
         metric_dict = get_attr_metrics(target[:, 0, :].numpy(),
                                        output.numpy(), self.threshold)
+        self.attrmeter.update(metric_dict)
         return metric_dict
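
Illustrative sketch (not part of the patch): the change makes ATTRMetric own its running AttrMeter (created in reset(), updated in forward()) and lets the eval loop read the aggregated attribute results through CombinedMetrics.attr_res() instead of keeping a separate meter in output_info. The stand-in classes below only mirror that wiring under simplified assumptions; they are not the real ppcls implementations, and the toy metric they accumulate is hypothetical.

# Minimal, self-contained sketch of the pattern introduced by this patch.
# The Fake* classes are simplified stand-ins for AttrMeter, ATTRMetric and
# CombinedMetrics; only the attribute/method names attrmeter, reset(),
# update() and attr_res() mirror the patched code.

class FakeAttrMeter:
    """Stand-in for ppcls.utils.misc.AttrMeter: accumulates per-batch stats."""

    def __init__(self, threshold=0.5):
        self.threshold = threshold
        self.updates = []

    def update(self, metric_dict):
        self.updates.append(metric_dict)

    def res(self):
        # The real AttrMeter returns (ma, label_f1, ..., instance_recall);
        # here we just average a single toy value.
        vals = [m["toy_metric"] for m in self.updates]
        return (sum(vals) / max(len(vals), 1), )


class FakeATTRMetric:
    """Stand-in for ATTRMetric after the patch: it owns its meter."""

    def reset(self):
        # A fresh meter per evaluation pass, as in the patched ATTRMetric.reset().
        self.attrmeter = FakeAttrMeter(threshold=0.5)

    def forward(self, output, target):
        metric_dict = {"toy_metric": float(output == target)}
        self.attrmeter.update(metric_dict)  # accumulate, as in the patch
        return metric_dict


class FakeCombinedMetrics:
    """Stand-in for CombinedMetrics with the new attr_res() accessor."""

    def __init__(self, metric_func_list):
        self.metric_func_list = metric_func_list

    def attr_res(self):
        return self.metric_func_list[0].attrmeter.res()

    def reset(self):
        for metric in self.metric_func_list:
            if hasattr(metric, "reset"):
                metric.reset()


if __name__ == "__main__":
    eval_metric_func = FakeCombinedMetrics([FakeATTRMetric()])
    eval_metric_func.reset()                 # engine resets metrics before eval
    for output, target in [(1, 1), (0, 1)]:  # toy "batches"
        eval_metric_func.metric_func_list[0].forward(output, target)
    print(eval_metric_func.attr_res())       # -> (0.5,)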