classification.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import platform
import paddle

from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger


def classification_eval(engine, epoch_id=0):
    output_info = dict()
    time_info = {
        "batch_cost": AverageMeter(
            "batch_cost", '.5f', postfix=" s,"),
        "reader_cost": AverageMeter(
            "reader_cost", ".5f", postfix=" s,"),
    }
    print_batch_step = engine.config["Global"]["print_batch_step"]

    metric_key = None
    tic = time.time()
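    # running count of samples seen across all ranks; used later to undo sampler padding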
    accum_samples = 0
    total_samples = len(
        engine.eval_dataloader.
        dataset) if not engine.use_dali else engine.eval_dataloader.size
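    # skip the last batch on Windows to work around a dataloader issue there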
    max_iter = len(engine.eval_dataloader) - 1 if platform.system(
    ) == "Windows" else len(engine.eval_dataloader)
    for iter_id, batch in enumerate(engine.eval_dataloader):
        if iter_id >= max_iter:
            break
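        # reset the timing meters after the first few warm-up iterations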
        if iter_id == 5:
            for key in time_info:
                time_info[key].reset()
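        # DALI yields a list of dicts; unpack the batch into [data, label] tensors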
        if engine.use_dali:
            batch = [
                paddle.to_tensor(batch[0]['data']),
                paddle.to_tensor(batch[0]['label'])
            ]
        time_info["reader_cost"].update(time.time() - tic)
        batch_size = batch[0].shape[0]
        batch[0] = paddle.to_tensor(batch[0]).astype("float32")
        if not engine.config["Global"].get("use_multilabel", False):
            batch[1] = batch[1].reshape([-1, 1]).astype("int64")
        # forward the image batch through the model
        out = engine.model(batch[0])
        # calc loss
        if engine.eval_loss_func is not None:
            loss_dict = engine.eval_loss_func(out, batch[1])
            for key in loss_dict:
                if key not in output_info:
                    output_info[key] = AverageMeter(key, '7.5f')
                output_info[key].update(loss_dict[key].numpy()[0], batch_size)

        # just for DistributedBatchSampler issue: repeat sampling
        current_samples = batch_size * paddle.distributed.get_world_size()
        accum_samples += current_samples

        # calc metric
        if engine.eval_metric_func is not None:
            if paddle.distributed.get_world_size() > 1:
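                # gather predictions and labels from every rank so the metric sees the full batch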
                pred_list = []
                label_list = []
                if isinstance(out, dict):
                    out = out["logits"]
                paddle.distributed.all_gather(pred_list, out)
                paddle.distributed.all_gather(label_list, batch[1])
                pred = paddle.concat(pred_list, 0)
                labels = paddle.concat(label_list, 0)
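                # DistributedBatchSampler repeats samples to pad the last batch; drop the padded tail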
                if accum_samples > total_samples:
                    pred = pred[:total_samples + current_samples -
                                accum_samples]
                    labels = labels[:total_samples + current_samples -
                                    accum_samples]
                    current_samples = total_samples + current_samples - accum_samples
                metric_dict = engine.eval_metric_func(pred, labels)

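                # sum each metric across ranks, then average over the world size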
                for key in metric_dict:
                    paddle.distributed.all_reduce(
                        metric_dict[key], op=paddle.distributed.ReduceOp.SUM)
                    metric_dict[key] = metric_dict[
                        key] / paddle.distributed.get_world_size()
            else:
                metric_dict = engine.eval_metric_func(out, batch[1])
            for key in metric_dict:
                if metric_key is None:
                    metric_key = key
                if key not in output_info:
                    output_info[key] = AverageMeter(key, '7.5f')

                output_info[key].update(metric_dict[key].numpy()[0],
                                        batch_size)

        time_info["batch_cost"].update(time.time() - tic)

        if iter_id % print_batch_step == 0:
            time_msg = "s, ".join([
                "{}: {:.5f}".format(key, time_info[key].avg)
                for key in time_info
            ])

            ips_msg = "ips: {:.5f} images/sec".format(
                batch_size / time_info["batch_cost"].avg)

            metric_msg = ", ".join([
                "{}: {:.5f}".format(key, output_info[key].val)
                for key in output_info
            ])
            logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
                epoch_id, iter_id,
                len(engine.eval_dataloader), metric_msg, time_msg, ips_msg))

        tic = time.time()
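    # the DALI iterator has to be reset before the next pass over the data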
    if engine.use_dali:
        engine.eval_dataloader.reset()
    metric_msg = ", ".join([
        "{}: {:.5f}".format(key, output_info[key].avg) for key in output_info
    ])
    logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))

    # no eval metric available, so return -1 and do not use this result to pick a best model
    if engine.eval_metric_func is None:
        return -1
    # return 1st metric in the dict
    return output_info[metric_key].avg