# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import platform
import paddle

from ppcls.utils.misc import AverageMeter
from ppcls.utils import logger


def classification_eval(engine, epoch_id=0):
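    """Run one full evaluation pass over the "Eval" dataloader.

    Args:
        engine: PaddleClas engine that holds the model, dataloaders,
            optional loss/metric functions and the parsed config.
        epoch_id (int): epoch index, used only in log messages.

    Returns:
        float: the headline evaluation metric (used to select the best
        model), or -1 when no metric function is configured.
    """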
    if hasattr(engine.eval_metric_func, "reset"):
        engine.eval_metric_func.reset()
    output_info = dict()
    time_info = {
        "batch_cost": AverageMeter(
            "batch_cost", '.5f', postfix=" s,"),
        "reader_cost": AverageMeter(
            "reader_cost", ".5f", postfix=" s,"),
    }
    print_batch_step = engine.config["Global"]["print_batch_step"]

    tic = time.time()
    total_samples = engine.dataloader_dict["Eval"].total_samples
    accum_samples = 0
    max_iter = engine.dataloader_dict["Eval"].max_iter
    for iter_id, batch in enumerate(engine.dataloader_dict["Eval"]):
        if iter_id >= max_iter:
            break
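        # the first few iterations carry one-off warm-up costs; reset the
        # timers after them so the averaged statistics are not skewed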
        if iter_id == 5:
            for key in time_info:
                time_info[key].reset()

        time_info["reader_cost"].update(time.time() - tic)
        batch_size = batch[0].shape[0]
        batch[0] = paddle.to_tensor(batch[0])
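        # single-label classification expects int64 labels of shape [N, 1]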
        if not engine.config["Global"].get("use_multilabel", False):
            batch[1] = batch[1].reshape([-1, 1]).astype("int64")

        # forward pass (run under AMP autocast when amp_eval is enabled)
        if engine.amp and engine.amp_eval:
            with paddle.amp.auto_cast(
                    custom_black_list={
                        "flatten_contiguous_range", "greater_than"
                    },
                    level=engine.amp_level):
                out = engine.model(batch)
        else:
            out = engine.model(batch)

        # DistributedBatchSampler pads the dataset so every rank gets the same
        # number of batches; track the gathered sample count so the padded
        # duplicates can be trimmed after all_gather
        current_samples = batch_size * paddle.distributed.get_world_size()
        accum_samples += current_samples

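        # unwrap nested model outputs (e.g. a distillation model returns
        # {"Student": ...}; some heads wrap their result as {"logits": ...})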
        if isinstance(out, dict) and "Student" in out:
            out = out["Student"]
        if isinstance(out, dict) and "logits" in out:
            out = out["logits"]

        # gather predictions and labels from all ranks when running distributed
        if paddle.distributed.get_world_size() > 1:
            label_list = []
            device_id = paddle.distributed.ParallelEnv().device_id
            label = batch[1].cuda(device_id) if engine.config["Global"][
                "device"] == "gpu" else batch[1]
            paddle.distributed.all_gather(label_list, label)
            labels = paddle.concat(label_list, 0)

            if isinstance(out, list):
                preds = []
                for x in out:
                    pred_list = []
                    paddle.distributed.all_gather(pred_list, x)
                    pred_x = paddle.concat(pred_list, 0)
                    preds.append(pred_x)
            else:
                pred_list = []
                paddle.distributed.all_gather(pred_list, out)
                preds = paddle.concat(pred_list, 0)

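            # if the sampler padded the final batch, keep only the first
            # total_samples gathered predictions/labels so metrics are exact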
            if accum_samples > total_samples and not engine.use_dali:
                if isinstance(preds, list):
                    preds = [
                        pred[:total_samples + current_samples - accum_samples]
                        for pred in preds
                    ]
                else:
                    preds = preds[:total_samples + current_samples -
                                  accum_samples]
                labels = labels[:total_samples + current_samples -
                                accum_samples]
                current_samples = total_samples + current_samples - accum_samples
        else:
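            # single-card evaluation: use the local batch directly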
            labels = batch[1]
            preds = out

        # calc loss
        if engine.eval_loss_func is not None:
            if engine.amp and engine.amp_eval:
                with paddle.amp.auto_cast(
                        custom_black_list={
                            "flatten_contiguous_range", "greater_than"
                        },
                        level=engine.amp_level):
                    loss_dict = engine.eval_loss_func(preds, labels)
            else:
                loss_dict = engine.eval_loss_func(preds, labels)

            for key in loss_dict:
                if key not in output_info:
                    output_info[key] = AverageMeter(key, '7.5f')
                output_info[key].update(float(loss_dict[key]), current_samples)

        # calc metric
        if engine.eval_metric_func is not None:
            engine.eval_metric_func(preds, labels)
        time_info["batch_cost"].update(time.time() - tic)

        if iter_id % print_batch_step == 0:
            time_msg = "s, ".join([
                "{}: {:.5f}".format(key, time_info[key].avg)
                for key in time_info
            ])

            ips_msg = "ips: {:.5f} images/sec".format(
                batch_size / time_info["batch_cost"].avg)

            if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
                metric_msg = ""
            else:
                metric_msg = ", ".join([
                    "{}: {:.5f}".format(key, output_info[key].val)
                    for key in output_info
                ])
                metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
            logger.info("[Eval][Epoch {}][Iter: {}/{}]{}, {}, {}".format(
                epoch_id, iter_id, max_iter, metric_msg, time_msg, ips_msg))

        tic = time.time()
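    # DALI dataloaders must be reset manually before the next epoch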
    if engine.use_dali:
        engine.dataloader_dict["Eval"].reset()

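    # attribute recognition (ATTRMetric) reports its own set of metrics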
    if "ATTRMetric" in engine.config["Metric"]["Eval"][0]:
        metric_msg = (
            "evalres: ma: {:.5f} label_f1: {:.5f} label_pos_recall: {:.5f} "
            "label_neg_recall: {:.5f} instance_f1: {:.5f} instance_acc: {:.5f} "
            "instance_prec: {:.5f} instance_recall: {:.5f}"
        ).format(*engine.eval_metric_func.attr_res())
        logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))

        # no metric to compare against, so do not track a best model
        if engine.eval_metric_func is None:
            return -1
        # return the first attribute metric (ma) as the headline score
        return engine.eval_metric_func.attr_res()[0]
    else:
        metric_msg = ", ".join([
            "{}: {:.5f}".format(key, output_info[key].avg)
            for key in output_info
        ])
        metric_msg += ", {}".format(engine.eval_metric_func.avg_info)
        logger.info("[Eval][Epoch {}][Avg]{}".format(epoch_id, metric_msg))

        # no metric to compare against, so do not track a best model
        if engine.eval_metric_func is None:
            return -1
        # return the first metric's average as the headline score
        return engine.eval_metric_func.avg