# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
import paddle
import scipy

from ppcls.utils import all_gather, logger


def retrieval_eval(engine, epoch_id=0):
    """Run retrieval evaluation and return the primary metric value.

    Extracts query/gallery features, computes a (possibly re-ranked)
    similarity matrix block by block to bound memory, and accumulates
    metrics over all query samples.

    Args:
        engine: Engine object holding the model, dataloaders, config,
            eval loss and metric functions.
        epoch_id (int, optional): Epoch index, used for logging only.
            Defaults to 0.

    Returns:
        float: Value of the first metric in the computed metric dict.
    """
    engine.model.eval()
    # step1. prepare query and gallery features
    if engine.gallery_query_dataloader is not None:
        # gallery and query share one combined dataset; extract once
        gallery_feat, gallery_label, gallery_camera = compute_feature(
            engine, "gallery_query")
        query_feat, query_label, query_camera = gallery_feat, gallery_label, gallery_camera
    else:
        gallery_feat, gallery_label, gallery_camera = compute_feature(
            engine, "gallery")
        query_feat, query_label, query_camera = compute_feature(engine,
                                                                "query")

    # step2. split features into feature blocks for saving memory
    num_query = len(query_feat)
    block_size = engine.config["Global"].get("sim_block_size", 64)
    sections = [block_size] * (num_query // block_size)
    if num_query % block_size > 0:
        sections.append(num_query % block_size)

    query_feat_blocks = paddle.split(query_feat, sections)
    query_label_blocks = paddle.split(query_label, sections)
    query_camera_blocks = paddle.split(
        query_camera, sections) if query_camera is not None else None
    metric_key = None

    # step3. compute metric
    if engine.eval_loss_func is None:
        metric_dict = {metric_key: 0.0}
    else:
        use_reranking = engine.config["Global"].get("re_ranking", False)
        logger.info(f"re_ranking={use_reranking}")
        if use_reranking:
            # compute distance matrix over all queries at once
            distmat = compute_re_ranking_dist(
                query_feat, gallery_feat, engine.config["Global"].get(
                    "feature_normalize", True), 20, 6, 0.3)
            # exclude illegal distance (same identity under same camera)
            if query_camera is not None:
                camera_mask = query_camera != gallery_camera.t()
                label_mask = query_label != gallery_label.t()
                keep_mask = label_mask | camera_mask
                # push masked-out pairs beyond the current max distance
                distmat = keep_mask.astype(query_feat.dtype) * distmat + (
                    ~keep_mask).astype(query_feat.dtype) * (distmat.max() + 1)
            else:
                keep_mask = None
            # compute metric with all samples (negate: metric expects similarity)
            metric_dict = engine.eval_metric_func(-distmat, query_label,
                                                  gallery_label, keep_mask)
        else:
            metric_dict = defaultdict(float)
            for block_idx, block_feat in enumerate(query_feat_blocks):
                # compute similarity matrix for this query block
                distmat = paddle.matmul(
                    block_feat, gallery_feat, transpose_y=True)
                # exclude illegal distance
                if query_camera is not None:
                    camera_mask = query_camera_blocks[
                        block_idx] != gallery_camera.t()
                    label_mask = query_label_blocks[
                        block_idx] != gallery_label.t()
                    keep_mask = label_mask | camera_mask
                    distmat = keep_mask.astype(query_feat.dtype) * distmat
                else:
                    keep_mask = None
                # compute metric by block
                metric_block = engine.eval_metric_func(
                    distmat, query_label_blocks[block_idx], gallery_label,
                    keep_mask)
                # accumulate metric, weighted by the block's share of queries
                for key in metric_block:
                    metric_dict[key] += metric_block[key] * block_feat.shape[
                        0] / num_query

    metric_info_list = []
    for key, value in metric_dict.items():
        metric_info_list.append(f"{key}: {value:.5f}")
        if metric_key is None:
            metric_key = key
    metric_msg = ", ".join(metric_info_list)
    logger.info(f"[Eval][Epoch {epoch_id}][Avg]{metric_msg}")

    return metric_dict[metric_key]


def compute_feature(engine, name="gallery"):
    """Extract features, labels and (optional) camera ids for one dataset.

    Args:
        engine: Engine object holding the model, dataloaders and config.
        name (str, optional): Which dataset to extract, one of "gallery",
            "query" or "gallery_query". Defaults to "gallery".

    Returns:
        tuple: ``(all_feat, all_label, all_camera)`` tensors; ``all_camera``
            is None when the dataset provides no camera ids.

    Raises:
        ValueError: If ``name`` is not a supported dataset name.
    """
    if name == "gallery":
        dataloader = engine.gallery_dataloader
    elif name == "query":
        dataloader = engine.query_dataloader
    elif name == "gallery_query":
        dataloader = engine.gallery_query_dataloader
    else:
        raise ValueError(
            f"Only support gallery or query or gallery_query dataset, but got {name}"
        )

    all_feat = []
    all_label = []
    all_camera = []
    has_camera = False
    for idx, batch in enumerate(dataloader):  # load is very time-consuming
        if idx % engine.config["Global"]["print_batch_step"] == 0:
            logger.info(
                f"{name} feature calculation process: [{idx}/{len(dataloader)}]"
            )

        batch = [paddle.to_tensor(x) for x in batch]
        # batch[1] is the label; a third element, when present, is the camera id
        batch[1] = batch[1].reshape([-1, 1]).astype("int64")
        if len(batch) >= 3:
            has_camera = True
            batch[2] = batch[2].reshape([-1, 1]).astype("int64")
        if engine.amp and engine.amp_eval:
            with paddle.amp.auto_cast(level=engine.amp_level):
                out = engine.model(batch[0])
        else:
            out = engine.model(batch[0])
        # for distillation models, evaluate the student branch
        if "Student" in out:
            out = out["Student"]

        # get features
        if engine.config["Global"].get("retrieval_feature_from",
                                       "features") == "features":
            # use output from neck as feature
            batch_feat = out["features"]
        else:
            # use output from backbone as feature
            batch_feat = out["backbone"]

        # do norm(optional)
        if engine.config["Global"].get("feature_normalize", True):
            batch_feat = paddle.nn.functional.normalize(batch_feat, p=2)

        # do binarize(optional)
        if engine.config["Global"].get("feature_binarize") == "round":
            batch_feat = paddle.round(batch_feat).astype("float32") * 2.0 - 1.0
        elif engine.config["Global"].get("feature_binarize") == "sign":
            batch_feat = paddle.sign(batch_feat).astype("float32")

        if paddle.distributed.get_world_size() > 1:
            # gather results from all ranks before concatenation
            all_feat.append(all_gather(batch_feat))
            all_label.append(all_gather(batch[1]))
            if has_camera:
                all_camera.append(all_gather(batch[2]))
        else:
            all_feat.append(batch_feat)
            all_label.append(batch[1])
            if has_camera:
                all_camera.append(batch[2])

    if engine.use_dali:
        dataloader.reset()

    all_feat = paddle.concat(all_feat)
    all_label = paddle.concat(all_label)
    if has_camera:
        all_camera = paddle.concat(all_camera)
    else:
        all_camera = None
    # discard redundant padding sample(s) at the end
    total_samples = dataloader.size if engine.use_dali else len(
        dataloader.dataset)
    all_feat = all_feat[:total_samples]
    all_label = all_label[:total_samples]
    if has_camera:
        all_camera = all_camera[:total_samples]

    logger.info(f"Build {name} done, all feat shape: {all_feat.shape}")
    return all_feat, all_label, all_camera


def k_reciprocal_neighbor(rank: np.ndarray, p: int, k: int) -> np.ndarray:
    """Implementation of k-reciprocal nearest neighbors, i.e. R(p, k)
H
HydrogenSulfate 已提交
200 201

    Args:
202 203 204
        rank (np.ndarray): Rank mat with shape of [N, N].
        p (int): Probe index.
        k (int): Parameter k for k-reciprocal nearest neighbors algorithm.
H
HydrogenSulfate 已提交
205 206

    Returns:
207
        np.ndarray: K-reciprocal nearest neighbors of probe p with shape of [M, ].
H
HydrogenSulfate 已提交
208
    """
209 210 211 212 213 214 215
    # use k+1 for excluding probe index itself
    forward_k_neigh_index = rank[p, :k + 1]
    backward_k_neigh_index = rank[forward_k_neigh_index, :k + 1]
    candidate = np.where(backward_k_neigh_index == p)[0]
    return forward_k_neigh_index[candidate]


def compute_re_ranking_dist(query_feat: paddle.Tensor,
                            gallery_feat: paddle.Tensor,
                            feature_normed: bool=True,
                            k1: int=20,
                            k2: int=6,
                            lamb: float=0.5) -> paddle.Tensor:
    """
    Re-ranking Person Re-identification with k-reciprocal Encoding
    Reference: https://arxiv.org/abs/1701.08398
    Code reference: https://github.com/michuanhaohao/reid-strong-baseline/blob/master/utils/re_ranking.py

    Args:
        query_feat (paddle.Tensor): Query features with shape of [num_query, feature_dim].
        gallery_feat (paddle.Tensor):  Gallery features with shape of [num_gallery, feature_dim].
        feature_normed (bool, optional):  Whether input features are normalized.
        k1 (int, optional): Parameter for K-reciprocal nearest neighbors. Defaults to 20.
        k2 (int, optional): Parameter for K-nearest neighbors. Defaults to 6.
        lamb (float, optional): Penalty factor. Defaults to 0.5.

    Returns:
        paddle.Tensor: (1 - lamb) x Dj + lamb x D, with shape of [num_query, num_gallery].
    """
    num_query = query_feat.shape[0]
    num_gallery = gallery_feat.shape[0]
    num_all = num_query + num_gallery
    feat = paddle.concat([query_feat, gallery_feat], 0)
    logger.info("Using GPU to compute original distance matrix")
    # use L2 distance
    if feature_normed:
        # for unit-norm features: ||a-b||^2 = 2 - 2*a.b
        original_dist = 2 - 2 * paddle.matmul(feat, feat, transpose_y=True)
    else:
        # ||a||^2 + ||b||^2 ...
        original_dist = paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([num_all, num_all]) + \
            paddle.pow(feat, 2).sum(axis=1, keepdim=True).expand([num_all, num_all]).t()
        # ... - 2*a.b. Use explicit keywords: paddle's addmm(x, y, beta, alpha)
        # computes alpha*(x @ y) + beta*input, so passing -2.0, 1.0 positionally
        # would bind beta=-2.0/alpha=1.0 and scale the wrong term.
        original_dist = original_dist.addmm(
            feat, feat.t(), beta=1.0, alpha=-2.0)
    original_dist = original_dist.numpy()
    del feat

    # normalize each column by its max, then transpose so rows index probes
    original_dist = np.transpose(original_dist / np.max(original_dist, axis=0))
    V = np.zeros_like(original_dist).astype(np.float16)
    # argpartition is enough: only the first k1 positions need correct order
    initial_rank = np.argpartition(original_dist, range(1, k1 + 1))
    logger.info("Start re-ranking...")

    for p in range(num_all):
        # compute R(p,k1)
        p_k_reciprocal_ind = k_reciprocal_neighbor(initial_rank, p, k1)

        # compute R*(p,k1)=R(p,k1)∪R(q,k1/2)
        # s.t. |R(p,k1)∩R(q,k1/2)|>=2/3|R(q,k1/2)|, ∀q∈R(p,k1)
        p_k_reciprocal_exp_ind = p_k_reciprocal_ind
        for _, q in enumerate(p_k_reciprocal_ind):
            q_k_reciprocal_ind = k_reciprocal_neighbor(initial_rank, q,
                                                       int(np.around(k1 / 2)))
            if len(
                    np.intersect1d(
                        p_k_reciprocal_ind,
                        q_k_reciprocal_ind,
                        assume_unique=True)) > 2 / 3 * len(q_k_reciprocal_ind):
                p_k_reciprocal_exp_ind = np.append(p_k_reciprocal_exp_ind,
                                                   q_k_reciprocal_ind)
        p_k_reciprocal_exp_ind = np.unique(p_k_reciprocal_exp_ind)
        # reweight distance using gaussian kernel
        weight = np.exp(-original_dist[p, p_k_reciprocal_exp_ind])
        V[p, p_k_reciprocal_exp_ind] = weight / np.sum(weight)

    # local query expansion
    original_dist = original_dist[:num_query, ]
    if k2 > 1:
        try:
            # explicit submodule import: `import scipy` alone does not
            # guarantee scipy.sparse is loaded on older SciPy versions
            import scipy.sparse
            # use sparse tensor to speed up query expansion
            indices = (np.repeat(np.arange(num_all), k2),
                       initial_rank[:, :k2].reshape([-1, ]))
            values = np.array(
                [1 / k2 for _ in range(num_all * k2)], dtype="float16")
            V = scipy.sparse.coo_matrix(
                (values, indices), V.shape,
                dtype="float16") @ V.astype("float16")
        except Exception as e:
            logger.info(
                f"Failed to do local query expansion with sparse tensor for reason: \n{e}\n"
                f"now use for-loop instead")
            # use vanilla for-loop
            V_qe = np.zeros_like(V, dtype=np.float16)
            for i in range(num_all):
                V_qe[i, :] = np.mean(V[initial_rank[i, :k2], :], axis=0)
            V = V_qe
            del V_qe
    del initial_rank

    # cache k-reciprocal sets which contains gj
    invIndex = []
    for gj in range(num_all):
        invIndex.append(np.nonzero(V[:, gj])[0])

    # compute jaccard distance
    jaccard_dist = np.zeros_like(original_dist, dtype=np.float16)
    for p in range(num_query):
        sum_min = np.zeros(shape=[1, num_all], dtype=np.float16)
        gj_ind = np.nonzero(V[p, :])[0]
        gj_ind_inv = [invIndex[gj] for gj in gj_ind]
        for j, gj in enumerate(gj_ind):
            gi = gj_ind_inv[j]
            sum_min[0, gi] += np.minimum(V[p, gj], V[gi, gj])
        jaccard_dist[p] = 1 - sum_min / (2 - sum_min)

    # fuse jaccard distance with original distance
    final_dist = (1 - lamb) * jaccard_dist + lamb * original_dist
    # free the large intermediates before building the final tensor
    del original_dist
    del V
    del jaccard_dist
    final_dist = final_dist[:num_query, num_query:]
    final_dist = paddle.to_tensor(final_dist)
    return final_dist