diff --git a/deploy/pphuman/datacollector.py b/deploy/pphuman/datacollector.py
index cd459aad0680418bcd087d00662b0c310151ffc3..f1e3a21360fb871e26e53129cd8833cd123f1422 100644
--- a/deploy/pphuman/datacollector.py
+++ b/deploy/pphuman/datacollector.py
@@ -35,6 +35,9 @@ class Result(object):
             return self.res_dict[name]
         return None
 
+    def clear(self, name):
+        self.res_dict[name].clear()
+
 
 class DataCollector(object):
     """
@@ -80,7 +83,6 @@ class DataCollector(object):
             ids = int(mot_item[0])
             if ids not in self.collector:
                 self.collector[ids] = copy.deepcopy(self.mots)
-
             self.collector[ids]["frames"].append(frameid)
             self.collector[ids]["rects"].append([mot_item[2:]])
             if attr_res:
diff --git a/deploy/pphuman/mtmct.py b/deploy/pphuman/mtmct.py
index 30f84724809753b577503b3bb59d50a21731ddb1..5e0abbd9d0c7be69120cac04b3c5794d9bb9c436 100644
--- a/deploy/pphuman/mtmct.py
+++ b/deploy/pphuman/mtmct.py
@@ -297,10 +297,9 @@ def distill_idfeat(mot_res):
         feature_new = feature_list
 
     #if available frames number is more than 200, take one frame data per 20 frames
-    if len(qualities_new) > 200:
-        skipf = 20
-    else:
-        skipf = max(10, len(qualities_new) // 10)
+    skipf = 1
+    if len(qualities_new) > 20:
+        skipf = 2
     quality_skip = np.array(qualities_new[::skipf])
     feature_skip = np.array(feature_new[::skipf])
 
diff --git a/deploy/pphuman/pipeline.py b/deploy/pphuman/pipeline.py
index 4d6fa014ae783b61c4464b2e292c5d745a5297d1..9e23e0c0f8e34e963a1cf2597318bff527f991c3 100644
--- a/deploy/pphuman/pipeline.py
+++ b/deploy/pphuman/pipeline.py
@@ -587,7 +587,7 @@ class PipePredictor(object):
                 if self.cfg['visual']:
                     self.action_visual_helper.update(action_res)
 
-            if self.with_mtmct:
+            if self.with_mtmct and frame_id % 10 == 0:
                 crop_input, img_qualities, rects = self.reid_predictor.crop_image_with_mot(
                     frame, mot_res)
                 if frame_id > self.warmup_frame:
@@ -603,6 +603,8 @@ class PipePredictor(object):
                     "rects": rects
                 }
                 self.pipeline_res.update(reid_res_dict, 'reid')
+            else:
+                self.pipeline_res.clear('reid')
 
            self.collector.append(frame_id, self.pipeline_res)
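
For reference, a minimal sketch of how the revised stride in `distill_idfeat` behaves: with more than 20 usable frames every 2nd one is kept, otherwise all frames are used. The variable names mirror the diff context above; the quality/feature values are made up for demonstration.

```python
import numpy as np

# Illustrative sketch only (not part of the patch): the new skipf sampling
# from distill_idfeat, applied to fabricated quality/feature data.
qualities_new = list(np.linspace(0.5, 1.0, 50))      # 50 usable frames
feature_new = [np.random.rand(128) for _ in qualities_new]

skipf = 1
if len(qualities_new) > 20:   # more than 20 usable frames -> keep every 2nd one
    skipf = 2

quality_skip = np.array(qualities_new[::skipf])
feature_skip = np.array(feature_new[::skipf])
print(quality_skip.shape, feature_skip.shape)         # (25,) (25, 128)
```

The pipeline change applies the same down-sampling idea at the frame level: ReID features are extracted only on every 10th frame (`frame_id % 10 == 0`), and on all other frames the new `Result.clear('reid')` empties the stale `'reid'` entry before the results are appended to the collector.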