diff --git a/deploy/pptracking/python/mot/matching/jde_matching.py b/deploy/pptracking/python/mot/matching/jde_matching.py
index 3b1cf02edd75cb960e433926274b761d49136033..ff74e0469e20ea720ef22fbf642d3cbb2ef8e59e 100644
--- a/deploy/pptracking/python/mot/matching/jde_matching.py
+++ b/deploy/pptracking/python/mot/matching/jde_matching.py
@@ -82,8 +82,8 @@ def linear_assignment(cost_matrix, thresh):
 
 
 def bbox_ious(atlbrs, btlbrs):
-    boxes = np.ascontiguousarray(atlbrs, dtype=np.float)
-    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float)
+    boxes = np.ascontiguousarray(atlbrs, dtype=np.float32)
+    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float32)
     N = boxes.shape[0]
     K = query_boxes.shape[0]
     ious = np.zeros((N, K), dtype=boxes.dtype)
@@ -127,13 +127,13 @@ def embedding_distance(tracks, detections, metric='euclidean'):
     """
     Compute cost based on features between two list[STrack].
     """
-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
    if cost_matrix.size == 0:
        return cost_matrix
    det_features = np.asarray(
-        [track.curr_feat for track in detections], dtype=np.float)
+        [track.curr_feat for track in detections], dtype=np.float32)
     track_features = np.asarray(
-        [track.smooth_feat for track in tracks], dtype=np.float)
+        [track.smooth_feat for track in tracks], dtype=np.float32)
     cost_matrix = np.maximum(0.0, cdist(track_features, det_features,
                                         metric))  # Nomalized features
     return cost_matrix
diff --git a/deploy/pptracking/python/mot/tracker/base_jde_tracker.py b/deploy/pptracking/python/mot/tracker/base_jde_tracker.py
index fc086526a984f40a7d5dcfa128a7d51ae068e3ab..838a61ff598e5833e17dd1f02135f50488f584f5 100644
--- a/deploy/pptracking/python/mot/tracker/base_jde_tracker.py
+++ b/deploy/pptracking/python/mot/tracker/base_jde_tracker.py
@@ -95,14 +95,9 @@ class BaseTrack(object):
 
 
 class STrack(BaseTrack):
-    def __init__(self,
-                 tlwh,
-                 score,
-                 cls_id,
-                 buff_size=30,
-                 temp_feat=None):
+    def __init__(self, tlwh, score, cls_id, buff_size=30, temp_feat=None):
         # wait activate
-        self._tlwh = np.asarray(tlwh, dtype=np.float)
+        self._tlwh = np.asarray(tlwh, dtype=np.float32)
         self.score = score
         self.cls_id = cls_id
         self.track_len = 0
diff --git a/deploy/python/utils.py b/deploy/python/utils.py
index ac8a3f70259d30534df5e890d3a61f083893b70d..b7f514ebc999c361944fb0f16f73043fbc4e6460 100644
--- a/deploy/python/utils.py
+++ b/deploy/python/utils.py
@@ -357,7 +357,7 @@ def nms(dets, match_threshold=0.6, match_metric='iou'):
     order = scores.argsort()[::-1]
 
     ndets = dets.shape[0]
-    suppressed = np.zeros((ndets), dtype=np.int)
+    suppressed = np.zeros((ndets), dtype=np.int32)
 
     for _i in range(ndets):
         i = order[_i]
diff --git a/deploy/third_engine/demo_avh/convert_image.py b/deploy/third_engine/demo_avh/convert_image.py
index c2f43f0603c1ff31d8810a2af30280c3ea7c5a14..b440f0853a30ebe051dad1c66bb33241392a6c16 100755
--- a/deploy/third_engine/demo_avh/convert_image.py
+++ b/deploy/third_engine/demo_avh/convert_image.py
@@ -78,13 +78,13 @@ def create_headers(image_name):
     # Create input header file
     create_header_file("inputs", "input", img_data, "./include")
     # Create output header file
-    output_data = np.zeros([8500], np.float)
+    output_data = np.zeros([8500], np.float32)
     create_header_file(
         "outputs",
         "output0",
         output_data,
         "./include", )
-    output_data = np.zeros([170000], np.float)
+    output_data = np.zeros([170000], np.float32)
     create_header_file(
         "outputs",
         "output1",
diff --git a/ppdet/data/crop_utils/annotation_cropper.py b/ppdet/data/crop_utils/annotation_cropper.py
index 93a9a1f75fe46a15336553ea2689c78681780877..e288fabed4bf372186d37637681197bfbd507b87 100644
--- a/ppdet/data/crop_utils/annotation_cropper.py
+++ b/ppdet/data/crop_utils/annotation_cropper.py
@@ -27,14 +27,15 @@ from .chip_box_utils import intersection_over_box
 
 
 class AnnoCropper(object):
-    def __init__(self, image_target_sizes: List[int],
+    def __init__(self,
+                 image_target_sizes: List[int],
                  valid_box_ratio_ranges: List[List[float]],
-                 chip_target_size: int, chip_target_stride: int,
-                 use_neg_chip: bool = False,
-                 max_neg_num_per_im: int = 8,
-                 max_per_img: int = -1,
-                 nms_thresh: int = 0.5
-                 ):
+                 chip_target_size: int,
+                 chip_target_stride: int,
+                 use_neg_chip: bool=False,
+                 max_neg_num_per_im: int=8,
+                 max_per_img: int=-1,
+                 nms_thresh: int=0.5):
         """
         Generate chips by chip_target_size and chip_target_stride.
         These two parameters just like kernel_size and stride in cnn.
@@ -117,7 +118,8 @@ class AnnoCropper(object):
         self.chip_records = []
         self._global_chip_id = 1
         for r in records:
-            self._cur_im_pos_chips = []  # element: (chip, boxes_idx), chip is [x1, y1, x2, y2], boxes_ids is List[int]
+            self._cur_im_pos_chips = [
+            ]  # element: (chip, boxes_idx), chip is [x1, y1, x2, y2], boxes_ids is List[int]
             self._cur_im_neg_chips = []  # element: (chip, neg_box_num)
             for scale_i in range(self.scale_num):
                 self._get_current_scale_parameters(scale_i, r)
@@ -126,12 +128,16 @@ class AnnoCropper(object):
                 chips = self._create_chips(r['h'], r['w'], self._cur_scale)
 
                 # # dict: chipid->[box_id, ...]
-                pos_chip2boxes_idx = self._get_valid_boxes_and_pos_chips(r['gt_bbox'], chips)
+                pos_chip2boxes_idx = self._get_valid_boxes_and_pos_chips(
+                    r['gt_bbox'], chips)
 
                 # dict: chipid->neg_box_num
-                neg_chip2box_num = self._get_neg_boxes_and_chips(chips, list(pos_chip2boxes_idx.keys()), r.get('proposals', None))
+                neg_chip2box_num = self._get_neg_boxes_and_chips(
+                    chips,
+                    list(pos_chip2boxes_idx.keys()), r.get('proposals', None))
 
-                self._add_to_cur_im_chips(chips, pos_chip2boxes_idx, neg_chip2box_num)
+                self._add_to_cur_im_chips(chips, pos_chip2boxes_idx,
+                                          neg_chip2box_num)
 
             cur_image_records = self._trans_all_chips2annotations(r)
             self.chip_records.extend(cur_image_records)
@@ -147,7 +153,7 @@ class AnnoCropper(object):
 
         for neg_chipid, neg_box_num in neg_chip2box_num.items():
             chip = np.array(chips[neg_chipid])
-            self._cur_im_neg_chips.append((chip, neg_box_num)) 
+            self._cur_im_neg_chips.append((chip, neg_box_num))
 
     def _trans_all_chips2annotations(self, r):
         gt_bbox = r['gt_bbox']
@@ -156,20 +162,24 @@ class AnnoCropper(object):
         gt_class = r['gt_class']
         #  gt_poly = r['gt_poly']   # [None]xN
         # remaining keys: im_id, h, w
-        chip_records = self._trans_pos_chips2annotations(im_file, gt_bbox, is_crowd, gt_class)
+        chip_records = self._trans_pos_chips2annotations(im_file, gt_bbox,
+                                                         is_crowd, gt_class)
 
         if not self.use_neg_chip:
             return chip_records
 
         sampled_neg_chips = self._sample_neg_chips()
-        neg_chip_records = self._trans_neg_chips2annotations(im_file, sampled_neg_chips)
+        neg_chip_records = self._trans_neg_chips2annotations(im_file,
+                                                             sampled_neg_chips)
         chip_records.extend(neg_chip_records)
         return chip_records
 
-    def _trans_pos_chips2annotations(self, im_file, gt_bbox, is_crowd, gt_class):
+    def _trans_pos_chips2annotations(self, im_file, gt_bbox, is_crowd,
+                                     gt_class):
         chip_records = []
         for chip, boxes_idx in self._cur_im_pos_chips:
-            chip_bbox, final_boxes_idx = transform_chip_box(gt_bbox, boxes_idx, chip)
+            chip_bbox, final_boxes_idx = transform_chip_box(gt_bbox, boxes_idx,
+                                                            chip)
             x1, y1, x2, y2 = chip
             chip_h = y2 - y1
             chip_w = x2 - x1
@@ -197,12 +207,15 @@ class AnnoCropper(object):
             return self._cur_im_neg_chips
 
         candidate_num = int(sample_num * 1.5)
-        candidate_neg_chips = sorted(self._cur_im_neg_chips, key=lambda x: -x[1])[:candidate_num]
+        candidate_neg_chips = sorted(
+            self._cur_im_neg_chips, key=lambda x: -x[1])[:candidate_num]
         random.shuffle(candidate_neg_chips)
         sampled_neg_chips = candidate_neg_chips[:sample_num]
         return sampled_neg_chips
 
-    def _trans_neg_chips2annotations(self, im_file: str, sampled_neg_chips: List[Tuple]):
+    def _trans_neg_chips2annotations(self,
+                                     im_file: str,
+                                     sampled_neg_chips: List[Tuple]):
         chip_records = []
         for chip, neg_box_num in sampled_neg_chips:
             x1, y1, x2, y2 = chip
@@ -213,9 +226,12 @@ class AnnoCropper(object):
                 'im_id': np.array([self._global_chip_id]),
                 'h': chip_h,
                 'w': chip_w,
-                'gt_bbox': np.zeros((0, 4), dtype=np.float32),
-                'is_crowd': np.zeros((0, 1), dtype=np.int32),
-                'gt_class': np.zeros((0, 1), dtype=np.int32),
+                'gt_bbox': np.zeros(
+                    (0, 4), dtype=np.float32),
+                'is_crowd': np.zeros(
+                    (0, 1), dtype=np.int32),
+                'gt_class': np.zeros(
+                    (0, 1), dtype=np.int32),
                 # 'gt_poly': [],
                 'chip': chip
             }
@@ -247,7 +263,8 @@ class AnnoCropper(object):
         assert chip_size >= stride
         chip_overlap = chip_size - stride
 
-        if (width - chip_overlap) % stride > min_chip_location_diff:  # 不能被stride整除的部分比较大,则保留
+        if (width - chip_overlap
+            ) % stride > min_chip_location_diff:  # 不能被stride整除的部分比较大,则保留
             w_steps = max(1, int(math.ceil((width - chip_overlap) / stride)))
         else:  # 不能被stride整除的部分比较小,则丢弃
             w_steps = max(1, int(math.floor((width - chip_overlap) / stride)))
@@ -267,9 +284,10 @@ class AnnoCropper(object):
 
         # check chip size
         for item in chips:
-            if item[2] - item[0] > chip_size * 1.1 or item[3] - item[1] > chip_size * 1.1:
+            if item[2] - item[0] > chip_size * 1.1 or item[3] - item[
+                    1] > chip_size * 1.1:
                 raise ValueError(item)
-        chips = np.array(chips, dtype=np.float)
+        chips = np.array(chips, dtype=np.float32)
         raw_size_chips = chips / scale
         return raw_size_chips
 
@@ -279,12 +297,15 @@ class AnnoCropper(object):
         im_size = self._cur_im_size
         scale = self._cur_scale
         # Nx4 N
-        valid_boxes, valid_boxes_idx = self._validate_boxes(valid_ratio_range, im_size, gt_bbox, scale)
+        valid_boxes, valid_boxes_idx = self._validate_boxes(
+            valid_ratio_range, im_size, gt_bbox, scale)
         # dict: chipid->[box_id, ...]
-        pos_chip2boxes_idx = self._find_pos_chips(chips, valid_boxes, valid_boxes_idx)
+        pos_chip2boxes_idx = self._find_pos_chips(chips, valid_boxes,
+                                                  valid_boxes_idx)
         return pos_chip2boxes_idx
 
-    def _validate_boxes(self, valid_ratio_range: List[float],
+    def _validate_boxes(self,
+                        valid_ratio_range: List[float],
                         im_size: int,
                         gt_boxes: 'np.array of Nx4',
                         scale: float):
@@ -299,20 +320,26 @@ class AnnoCropper(object):
         target_mins = mins * scale
 
         low = valid_ratio_range[0] if valid_ratio_range[0] > 0 else 0
-        high = valid_ratio_range[1] if valid_ratio_range[1] > 0 else np.finfo(np.float).max
+        high = valid_ratio_range[1] if valid_ratio_range[1] > 0 else np.finfo(
+            np.float32).max
 
-        valid_boxes_idx = np.nonzero((low <= box_ratio) & (box_ratio < high) & (target_mins >= 2))[0]
+        valid_boxes_idx = np.nonzero((low <= box_ratio) & (box_ratio < high) & (
+            target_mins >= 2))[0]
         valid_boxes = gt_boxes[valid_boxes_idx]
         return valid_boxes, valid_boxes_idx
 
-    def _find_pos_chips(self, chips: 'Cx4', valid_boxes: 'Bx4', valid_boxes_idx: 'B'):
+    def _find_pos_chips(self,
+                        chips: 'Cx4',
+                        valid_boxes: 'Bx4',
+                        valid_boxes_idx: 'B'):
         """
         :return: pos_chip2boxes_idx, dict: chipid->[box_id, ...]
         """
         iob = intersection_over_box(chips, valid_boxes)  # overlap, CxB
 
         iob_threshold_to_find_chips = 1.
-        pos_chip_ids, _ = self._find_chips_to_cover_overlaped_boxes(iob, iob_threshold_to_find_chips)
+        pos_chip_ids, _ = self._find_chips_to_cover_overlaped_boxes(
+            iob, iob_threshold_to_find_chips)
         pos_chip_ids = set(pos_chip_ids)
 
         iob_threshold_to_assign_box = 0.5
@@ -323,7 +350,8 @@ class AnnoCropper(object):
     def _find_chips_to_cover_overlaped_boxes(self, iob, overlap_threshold):
         return find_chips_to_cover_overlaped_boxes(iob, overlap_threshold)
 
-    def _assign_boxes_to_pos_chips(self, iob, overlap_threshold, pos_chip_ids, valid_boxes_idx):
+    def _assign_boxes_to_pos_chips(self, iob, overlap_threshold, pos_chip_ids,
+                                   valid_boxes_idx):
         chip_ids, box_ids = np.nonzero(iob >= overlap_threshold)
         pos_chip2boxes_idx = defaultdict(list)
         for chip_id, box_id in zip(chip_ids, box_ids):
@@ -333,7 +361,10 @@ class AnnoCropper(object):
             pos_chip2boxes_idx[chip_id].append(raw_gt_box_idx)
         return pos_chip2boxes_idx
 
-    def _get_neg_boxes_and_chips(self, chips: 'Cx4', pos_chip_ids: 'D', proposals: 'Px4'):
+    def _get_neg_boxes_and_chips(self,
+                                 chips: 'Cx4',
+                                 pos_chip_ids: 'D',
+                                 proposals: 'Px4'):
         """
         :param chips:
         :param pos_chip_ids:
@@ -351,12 +382,16 @@ class AnnoCropper(object):
         im_size = self._cur_im_size
         scale = self._cur_scale
 
-        valid_props, _ = self._validate_boxes(valid_ratio_range, im_size, proposals, scale)
+        valid_props, _ = self._validate_boxes(valid_ratio_range, im_size,
+                                              proposals, scale)
         neg_boxes = self._find_neg_boxes(chips, pos_chip_ids, valid_props)
         neg_chip2box_num = self._find_neg_chips(chips, pos_chip_ids, neg_boxes)
         return neg_chip2box_num
 
-    def _find_neg_boxes(self, chips: 'Cx4', pos_chip_ids: 'D', valid_props: 'Px4'):
+    def _find_neg_boxes(self,
+                        chips: 'Cx4',
+                        pos_chip_ids: 'D',
+                        valid_props: 'Px4'):
         """
         :return: neg_boxes: Nx4
         """
@@ -370,7 +405,8 @@ class AnnoCropper(object):
         neg_boxes = valid_props[non_overlap_props_idx]
         return neg_boxes
 
-    def _find_neg_chips(self, chips: 'Cx4', pos_chip_ids: 'D', neg_boxes: 'Nx4'):
+    def _find_neg_chips(self, chips: 'Cx4', pos_chip_ids: 'D',
+                        neg_boxes: 'Nx4'):
         """
         :return: neg_chip2box_num, dict: chipid->neg_box_num
         """
@@ -469,31 +505,37 @@ class AnnoCropper(object):
         for result in results:
             bbox_locs = result['bbox']
             bbox_nums = result['bbox_num']
-            if len(bbox_locs) == 1 and bbox_locs[0][0] == -1:  # current batch has no detections
+            if len(bbox_locs) == 1 and bbox_locs[0][
+                    0] == -1:  # current batch has no detections
                 # bbox_locs = array([[-1.]], dtype=float32); bbox_nums = [[1]]
                 # MultiClassNMS output: If there is no detected boxes for all images, lod will be set to {1} and Out only contains one value which is -1.
                 continue
-            im_ids = result['im_id'] # replace with range(len(bbox_nums))
+            im_ids = result['im_id']  # replace with range(len(bbox_nums))
 
             last_bbox_num = 0
             for idx, im_id in enumerate(im_ids):
 
                 cur_bbox_len = bbox_nums[idx]
-                bboxes = bbox_locs[last_bbox_num: last_bbox_num + cur_bbox_len]
+                bboxes = bbox_locs[last_bbox_num:last_bbox_num + cur_bbox_len]
                 last_bbox_num += cur_bbox_len
                 # box: [num_id, score, xmin, ymin, xmax, ymax]
                 if len(bboxes) == 0:  # current image has no detections
                     continue
 
-                chip_rec = records[int(im_id) - 1]  # im_id starts from 1, type is np.int64
+                chip_rec = records[int(im_id) -
+                                   1]  # im_id starts from 1, type is np.int64
                 image_size = max(chip_rec["ori_im_h"], chip_rec["ori_im_w"])
 
-                bboxes = transform_chip_boxes2image_boxes(bboxes, chip_rec["chip"], chip_rec["ori_im_h"], chip_rec["ori_im_w"])
+                bboxes = transform_chip_boxes2image_boxes(
+                    bboxes, chip_rec["chip"], chip_rec["ori_im_h"],
+                    chip_rec["ori_im_w"])
 
                 scale_i = chip_rec["scale_i"]
-                cur_scale = self._get_current_scale(self.target_sizes[scale_i], image_size)
-                _, valid_boxes_idx = self._validate_boxes(self.valid_box_ratio_ranges[scale_i], image_size,
-                                                          bboxes[:, 2:], cur_scale)
+                cur_scale = self._get_current_scale(self.target_sizes[scale_i],
+                                                    image_size)
+                _, valid_boxes_idx = self._validate_boxes(
+                    self.valid_box_ratio_ranges[scale_i], image_size,
+                    bboxes[:, 2:], cur_scale)
                 ori_img_id = self._global_chip_id2img_id[int(im_id)]
 
                 img_id2bbox[ori_img_id].append(bboxes[valid_boxes_idx])
@@ -507,7 +549,8 @@ class AnnoCropper(object):
         nms_thresh = self.nms_thresh
 
         for img_id in img_id2bbox:
-            box = img_id2bbox[img_id]  # list of np.array of shape [N, 6], 6 is [label, score, x1, y1, x2, y2]
+            box = img_id2bbox[
+                img_id]  # list of np.array of shape [N, 6], 6 is [label, score, x1, y1, x2, y2]
             box = np.concatenate(box, axis=0)
             nms_dets = nms(box, nms_thresh)
             if max_per_img > 0:
@@ -525,18 +568,13 @@ class AnnoCropper(object):
         results = []
         for img_id in im_ids:  # output by original im_id order
             if len(img_id2bbox[img_id]) == 0:
-                bbox = np.array([[-1., 0., 0., 0., 0., 0.]])  # edge case: no detections
+                bbox = np.array(
+                    [[-1., 0., 0., 0., 0., 0.]])  # edge case: no detections
                 bbox_num = np.array([0])
             else:
                 # np.array of shape [N, 6], 6 is [label, score, x1, y1, x2, y2]
                 bbox = img_id2bbox[img_id]
                 bbox_num = np.array([len(bbox)])
-            res = dict(
-                im_id=np.array([[img_id]]),
-                bbox=bbox,
-                bbox_num=bbox_num
-            )
+            res = dict(im_id=np.array([[img_id]]), bbox=bbox, bbox_num=bbox_num)
             results.append(res)
         return results
-
-
diff --git a/ppdet/data/crop_utils/chip_box_utils.py b/ppdet/data/crop_utils/chip_box_utils.py
index d6e81a1654a269e2a28837bc884dc75c21d98ee4..cfa1e39e9058a3f13ef4f972b3b98acd17bc5080 100644
--- a/ppdet/data/crop_utils/chip_box_utils.py
+++ b/ppdet/data/crop_utils/chip_box_utils.py
@@ -33,8 +33,10 @@ def intersection_over_box(chips, boxes):
 
     box_area = bbox_area(boxes)  # B
 
-    inter_x2y2 = np.minimum(np.expand_dims(chips, 1)[:, :, 2:], boxes[:, 2:])  # CxBX2
-    inter_x1y1 = np.maximum(np.expand_dims(chips, 1)[:, :, :2], boxes[:, :2])  # CxBx2
+    inter_x2y2 = np.minimum(np.expand_dims(chips, 1)[:, :, 2:],
+                            boxes[:, 2:])  # CxBX2
+    inter_x1y1 = np.maximum(np.expand_dims(chips, 1)[:, :, :2],
+                            boxes[:, :2])  # CxBx2
     inter_wh = inter_x2y2 - inter_x1y1
     inter_wh = np.clip(inter_wh, a_min=0, a_max=None)
     inter_area = inter_wh[:, :, 0] * inter_wh[:, :, 1]  # CxB
@@ -81,8 +83,9 @@ def transform_chip_box(gt_bbox: 'Gx4', boxes_idx: 'B', chip: '4'):
 def find_chips_to_cover_overlaped_boxes(iob, overlap_threshold):
     chip_ids, box_ids = np.nonzero(iob >= overlap_threshold)
     chip_id2overlap_box_num = np.bincount(chip_ids)  # 1d array
-    chip_id2overlap_box_num = np.pad(chip_id2overlap_box_num, (0, len(iob) - len(chip_id2overlap_box_num)),
-                                     constant_values=0)
+    chip_id2overlap_box_num = np.pad(
+        chip_id2overlap_box_num, (0, len(iob) - len(chip_id2overlap_box_num)),
+        constant_values=0)
 
     chosen_chip_ids = []
     while len(box_ids) > 0:
@@ -92,7 +95,8 @@ def find_chips_to_cover_overlaped_boxes(iob, overlap_threshold):
         chosen_chip_ids.append(max_count_chip_id)
 
         box_ids_in_cur_chip = box_ids[chip_ids == max_count_chip_id]
-        ids_not_in_cur_boxes_mask = np.logical_not(np.isin(box_ids, box_ids_in_cur_chip))
+        ids_not_in_cur_boxes_mask = np.logical_not(
+            np.isin(box_ids, box_ids_in_cur_chip))
         chip_ids = chip_ids[ids_not_in_cur_boxes_mask]
         box_ids = box_ids[ids_not_in_cur_boxes_mask]
     return chosen_chip_ids, chip_id2overlap_box_num
@@ -124,7 +128,7 @@ def nms(dets, thresh):
     order = scores.argsort()[::-1]
 
     ndets = dets.shape[0]
-    suppressed = np.zeros((ndets), dtype=np.int)
+    suppressed = np.zeros((ndets), dtype=np.int32)
 
     # nominal indices
     # _i, _j
diff --git a/ppdet/data/source/keypoint_coco.py b/ppdet/data/source/keypoint_coco.py
index 0d809270d2577338acf5588ba860423438648b44..45eb9a91d7381d649ead2bb70954eae7acac76d0 100644
--- a/ppdet/data/source/keypoint_coco.py
+++ b/ppdet/data/source/keypoint_coco.py
@@ -487,9 +487,9 @@ class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):
                 continue
 
             joints = np.zeros(
-                (self.ann_info['num_joints'], 3), dtype=np.float)
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             joints_vis = np.zeros(
-                (self.ann_info['num_joints'], 3), dtype=np.float)
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             for ipt in range(self.ann_info['num_joints']):
                 joints[ipt, 0] = obj['keypoints'][ipt * 3 + 0]
                 joints[ipt, 1] = obj['keypoints'][ipt * 3 + 1]
@@ -560,9 +560,10 @@ class KeypointTopDownCocoDataset(KeypointTopDownBaseDataset):
                 continue
 
             center, scale = self._box2cs(box)
-            joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float)
+            joints = np.zeros(
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             joints_vis = np.ones(
-                (self.ann_info['num_joints'], 3), dtype=np.float)
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             kpt_db.append({
                 'image_file': img_name,
                 'im_id': im_id,
@@ -633,8 +634,8 @@ class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):
             im_id = a['image_id'] if 'image_id' in a else int(
                 os.path.splitext(image_name)[0])
 
-            c = np.array(a['center'], dtype=np.float)
-            s = np.array([a['scale'], a['scale']], dtype=np.float)
+            c = np.array(a['center'], dtype=np.float32)
+            s = np.array([a['scale'], a['scale']], dtype=np.float32)
 
             # Adjust center/scale slightly to avoid cropping limbs
             if c[0] != -1:
@@ -642,9 +643,10 @@ class KeypointTopDownMPIIDataset(KeypointTopDownBaseDataset):
                 s = s * 1.25
             c = c - 1
 
-            joints = np.zeros((self.ann_info['num_joints'], 3), dtype=np.float)
+            joints = np.zeros(
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             joints_vis = np.zeros(
-                (self.ann_info['num_joints'], 3), dtype=np.float)
+                (self.ann_info['num_joints'], 3), dtype=np.float32)
             if 'joints' in a:
                 joints_ = np.array(a['joints'])
                 joints_[:, 0:2] = joints_[:, 0:2] - 1
diff --git a/ppdet/data/source/pose3d_cmb.py b/ppdet/data/source/pose3d_cmb.py
index e5129706da1354f02b511ebbcbd0ee2d12fbb249..ab7123aecaee9e068a0d13ea1f334c571aa19b5f 100644
--- a/ppdet/data/source/pose3d_cmb.py
+++ b/ppdet/data/source/pose3d_cmb.py
@@ -68,7 +68,7 @@ class Pose3DDataset(DetDataset):
 
     def get_mask(self, mvm_percent=0.3):
         num_joints = self.num_joints
-        mjm_mask = np.ones((num_joints, 1)).astype(np.float)
+        mjm_mask = np.ones((num_joints, 1)).astype(np.float32)
         if self.test_mode == False:
             pb = np.random.random_sample()
             masked_num = int(
@@ -78,7 +78,7 @@ class Pose3DDataset(DetDataset):
                 np.arange(num_joints), replace=False, size=masked_num)
             mjm_mask[indices, :] = 0.0
 
-        mvm_mask = np.ones((10, 1)).astype(np.float)
+        mvm_mask = np.ones((10, 1)).astype(np.float32)
         if self.test_mode == False:
             num_vertices = 10
             pb = np.random.random_sample()
diff --git a/ppdet/modeling/mot/matching/jde_matching.py b/ppdet/modeling/mot/matching/jde_matching.py
index 3b1cf02edd75cb960e433926274b761d49136033..ff74e0469e20ea720ef22fbf642d3cbb2ef8e59e 100644
--- a/ppdet/modeling/mot/matching/jde_matching.py
+++ b/ppdet/modeling/mot/matching/jde_matching.py
@@ -82,8 +82,8 @@ def linear_assignment(cost_matrix, thresh):
 
 
 def bbox_ious(atlbrs, btlbrs):
-    boxes = np.ascontiguousarray(atlbrs, dtype=np.float)
-    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float)
+    boxes = np.ascontiguousarray(atlbrs, dtype=np.float32)
+    query_boxes = np.ascontiguousarray(btlbrs, dtype=np.float32)
     N = boxes.shape[0]
     K = query_boxes.shape[0]
     ious = np.zeros((N, K), dtype=boxes.dtype)
@@ -127,13 +127,13 @@ def embedding_distance(tracks, detections, metric='euclidean'):
     """
     Compute cost based on features between two list[STrack].
     """
-    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float)
+    cost_matrix = np.zeros((len(tracks), len(detections)), dtype=np.float32)
     if cost_matrix.size == 0:
         return cost_matrix
     det_features = np.asarray(
-        [track.curr_feat for track in detections], dtype=np.float)
+        [track.curr_feat for track in detections], dtype=np.float32)
     track_features = np.asarray(
-        [track.smooth_feat for track in tracks], dtype=np.float)
+        [track.smooth_feat for track in tracks], dtype=np.float32)
     cost_matrix = np.maximum(0.0, cdist(track_features, det_features,
                                         metric))  # Nomalized features
     return cost_matrix
diff --git a/ppdet/modeling/mot/tracker/base_jde_tracker.py b/ppdet/modeling/mot/tracker/base_jde_tracker.py
index 285a260cdfdc8e4e2f716e1caf7b22ed2d75e8e7..2bed2acedb83c7e4bdac95598de1904f8c980cb9 100644
--- a/ppdet/modeling/mot/tracker/base_jde_tracker.py
+++ b/ppdet/modeling/mot/tracker/base_jde_tracker.py
@@ -102,14 +102,9 @@ class BaseTrack(object):
 @register
 @serializable
 class STrack(BaseTrack):
-    def __init__(self,
-                 tlwh,
-                 score,
-                 cls_id,
-                 buff_size=30,
-                 temp_feat=None):
+    def __init__(self, tlwh, score, cls_id, buff_size=30, temp_feat=None):
         # wait activate
-        self._tlwh = np.asarray(tlwh, dtype=np.float)
+        self._tlwh = np.asarray(tlwh, dtype=np.float32)
         self.score = score
         self.cls_id = cls_id
         self.track_len = 0
diff --git a/ppdet/modeling/post_process.py b/ppdet/modeling/post_process.py
index 4f7d5f278557e525bec507f222cf1811cbe5970b..002a80d953835952b7a1223da1d1780c6e6798a2 100644
--- a/ppdet/modeling/post_process.py
+++ b/ppdet/modeling/post_process.py
@@ -635,7 +635,7 @@ def nms(dets, match_threshold=0.6, match_metric='iou'):
     order = scores.argsort()[::-1]
 
     ndets = dets.shape[0]
-    suppressed = np.zeros((ndets), dtype=np.int)
+    suppressed = np.zeros((ndets), dtype=np.int32)
 
     for _i in range(ndets):
         i = order[_i]
diff --git a/ppdet/modeling/proposal_generator/target.py b/ppdet/modeling/proposal_generator/target.py
index d4584e8a00ceb4de78bfa266b939bef51a8d6b0e..f95f906a27fa93eb6543f522a579f0242c919e52 100644
--- a/ppdet/modeling/proposal_generator/target.py
+++ b/ppdet/modeling/proposal_generator/target.py
@@ -295,7 +295,7 @@ def polygons_to_mask(polygons, height, width):
     assert len(polygons) > 0, "COCOAPI does not support empty polygons"
     rles = mask_util.frPyObjects(polygons, height, width)
     rle = mask_util.merge(rles)
-    return mask_util.decode(rle).astype(np.bool)
+    return mask_util.decode(rle).astype(np.bool_)
 
 
 def rasterize_polygons_within_box(poly, box, resolution):
@@ -448,7 +448,7 @@ def libra_sample_via_interval(max_overlaps, full_set, num_expected, floor_thr,
                 tmp_sampled_set = np.random.choice(
                     tmp_inds, size=per_num_expected, replace=False)
             else:
-                tmp_sampled_set = np.array(tmp_inds, dtype=np.int)
+                tmp_sampled_set = np.array(tmp_inds, dtype=np.int32)
             sampled_inds.append(tmp_sampled_set)
 
         sampled_inds = np.concatenate(sampled_inds)
@@ -509,13 +509,13 @@ def libra_sample_neg(max_overlaps,
                 size=num_expected_iou_sampling,
                 replace=False)
         else:
-            iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int)
+            iou_sampled_inds = np.array(iou_sampling_neg_inds, dtype=np.int32)
         num_expected_floor = num_expected - len(iou_sampled_inds)
         if len(floor_neg_inds) > num_expected_floor:
             sampled_floor_inds = np.random.choice(
                 floor_neg_inds, size=num_expected_floor, replace=False)
         else:
-            sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)
+            sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int32)
         sampled_inds = np.concatenate((sampled_floor_inds, iou_sampled_inds))
         if len(sampled_inds) < num_expected:
             num_extra = num_expected - len(sampled_inds)