Commit 59c1dde1 authored by Evgeny Izutov

Fixes

Parent 718f6b30
@@ -194,7 +194,6 @@ def load_pascal_annotation(index, pascal_root):
class_to_ind = dict(zip(classes, xrange(21)))
filename = osp.join(pascal_root, 'Annotations', index + '.xml')
-# print 'Loading: {}'.format(filename)
def get_data_from_tag(node, tag):
return node.getElementsByTagName(tag)[0].childNodes[0].data
@@ -247,8 +246,8 @@ def print_info(name, params):
"""
Output some info regarding the class
"""
-print "{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
+print("{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
name,
params['split'],
params['batch_size'],
-params['im_shape'])
+params['im_shape']))
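For reference, a minimal standalone sketch of the print-function form these hunks switch to; the __future__ import keeps it working on Python 2 as well, and the layer name and parameter values below are made up for illustration:

from __future__ import print_function  # no-op on Python 3, enables print() on Python 2

def print_info(name, params):
    """Output some info regarding the class."""
    print("{} initialized for split: {}, with bs: {}, im_shape: {}.".format(
        name,
        params['split'],
        params['batch_size'],
        params['im_shape']))

# Example call with made-up values:
print_info('ExampleDataLayer',
           {'split': 'train', 'batch_size': 128, 'im_shape': [227, 227]})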
@@ -129,7 +129,7 @@ if __name__ == "__main__":
result_file = args.resultfile
img_dir = args.imgdir
if not os.path.exists(img_dir):
-print "{} does not exist".format(img_dir)
+print("{} does not exist".format(img_dir))
sys.exit()
labelmap_file = args.labelmap_file
labelmap = None
......
@@ -25,8 +25,8 @@ from os.path import exists, basename
import cv2
import numpy as np
from lxml import etree
-from tqdm import tqdm
from six import iteritems, itervalues
+from tqdm import tqdm
os.environ['GLOG_minloglevel'] = '2'
#pylint: disable=wrong-import-position
@@ -101,7 +101,7 @@ def load_annotation(annotation_path, video_size):
roi = [float(video_size[1]), float(video_size[0]), 0.0, 0.0]
for track in tqdm(root, desc='Extracting annotation'):
-if 'label' not in track.attrib.keys() or track.attrib['label'] != 'person':
+if 'label' not in track.attrib or track.attrib['label'] != 'person':
continue
for bbox in track:
@@ -111,14 +111,14 @@ def load_annotation(annotation_path, video_size):
frame_id = int(bbox.attrib['frame'])
action_name = None
-for bbox_attr_id in range(len(bbox)):
+for bbox_attr_id, _ in enumerate(bbox):
attribute_name = bbox[bbox_attr_id].attrib['name']
if attribute_name != 'action':
continue
action_name = bbox[bbox_attr_id].text
-if action_name is not None and action_name in ACTION_NAMES_MAP.keys():
+if action_name is not None and action_name in ACTION_NAMES_MAP:
label = ACTION_NAMES_MAP[action_name]
xmin = float(bbox.attrib['xtl'])
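As an aside, a self-contained illustration of the enumerate() form used in this hunk; the XML snippet mimics the annotation layout but is invented for illustration and is not taken from the real dataset:

from lxml import etree

# Invented example annotation in the same spirit as the real data.
bbox = etree.fromstring(
    '<box frame="7" xtl="10" ytl="20" xbr="30" ybr="40">'
    '<attribute name="action">walking</attribute>'
    '</box>')

action_name = None
for bbox_attr_id, _ in enumerate(bbox):  # yields (index, child element) pairs
    attribute_name = bbox[bbox_attr_id].attrib['name']
    if attribute_name != 'action':
        continue
    action_name = bbox[bbox_attr_id].text

print(action_name)  # -> walking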
@@ -182,8 +182,8 @@ def calculate_similarity_matrix(set_a, set_b):
"""
similarity = np.zeros([len(set_a), len(set_b)], dtype=np.float32)
-for i in range(len(set_a)):
-for j in range(len(set_b)):
+for i, _ in enumerate(set_a):
+for j, _ in enumerate(set_b):
similarity[i, j] = iou(set_a[i], set_b[j])
return similarity
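The matrix above is filled with pairwise IoU values through an iou() helper that is not part of this diff; a hypothetical sketch of such a helper, assuming boxes are given as [xmin, ymin, xmax, ymax], could look like this:

import numpy as np

def iou(box_a, box_b):
    """Intersection over union of two boxes; [xmin, ymin, xmax, ymax] layout is an assumption."""
    ix_min = max(box_a[0], box_b[0])
    iy_min = max(box_a[1], box_b[1])
    ix_max = min(box_a[2], box_b[2])
    iy_max = min(box_a[3], box_b[3])
    intersection = max(ix_max - ix_min, 0.0) * max(iy_max - iy_min, 0.0)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - intersection
    return intersection / union if union > 0.0 else 0.0

def calculate_similarity_matrix(set_a, set_b):
    similarity = np.zeros([len(set_a), len(set_b)], dtype=np.float32)
    for i, _ in enumerate(set_a):
        for j, _ in enumerate(set_b):
            similarity[i, j] = iou(set_a[i], set_b[j])
    return similarity

# Example: two boxes overlapping by half of each area.
print(calculate_similarity_matrix([[0, 0, 2, 2]], [[1, 0, 3, 2]]))  # ~0.333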
@@ -352,9 +352,9 @@ def match_detections(predicted_data, gt_data, min_iou):
total_gt_bbox_num = 0
matched_gt_bbox_num = 0
-frame_ids = gt_data.keys()
+frame_ids = list(gt_data)
for frame_id in tqdm(frame_ids, desc='Matching detections'):
-if frame_id not in predicted_data.keys():
+if frame_id not in predicted_data:
all_matches[frame_id] = []
continue
@@ -402,7 +402,7 @@ def calc_confusion_matrix(all_matched_ids, predicted_data, gt_data, num_classes)
"""
out_cm = np.zeros([num_classes, num_classes], dtype=np.int32)
-for frame_id in tqdm(all_matched_ids.keys(), desc='Evaluating'):
+for frame_id in tqdm(all_matched_ids, desc='Evaluating'):
matched_ids = all_matched_ids[frame_id]
for match_pair in matched_ids:
gt_label = gt_data[frame_id][match_pair[0]].label
@@ -430,7 +430,7 @@ def detection_classagnostic_metrics(all_matched_ids, predicted_data):
dtype=np.float32)
bias = 0
-for frame_id in all_matched_ids.keys():
+for frame_id in all_matched_ids:
matched_ids = all_matched_ids[frame_id]
predicted_bboxes = predicted_data[frame_id]
@@ -465,7 +465,7 @@ def detection_classspecific_metrics(all_matched_ids, predicted_data, gt_data, cl
false_positives = np.ones([num_predicted_detections], dtype=np.int32)
bias = 0
-for frame_id in all_matched_ids.keys():
+for frame_id in all_matched_ids:
matched_ids = all_matched_ids[frame_id]
predicted_class_positions = filtered_predicted_pos[frame_id]
gt_bboxes = gt_data[frame_id]
@@ -474,7 +474,7 @@ def detection_classspecific_metrics(all_matched_ids, predicted_data, gt_data, cl
if gt_bboxes[match_pair[0]].label != class_id:
continue
-if match_pair[1] not in predicted_class_positions.keys():
+if match_pair[1] not in predicted_class_positions:
continue
pred_local_pos = predicted_class_positions[match_pair[1]]
@@ -660,7 +660,7 @@ def main():
video_size = read_video_size(task[1])
annotation, _ = load_annotation(task[0], video_size)
-valid_frame_ids = annotation.keys()
+valid_frame_ids = list(annotation)
predicted_actions = predict_actions(task[1], valid_frame_ids,
detection_net, args.in_name, args.out_name)
......
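In passing, the .keys() cleanups in the hunks above rely on standard dict behaviour: membership tests and iteration work directly on the dict, and list(d) materializes the keys when an actual list is needed. A small self-contained illustration with made-up data:

# Made-up stand-in for the per-frame ground-truth dictionary.
gt_data = {0: ['bbox_a'], 5: ['bbox_b']}

# Membership: "k in d" is equivalent to "k in d.keys()".
assert (3 in gt_data) == (3 in gt_data.keys())

# Iterating over the dict yields its keys directly.
assert sorted(k for k in gt_data) == sorted(gt_data.keys())

# list(d) gives a real list of keys, e.g. for repeated passes or indexing.
frame_ids = list(gt_data)
assert sorted(frame_ids) == [0, 5]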
@@ -17,4 +17,5 @@ Pillow>=5.1.0
six>=1.10.0
lxml>=4.2.2
opencv-python>=3.3.0
+tqdm>=4.23.4
@@ -136,7 +136,7 @@ class ResizeCropImagesMapper(mapreducer.BasicMapper):
FLAGS.output_side_length)
except Exception as e:
# we ignore the exception (maybe the image is corrupted?)
-print line, Exception, e
+print('{}: {}'.format(line, e))
yield value, FLAGS.output_folder
mapreducer.REGISTER_DEFAULT_MAPPER(ResizeCropImagesMapper)
......