Unverified commit 10baa5f1, authored by Anastasia Yasakova, committed by GitHub

Fix: visibility and ignored information fail to be loaded (MOT dataset format) (#5270)

Parent 7706eee5
......@@ -75,6 +75,7 @@ non-ascii paths while adding files from "Connected file share" (issue #4428)
- Oriented bounding boxes broken with COCO format (<https://github.com/opencv/cvat/pull/5219>)
- Fixed upload resumption in production environments
(<https://github.com/opencv/cvat/issues/4839>)
- Visibility and ignored information fail to be loaded (MOT dataset format) (<https://github.com/opencv/cvat/pull/5270>)
### Security
- TDB
......
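For context on what the changelog entry refers to: in MOTChallenge ground-truth text files, visibility and the ignore flag are per-box columns. Below is a minimal sketch of that row layout, assuming the standard MOT16-style column order that the `mot_seq` Datumaro format reads; the helper name and the sample row are illustrative only.

```python
def parse_gt_row(line: str) -> dict:
    """Parse one MOTChallenge-style gt.txt row.

    Assumed column order (MOT16 ground truth):
    frame, track_id, x, y, w, h, not_ignored, class_id, visibility
    """
    frame, track_id, x, y, w, h, not_ignored, class_id, visibility = (
        float(v) for v in line.split(','))
    return {
        'frame': int(frame),            # MOT frames start from 1
        'track_id': int(track_id),
        'bbox': (x, y, w, h),
        'ignored': not_ignored == 0,    # 7th column: 0 means "ignore this box"
        'class_id': int(class_id),
        'visibility': visibility,       # visible fraction of the object, 0..1
    }

print(parse_gt_row("1,7,9.4,12.09,17.2,18.19,1,1,0.6"))
```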
......@@ -5,8 +5,7 @@
from tempfile import TemporaryDirectory
import datumaro.components.extractor as datumaro
from datumaro.components.dataset import Dataset
import datumaro as dm
from pyunpack import Archive
from cvat.apps.dataset_manager.bindings import GetCVATDataExtractor
......@@ -14,9 +13,10 @@ from cvat.apps.dataset_manager.util import make_zip_archive
from .registry import dm_env, exporter, importer
def _import_to_task(dataset, instance_data):
tracks = {}
label_cat = dataset.categories()[datumaro.AnnotationType.label]
label_cat = dataset.categories()[dm.AnnotationType.label]
for item in dataset:
# NOTE: MOT frames start from 1
......@@ -25,21 +25,26 @@ def _import_to_task(dataset, instance_data):
frame_number = instance_data.abs_frame_id(frame_number)
for ann in item.annotations:
if ann.type != datumaro.AnnotationType.bbox:
if ann.type != dm.AnnotationType.bbox:
continue
track_id = ann.attributes.get('track_id')
occluded = ann.attributes.pop('occluded', False) is True
track_id = ann.attributes.pop('track_id', None)
attributes = [
instance_data.Attribute(name=n, value=str(v))
for n, v in ann.attributes.items()
]
if track_id is None:
# Extension. Import regular boxes:
instance_data.add_shape(instance_data.LabeledShape(
type='rectangle',
label=label_cat.items[ann.label].name,
points=ann.points,
occluded=ann.attributes.get('occluded') is True,
occluded=occluded,
z_order=ann.z_order,
group=0,
frame=frame_number,
attributes=[],
attributes=attributes,
source='manual',
))
continue
......@@ -47,12 +52,12 @@ def _import_to_task(dataset, instance_data):
shape = instance_data.TrackedShape(
type='rectangle',
points=ann.points,
occluded=ann.attributes.get('occluded') is True,
occluded=occluded,
outside=False,
keyframe=True,
z_order=ann.z_order,
frame=frame_number,
attributes=[],
attributes=attributes,
source='manual',
)
......@@ -91,7 +96,7 @@ def _import_to_task(dataset, instance_data):
@exporter(name='MOT', ext='ZIP', version='1.1')
def _export(dst_file, instance_data, save_images=False):
dataset = Dataset.from_extractors(GetCVATDataExtractor(
dataset = dm.Dataset.from_extractors(GetCVATDataExtractor(
instance_data, include_images=save_images), env=dm_env)
with TemporaryDirectory() as temp_dir:
dataset.export(temp_dir, 'mot_seq_gt', save_images=save_images)
......@@ -103,7 +108,7 @@ def _import(src_file, instance_data, load_data_callback=None, **kwargs):
with TemporaryDirectory() as tmp_dir:
Archive(src_file.name).extractall(tmp_dir)
dataset = Dataset.import_from(tmp_dir, 'mot_seq', env=dm_env)
dataset = dm.Dataset.import_from(tmp_dir, 'mot_seq', env=dm_env)
if load_data_callback is not None:
load_data_callback(dataset, instance_data)
......@@ -113,4 +118,3 @@ def _import(src_file, instance_data, load_data_callback=None, **kwargs):
_import_to_task(sub_dataset, task_data)
else:
_import_to_task(dataset, instance_data)
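As a reading aid for the hunk above, here is a minimal, self-contained sketch of the new attribute handling in `_import_to_task` (the `Attribute` namedtuple stands in for `instance_data.Attribute`, and the input dict mimics a Datumaro bbox's `attributes`). Popping `occluded` and `track_id` first and forwarding whatever remains is what lets `visibility` and `ignored` reach the task now.

```python
from collections import namedtuple

# Stand-in for instance_data.Attribute (assumed to be a simple name/value pair).
Attribute = namedtuple('Attribute', ['name', 'value'])

def split_mot_attributes(raw_attributes):
    """Consume 'occluded' and 'track_id', forward everything else
    (e.g. 'visibility', 'ignored') as CVAT-style attributes."""
    attrs = dict(raw_attributes)               # do not mutate the caller's dict
    occluded = attrs.pop('occluded', False) is True
    track_id = attrs.pop('track_id', None)     # None -> imported as a plain shape
    attributes = [Attribute(name=n, value=str(v)) for n, v in attrs.items()]
    return occluded, track_id, attributes

# Example: a MOT bbox with visibility/ignored information attached.
occluded, track_id, attributes = split_mot_attributes(
    {'track_id': 7, 'occluded': False, 'visibility': 0.6, 'ignored': False})
assert track_id == 7 and not occluded
assert Attribute('visibility', '0.6') in attributes
assert Attribute('ignored', 'False') in attributes
```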
......@@ -574,6 +574,35 @@
}
]
},
"MOT 1.1 shapes only": {
"version": 0,
"tags": [],
"shapes": [
{
"type": "rectangle",
"occluded": false,
"z_order": 0,
"points": [9.4, 12.09, 17.2, 18.19],
"frame": 0,
"label_id": null,
"group": 0,
"source": "manual",
"attributes": []
},
{
"type": "rectangle",
"occluded": false,
"z_order": 0,
"points": [2.4, 4.9, 15.24, 13.21],
"frame": 0,
"label_id": null,
"group": 0,
"source": "manual",
"attributes": []
}
],
"tracks": []
},
"MOTS PNG 1.0": {
"version": 0,
"tags": [],
......
......@@ -214,6 +214,53 @@
}
]
},
"MOT 1.1": {
"name": "MOT task",
"overlap": 0,
"segment_size": 100,
"labels": [
{
"name": "car",
"color": "#2080c0",
"attributes": [
{
"name": "ignored",
"mutable": true,
"input_type": "checkbox",
"default_value": "false",
"values": ["false", "true"]
},
{
"name": "visibility",
"mutable": false,
"input_type": "number",
"default_value": "1",
"values": ["0", "1", "1"]
}
]
},
{
"name": "person",
"color": "#c06060",
"attributes": [
{
"name": "ignored",
"mutable": true,
"input_type": "checkbox",
"default_value": "false",
"values": ["false", "true"]
},
{
"name": "visibility",
"mutable": false,
"input_type": "number",
"default_value": "1",
"values": ["0", "1", "1"]
}
]
}
]
},
"wrong_checkbox_value": {
"name": "wrong checkbox value task",
"overlap": 0,
......
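A short note on the `visibility` spec in the fixture above: assuming the usual CVAT convention that a number attribute's `values` list is `[min, max, step]`, the `["0", "1", "1"]` entry constrains visibility to the range 0..1. A tiny illustrative helper (the function name and output format are made up):

```python
def describe_number_attribute(values):
    """Interpret a CVAT 'number' attribute spec.

    Assumption: values is [min, max, step], so ["0", "1", "1"] above
    describes a value between 0 and 1 with step 1.
    """
    low, high, step = (float(v) for v in values)
    return f"number in [{low:g}, {high:g}], step {step:g}"

print(describe_number_attribute(["0", "1", "1"]))  # number in [0, 1], step 1
```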
......@@ -395,20 +395,20 @@ class TaskDumpUploadTest(_DbTestBase):
images = self._generate_task_images(3)
# create task with annotations
if dump_format_name in [
"Market-1501 1.0", "Cityscapes 1.0", \
"ICDAR Localization 1.0", "ICDAR Recognition 1.0", \
"ICDAR Segmentation 1.0", "COCO Keypoints 1.0",
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1"
]:
task = self._create_task(tasks[dump_format_name], images)
else:
task = self._create_task(tasks["main"], images)
task_id = task["id"]
if dump_format_name in [
"MOT 1.1", "MOTS PNG 1.0", \
"PASCAL VOC 1.1", "Segmentation mask 1.1", \
"TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
"WiderFace 1.0", "VGGFace2 1.0", "Cityscapes 1.0", \
"Datumaro 1.0"\
"Cityscapes 1.0", "Datumaro 1.0",
"ImageNet 1.0", "MOTS PNG 1.0",
"PASCAL VOC 1.1", "Segmentation mask 1.1",
"TFRecord 1.0", "VGGFace2 1.0",
"WiderFace 1.0", "YOLO 1.1"
]:
self._create_annotations(task, dump_format_name, "default")
else:
......@@ -457,9 +457,9 @@ class TaskDumpUploadTest(_DbTestBase):
# remove all annotations from task (create new task without annotation)
images = self._generate_task_images(3)
if upload_format_name in [
"Market-1501 1.0", "Cityscapes 1.0", \
"ICDAR Localization 1.0", "ICDAR Recognition 1.0", \
"ICDAR Segmentation 1.0", "COCO Keypoints 1.0",
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1"
]:
task = self._create_task(tasks[upload_format_name], images)
else:
......@@ -500,9 +500,9 @@ class TaskDumpUploadTest(_DbTestBase):
# create task with annotations
video = self._generate_task_videos(1)
if dump_format_name in [
"Market-1501 1.0", "Cityscapes 1.0", \
"ICDAR Localization 1.0", "ICDAR Recognition 1.0", \
"ICDAR Segmentation 1.0", "COCO Keypoints 1.0",
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1"
]:
task = self._create_task(tasks[dump_format_name], video)
else:
......@@ -510,10 +510,10 @@ class TaskDumpUploadTest(_DbTestBase):
task_id = task["id"]
if dump_format_name in [
"MOT 1.1", "MOTS PNG 1.0", \
"PASCAL VOC 1.1", "Segmentation mask 1.1", \
"TFRecord 1.0", "YOLO 1.1", "ImageNet 1.0", \
"WiderFace 1.0", "VGGFace2 1.0", "Cityscapes 1.0" \
"Cityscapes 1.0", "ImageNet 1.0",
"MOTS PNG 1.0", "PASCAL VOC 1.1",
"Segmentation mask 1.1", "TFRecord 1.0",
"VGGFace2 1.0", "WiderFace 1.0", "YOLO 1.1"
]:
self._create_annotations(task, dump_format_name, "default")
else:
......@@ -561,9 +561,9 @@ class TaskDumpUploadTest(_DbTestBase):
# remove all annotations from task (create new task without annotation)
video = self._generate_task_videos(1)
if upload_format_name in [
"Market-1501 1.0", "Cityscapes 1.0", \
"ICDAR Localization 1.0", "ICDAR Recognition 1.0", \
"ICDAR Segmentation 1.0", "COCO Keypoints 1.0",
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1"
]:
task = self._create_task(tasks[upload_format_name], video)
else:
......@@ -842,9 +842,9 @@ class TaskDumpUploadTest(_DbTestBase):
images = self._generate_task_images(3)
# create task with annotations
if dump_format_name in [
"Market-1501 1.0", "Cityscapes 1.0", \
"ICDAR Localization 1.0", "ICDAR Recognition 1.0", \
"ICDAR Segmentation 1.0","COCO Keypoints 1.0",
"Cityscapes 1.0", "COCO Keypoints 1.0",
"ICDAR Localization 1.0", "ICDAR Recognition 1.0",
"ICDAR Segmentation 1.0", "Market-1501 1.0", "MOT 1.1"
]:
task = self._create_task(tasks[dump_format_name], images)
else:
......@@ -1176,7 +1176,44 @@ class TaskDumpUploadTest(_DbTestBase):
# equals annotations
data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)\
compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
def test_api_v2_check_mot_with_shapes_only(self):
test_name = self._testMethodName
format_name = "MOT 1.1"
for include_images in (False, True):
with self.subTest():
# create task with annotations
images = self._generate_task_images(3)
task = self._create_task(tasks[format_name], images)
self._create_annotations(task, f'{format_name} shapes only', "default")
task_id = task["id"]
data_from_task_before_upload = self._get_data_from_task(task_id, include_images)
# dump annotations
url = self._generate_url_dump_tasks_annotations(task_id)
data = {
"format": format_name,
"action": "download",
}
with TestDir() as test_dir:
file_zip_name = osp.join(test_dir, f'{test_name}_{format_name}.zip')
self._download_file(url, data, self.admin, file_zip_name)
self._check_downloaded_file(file_zip_name)
# remove annotations
self._remove_annotations(url, self.admin)
# upload annotations
url = self._generate_url_upload_tasks_annotations(task_id, format_name)
with open(file_zip_name, 'rb') as binary_file:
self._upload_file(url, binary_file, self.admin)
# equals annotations
data_from_task_after_upload = self._get_data_from_task(task_id, include_images)
compare_datasets(self, data_from_task_before_upload, data_from_task_after_upload)
def test_api_v2_check_attribute_import_in_tracks(self):
test_name = self._testMethodName
......