Unverified commit 9a600f3f authored by Maxim Zhiltsov, committed by GitHub

Honey pot server (#6204)

The server part of #6039 

- Added support for Ground Truth jobs in a task
- Added support for job creation and removal (only Ground Truth jobs can
  be created or removed in a task)
- Added a component to autocompute quality metrics for a task
- Added tests
- Fixed https://github.com/opencv/cvat/issues/5971 (both parts: the
  `outside` problem and the manifest problem; the manifest part of the fix is
  also available in #6216)
Co-authored-by: klakhov <kirill.9992@gmail.com>
Co-authored-by: Roman Donchenko <roman@cvat.ai>
Co-authored-by: Kirill Sizov <kirill.sizov@cvat.ai>
Parent 5fa11c9e
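For reference, a minimal sketch of exercising the new Ground Truth job API, inferred from the `JobWriteSerializer` changes in this diff; the server URL, credentials, and exact endpoint paths are assumptions for illustration, not part of the patch:

```python
# Hypothetical client-side sketch of the new Ground Truth job endpoints.
import requests

BASE_URL = "https://cvat.example.com"  # assumed server URL
session = requests.Session()
session.auth = ("user", "password")    # assumed credentials

# Create a Ground Truth job for task 42 from manually selected frames
# (fields match the new JobWriteSerializer: type, task_id,
# frame_selection_method, frames / frame_count / seed).
response = session.post(f"{BASE_URL}/api/jobs", json={
    "task_id": 42,
    "type": "ground_truth",
    "frame_selection_method": "manual",
    "frames": [0, 5, 10],
})
response.raise_for_status()
gt_job = response.json()

# Or let the server pick frames uniformly at random; the same seed
# reproduces the same frame set, per the serializer help texts.
session.post(f"{BASE_URL}/api/jobs", json={
    "task_id": 42,
    "type": "ground_truth",
    "frame_selection_method": "random_uniform",
    "frame_count": 10,
    "seed": 1,
})

# Only Ground Truth jobs can be removed from a task.
session.delete(f"{BASE_URL}/api/jobs/{gt_job['id']}")
```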
......@@ -12,6 +12,7 @@ jobs:
cvat-sdk/**/*.py
cvat-cli/**/*.py
tests/python/**/*.py
cvat/apps/quality_control/**/*.py
dir_names: true
- name: Run checks
......
......@@ -12,6 +12,7 @@ jobs:
cvat-sdk/**/*.py
cvat-cli/**/*.py
tests/python/**/*.py
cvat/apps/quality_control/**/*.py
dir_names: true
- name: Run checks
......
......@@ -84,6 +84,26 @@
],
"justMyCode": false,
},
{
"name": "REST API tests: Attach to RQ quality reports worker",
"type": "python",
"request": "attach",
"connect": {
"host": "127.0.0.1",
"port": 9094
},
"pathMappings": [
{
"localRoot": "${workspaceFolder}",
"remoteRoot": "/home/django/"
},
{
"localRoot": "${workspaceFolder}/.env",
"remoteRoot": "/opt/venv",
}
],
"justMyCode": false,
},
{
"type": "pwa-chrome",
"request": "launch",
......@@ -199,7 +219,7 @@
"console": "internalConsole"
},
{
"name": "server: RQ - scheduler",
"name": "server: RQ - quality reports",
"type": "python",
"request": "launch",
"stopOnEntry": false,
......@@ -207,13 +227,36 @@
"python": "${command:python.interpreterPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
"rqscheduler",
"--queue",
"export"
"rqworker",
"quality_reports",
"--worker-class",
"cvat.rqworker.SimpleWorker",
],
"django": true,
"cwd": "${workspaceFolder}",
"env": {},
"env": {
"DJANGO_LOG_SERVER_HOST": "localhost",
"DJANGO_LOG_SERVER_PORT": "8282"
},
"console": "internalConsole"
},
{
"name": "server: RQ - scheduler",
"type": "python",
"request": "launch",
"stopOnEntry": false,
"justMyCode": false,
"python": "${command:python.interpreterPath}",
"program": "${workspaceRoot}/rqscheduler.py",
"django": true,
"cwd": "${workspaceFolder}",
"args": [
"-i", "1"
],
"env": {
"DJANGO_LOG_SERVER_HOST": "localhost",
"DJANGO_LOG_SERVER_PORT": "8282"
},
"console": "internalConsole"
},
{
......@@ -455,6 +498,7 @@
"server: RQ - annotation",
"server: RQ - webhooks",
"server: RQ - scheduler",
"server: RQ - quality reports",
"server: RQ - cleaning",
"server: git",
]
......
......@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## \[2.5.0] - Unreleased
### Added
- New option ``semi-auto`` is available as annotations source (<https://github.com/opencv/cvat/pull/6263>)
- \[API\] Support for Ground Truth job creation and removal (<https://github.com/opencv/cvat/pull/6204>)
- \[API\] Task quality estimation endpoints (<https://github.com/opencv/cvat/pull/6204>)
### Changed
- TBD
### Changed
......@@ -42,8 +47,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Deletion of uploaded files, including annotations and backups,
after they have been uploaded to the server using the TUS protocol but before an RQ job has been initiated. (<https://github.com/opencv/cvat/pull/5909>)
- Simultaneous creation of tasks or projects with identical names from backups by multiple users. (<https://github.com/opencv/cvat/pull/5909>)
- \[Server API\] The `predefined` sorting method for task data uploads (<https://github.com/opencv/cvat/pull/5083>)
- \[API\] The `predefined` sorting method for task data uploads (<https://github.com/opencv/cvat/pull/5083>)
- Allowed slashes in export filenames. (<https://github.com/opencv/cvat/pull/6265>)
- Dataset export error with `outside` property of tracks (<https://github.com/opencv/cvat/issues/5971>)
## \[2.4.5] - 2023-06-02
### Added
......
......@@ -177,6 +177,7 @@ COPY --chown=${USER} ssh ${HOME}/.ssh
COPY --chown=${USER} wait-for-it.sh manage.py backend_entrypoint.sh ${HOME}/
COPY --chown=${USER} utils/ ${HOME}/utils
COPY --chown=${USER} cvat/ ${HOME}/cvat
COPY --chown=${USER} rqscheduler.py ${HOME}
ARG COVERAGE_PROCESS_START
RUN if [ "${COVERAGE_PROCESS_START}" ]; then \
......
......@@ -317,20 +317,19 @@ FrameData.prototype.data.implementation = async function (onServerRequest) {
function getFrameMeta(jobID, frame): FramesMetaData['frames'][0] {
const { meta, mode, startFrame } = frameDataCache[jobID];
let size = null;
let frameMeta = null;
if (mode === 'interpolation') {
[size] = meta.frames;
[frameMeta] = meta.frames;
} else if (mode === 'annotation') {
if (frame >= meta.size) {
if (frame > meta.stop_frame) {
throw new ArgumentError(`Meta information about frame ${frame} can't be received from the server`);
} else {
size = meta.frames[frame - startFrame];
}
frameMeta = meta.frames[frame - startFrame];
} else {
throw new DataError(`Invalid mode is specified ${mode}`);
}
return size;
return frameMeta;
}
class FrameBuffer {
......
......@@ -6,6 +6,7 @@
from copy import copy, deepcopy
import math
from typing import Optional, Sequence
import numpy as np
from itertools import chain
from scipy.optimize import linear_sum_assignment
......@@ -13,6 +14,7 @@ from shapely import geometry
from cvat.apps.engine.models import ShapeType, DimensionType
from cvat.apps.engine.serializers import LabeledDataSerializer
from cvat.apps.dataset_manager.util import deepcopy_simple
class AnnotationIR:
......@@ -160,11 +162,24 @@ class AnnotationManager:
tracks = TrackManager(self.data.tracks, dimension)
tracks.merge(data.tracks, start_frame, overlap, dimension)
def to_shapes(self, end_frame, dimension):
def to_shapes(self,
end_frame: int,
dimension: DimensionType,
*,
included_frames: Optional[Sequence[int]] = None,
include_outside: bool = False,
use_server_track_ids: bool = False
) -> list:
shapes = self.data.shapes
tracks = TrackManager(self.data.tracks, dimension)
return shapes + tracks.to_shapes(end_frame)
if included_frames is not None:
shapes = [s for s in shapes if s["frame"] in included_frames]
return shapes + tracks.to_shapes(end_frame,
included_frames=included_frames, include_outside=include_outside,
use_server_track_ids=use_server_track_ids
)
def to_tracks(self):
tracks = self.data.tracks
......@@ -408,40 +423,58 @@ class TrackManager(ObjectManager):
self._dimension = dimension
super().__init__(objects)
def to_shapes(self, end_frame, end_skeleton_frame=None):
def to_shapes(self, end_frame: int, *,
included_frames: Optional[Sequence[int]] = None,
include_outside: bool = False,
use_server_track_ids: bool = False
) -> list:
shapes = []
for idx, track in enumerate(self.objects):
track_id = track["id"] if use_server_track_ids else idx
track_shapes = {}
for shape in TrackManager.get_interpolated_shapes(
track,
0,
end_frame,
self._dimension,
include_outside_frames=end_skeleton_frame is not None,
include_outside=include_outside,
included_frames=included_frames,
):
shape["label_id"] = track["label_id"]
shape["group"] = track["group"]
shape["track_id"] = track_id
shape["source"] = track["source"]
shape["track_id"] = idx
shape["attributes"] += track["attributes"]
shape["elements"] = []
track_shapes[shape["frame"]] = shape
last_frame = shape["frame"]
while end_skeleton_frame and shape["frame"] < end_skeleton_frame:
shape = deepcopy(shape)
shape["frame"] += 1
track_shapes[shape["frame"]] = shape
if len(track.get("elements", [])):
if not track_shapes:
# This track has no elements on the included frames
continue
if track.get("elements"):
track_elements = TrackManager(track["elements"], self._dimension)
element_shapes = track_elements.to_shapes(end_frame,
end_skeleton_frame=last_frame)
included_frames=set(track_shapes.keys()).intersection(included_frames or []),
include_outside=True, # elements are controlled by the parent shape
use_server_track_ids=use_server_track_ids
)
for shape in element_shapes:
track_shapes[shape["frame"]]["elements"].append(shape)
shapes.extend(list(track_shapes.values()))
# The whole shape can be filtered out if all its elements are outside
# and outside shapes are not requested.
if not include_outside:
track_shapes = {
frame_number: shape for frame_number, shape in track_shapes.items()
if not shape["elements"]
or not all(elem["outside"] for elem in shape["elements"])
}
shapes.extend(track_shapes.values())
return shapes
@staticmethod
......@@ -449,6 +482,9 @@ class TrackManager(ObjectManager):
# Just for unification. All tracks are assigned on the same frame
objects_by_frame = {0: []}
for obj in objects:
if not obj["shapes"]:
continue
shape = obj["shapes"][-1] # optimization for old tracks
if shape["frame"] >= start_frame or not shape["outside"]:
objects_by_frame[0].append(obj)
......@@ -471,9 +507,11 @@ class TrackManager(ObjectManager):
end_frame = start_frame + overlap
obj0_shapes = TrackManager.get_interpolated_shapes(obj0, start_frame, end_frame, dimension)
obj1_shapes = TrackManager.get_interpolated_shapes(obj1, start_frame, end_frame, dimension)
if not obj0_shapes or not obj1_shapes:
return 0
obj0_shapes_by_frame = {shape["frame"]:shape for shape in obj0_shapes}
obj1_shapes_by_frame = {shape["frame"]:shape for shape in obj1_shapes}
assert obj0_shapes_by_frame and obj1_shapes_by_frame
count, error = 0, 0
for frame in range(start_frame, end_frame):
......@@ -489,7 +527,7 @@ class TrackManager(ObjectManager):
error += 1
count += 1
return 1 - error / count
return 1 - error / (count or 1)
else:
return 0
......@@ -506,16 +544,30 @@ class TrackManager(ObjectManager):
@staticmethod
def get_interpolated_shapes(
track, start_frame, end_frame, dimension, *, include_outside_frames=False
track, start_frame, end_frame, dimension, *,
included_frames: Optional[Sequence[int]] = None,
include_outside: bool = False,
):
def copy_shape(source, frame, points=None, rotation=None):
copied = deepcopy(source)
copied = source.copy()
copied["attributes"] = deepcopy_simple(source["attributes"])
copied["keyframe"] = False
copied["frame"] = frame
if rotation is not None:
copied["rotation"] = rotation
if points is None:
points = copied["points"]
if isinstance(points, np.ndarray):
points = points.tolist()
else:
points = points.copy()
if points is not None:
copied["points"] = points
return copied
def find_angle_diff(right_angle, left_angle):
......@@ -540,7 +592,8 @@ class TrackManager(ObjectManager):
) * offset + 360) % 360
points = shape0["points"] + diff * offset
shapes.append(copy_shape(shape0, frame, points.tolist(), rotation))
if included_frames is None or frame in included_frames:
shapes.append(copy_shape(shape0, frame, points, rotation))
return shapes
......@@ -567,7 +620,8 @@ class TrackManager(ObjectManager):
else:
shapes = []
for frame in range(shape0["frame"] + 1, shape1["frame"]):
shapes.append(copy_shape(shape0, frame))
if included_frames is None or frame in included_frames:
shapes.append(copy_shape(shape0, frame))
return shapes
......@@ -619,7 +673,7 @@ class TrackManager(ObjectManager):
def match_right_left(left_curve, right_curve, left_right_matching):
matched_right_points = list(chain.from_iterable(left_right_matching.values()))
unmatched_right_points = filter(lambda x: x not in matched_right_points, range(len(right_curve)))
updated_matching = deepcopy(left_right_matching)
updated_matching = deepcopy_simple(left_right_matching)
for right_point in unmatched_right_points:
left_point = find_nearest_pair(right_curve[right_point], left_curve)
......@@ -771,17 +825,22 @@ class TrackManager(ObjectManager):
shapes = []
is_polygon = shape0["type"] == ShapeType.POLYGON
if is_polygon:
shape0["points"].extend(shape0["points"][:2])
shape1["points"].extend(shape1["points"][:2])
# Make the polygon closed for computations
shape0 = shape0.copy()
shape1 = shape1.copy()
shape0["points"] = shape0["points"] + shape0["points"][:2]
shape1["points"] = shape1["points"] + shape1["points"][:2]
distance = shape1["frame"] - shape0["frame"]
for frame in range(shape0["frame"] + 1, shape1["frame"]):
offset = (frame - shape0["frame"]) / distance
points = interpolate_position(shape0, shape1, offset)
shapes.append(copy_shape(shape0, frame, points))
if included_frames is None or frame in included_frames:
shapes.append(copy_shape(shape0, frame, points))
if is_polygon:
# Remove the extra point added
shape0["points"] = shape0["points"][:-2]
shape1["points"] = shape1["points"][:-2]
for shape in shapes:
......@@ -816,38 +875,75 @@ class TrackManager(ObjectManager):
return shapes
def propagate(shape, end_frame, *, included_frames=None):
return [
copy_shape(shape, i)
for i in range(shape["frame"] + 1, end_frame)
if included_frames is None or i in included_frames
]
shapes = []
prev_shape = {}
prev_shape = None
for shape in sorted(track["shapes"], key=lambda shape: shape["frame"]):
curr_frame = shape["frame"]
if end_frame <= curr_frame:
# if we exceed endframe, we still need to interpolate using the next keyframe
# but we keep the results only up to end_frame
interpolated = interpolate(prev_shape, deepcopy(shape))
if prev_shape and end_frame <= curr_frame:
# If we exceed the end_frame and there was a previous shape,
# we still need to interpolate up to the next keyframe,
# but keep the results only up to the end_frame:
# vvvvvvv
# ---- | ------- | ----- | ----->
# prev end cur kf
interpolated = interpolate(prev_shape, shape)
interpolated.append(shape)
for shape in sorted(interpolated, key=lambda shape: shape["frame"]):
if shape["frame"] < end_frame:
shapes.append(shape)
else:
break
return shapes
# Update the last added shape
shape["keyframe"] = True
prev_shape = shape
break # The track finishes here
if prev_shape:
assert shape["frame"] > prev_shape["frame"]
assert curr_frame > prev_shape["frame"] # Catch invalid tracks
# Propagate attributes
for attr in prev_shape["attributes"]:
if attr["spec_id"] not in map(lambda el: el["spec_id"], shape["attributes"]):
shape["attributes"].append(deepcopy(attr))
if not prev_shape["outside"] or include_outside_frames:
shape["attributes"].append(deepcopy_simple(attr))
if not prev_shape["outside"] or include_outside:
shapes.extend(interpolate(prev_shape, shape))
shape["keyframe"] = True
shapes.append(shape)
prev_shape = shape
if not prev_shape["outside"]:
# valid when the latest keyframe of a track less than end_frame and it is not outside, so, need to propagate
shape = deepcopy(prev_shape)
shape["frame"] = end_frame
shapes.extend(interpolate(prev_shape, shape))
if prev_shape and (not prev_shape["outside"] or include_outside):
# When the latest keyframe of a track is less than the end_frame
# and it is not outside, need to propagate
shapes.extend(propagate(prev_shape, end_frame, included_frames=included_frames))
shapes = [
shape for shape in shapes
# After interpolation there can be a finishing frame
# outside of the task boundaries. Filter it out to avoid errors.
# https://github.com/openvinotoolkit/cvat/issues/2827
if track["frame"] <= shape["frame"] < end_frame
# Exclude outside shapes.
# Keyframes should be included regardless of the outside value.
# If really needed, they can be excluded at later stages,
# but here they represent a finishing shape in a visible sequence.
if shape["keyframe"] or not shape["outside"] or include_outside
if included_frames is None or shape["frame"] in included_frames
]
return shapes
......
......@@ -19,10 +19,11 @@ from cvat.apps.engine import models, serializers
from cvat.apps.engine.plugins import plugin_decorator
from cvat.apps.profiler import silk_profile
from .annotation import AnnotationIR, AnnotationManager
from .bindings import JobData, TaskData, CvatImportError
from .formats.registry import make_exporter, make_importer
from .util import bulk_create
from cvat.apps.dataset_manager.annotation import AnnotationIR, AnnotationManager
from cvat.apps.dataset_manager.bindings import TaskData, JobData, CvatImportError
from cvat.apps.dataset_manager.formats.registry import make_exporter, make_importer
from cvat.apps.dataset_manager.util import add_prefetch_fields, bulk_create
class dotdict(OrderedDict):
"""dot.notation access to dictionary attributes"""
......@@ -73,18 +74,48 @@ def _merge_table_rows(rows, keys_for_merge, field_id):
return list(merged_rows.values())
class JobAnnotation:
def __init__(self, pk, is_prefetched=False):
@classmethod
def add_prefetch_info(cls, queryset):
assert issubclass(queryset.model, models.Job)
label_qs = add_prefetch_fields(models.Label.objects.all(), [
'skeleton',
'parent',
'attributespec_set',
])
label_qs = JobData.add_prefetch_info(label_qs)
return queryset.select_related(
'segment',
'segment__task',
).prefetch_related(
'segment__task__owner',
'segment__task__assignee',
'segment__task__project__owner',
'segment__task__project__assignee',
Prefetch('segment__task__data',
queryset=models.Data.objects.select_related('video').prefetch_related(
Prefetch('images', queryset=models.Image.objects.order_by('frame'))
)),
Prefetch('segment__task__label_set', queryset=label_qs),
Prefetch('segment__task__project__label_set', queryset=label_qs),
)
def __init__(self, pk, *, is_prefetched=False, queryset=None):
if queryset is None:
queryset = self.add_prefetch_info(models.Job.objects).all()
if is_prefetched:
self.db_job = models.Job.objects.select_related('segment__task') \
.select_for_update().get(id=pk)
self.db_job: models.Job = queryset.select_related(
'segment__task'
).select_for_update().get(id=pk)
else:
self.db_job = models.Job.objects.prefetch_related(
'segment',
'segment__task',
Prefetch('segment__task__data', queryset=models.Data.objects.select_related('video').prefetch_related(
Prefetch('images', queryset=models.Image.objects.order_by('frame'))
))
).get(pk=pk)
try:
self.db_job: models.Job = next(job for job in queryset if job.pk == int(pk))
except StopIteration as ex:
raise models.Job.DoesNotExist from ex
db_segment = self.db_job.segment
self.start_frame = db_segment.start_frame
......@@ -660,7 +691,9 @@ class TaskAnnotation:
).get(id=pk)
# Postgres doesn't guarantee an order by default without explicit order_by
self.db_jobs = models.Job.objects.select_related("segment").filter(segment__task_id=pk).order_by('id')
self.db_jobs = models.Job.objects.select_related("segment").filter(
segment__task_id=pk, type=models.JobType.ANNOTATION.value,
).order_by('id')
self.ir_data = AnnotationIR(self.db_task.dimension)
def reset(self):
......@@ -711,6 +744,9 @@ class TaskAnnotation:
self.reset()
for db_job in self.db_jobs:
if db_job.type != models.JobType.ANNOTATION:
continue
annotation = JobAnnotation(db_job.id, is_prefetched=True)
annotation.init_from_db()
if annotation.ir_data.version > self.ir_data.version:
......
......@@ -11,13 +11,21 @@ class TrackManagerTest(TestCase):
def _check_interpolation(self, track):
interpolated = TrackManager.get_interpolated_shapes(track, 0, 7, '2d')
self.assertEqual(len(interpolated), 6)
self.assertTrue(interpolated[0]["keyframe"])
self.assertFalse(interpolated[1]["keyframe"])
self.assertTrue(interpolated[2]["keyframe"])
self.assertTrue(interpolated[3]["keyframe"])
self.assertFalse(interpolated[4]["keyframe"])
self.assertFalse(interpolated[5]["keyframe"])
self.assertEqual(
[
{"frame": 0, "keyframe": True, "outside": False},
{"frame": 1, "keyframe": False, "outside": False},
{"frame": 2, "keyframe": True, "outside": True},
# frame = 3 should be skipped as it is outside and interpolated
{"frame": 4, "keyframe": True, "outside": False},
{"frame": 5, "keyframe": False, "outside": False},
{"frame": 6, "keyframe": False, "outside": False},
],
[
{k: v for k, v in shape.items() if k in ["frame", "keyframe", "outside"]}
for shape in interpolated
]
)
def test_point_interpolation(self):
track = {
......
......@@ -3,10 +3,14 @@
#
# SPDX-License-Identifier: MIT
from copy import deepcopy
from typing import Sequence
import inspect
import os, os.path as osp
import zipfile
from django.conf import settings
from django.db.models import QuerySet
def current_function_name(depth=1):
......@@ -35,3 +39,25 @@ def bulk_create(db_model, objects, flt_param):
return db_model.objects.bulk_create(objects)
return []
def is_prefetched(queryset: QuerySet, field: str) -> bool:
return field in queryset._prefetch_related_lookups
def add_prefetch_fields(queryset: QuerySet, fields: Sequence[str]) -> QuerySet:
for field in fields:
if not is_prefetched(queryset, field):
queryset = queryset.prefetch_related(field)
return queryset
def deepcopy_simple(v):
# Default deepcopy is very slow
if isinstance(v, dict):
return {k: deepcopy_simple(vv) for k, vv in v.items()}
elif isinstance(v, (list, tuple, set)):
return type(v)(deepcopy_simple(vv) for vv in v)
elif isinstance(v, (int, float, str, bool)) or v is None:
return v
else:
return deepcopy(v)
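As a usage illustration for this helper (the sample dictionary is hypothetical):

```python
# deepcopy_simple copies plain dicts, lists, tuples and sets recursively,
# returns immutable scalars unchanged, and falls back to copy.deepcopy
# only for other object types.
from cvat.apps.dataset_manager.util import deepcopy_simple

shape = {"frame": 0, "points": [1.0, 2.0], "attributes": [{"spec_id": 3}]}
copied = deepcopy_simple(shape)
assert copied == shape and copied is not shape
assert copied["attributes"][0] is not shape["attributes"][0]  # deep copy
```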
......@@ -31,7 +31,8 @@ from distutils.util import strtobool
import cvat.apps.dataset_manager as dm
from cvat.apps.engine import models
from cvat.apps.engine.log import slogger
from cvat.apps.engine.serializers import (AttributeSerializer, DataSerializer, LabelSerializer,
from cvat.apps.engine.serializers import (AttributeSerializer, DataSerializer,
JobWriteSerializer, LabelSerializer,
LabeledDataSerializer, SegmentSerializer, SimpleJobSerializer, TaskReadSerializer,
ProjectReadSerializer, ProjectFileSerializer, TaskFileSerializer, RqIdSerializer)
from cvat.apps.engine.utils import (
......@@ -147,6 +148,7 @@ class _TaskBackupBase(_BackupBase):
def _prepare_job_meta(self, job):
allowed_fields = {
'status',
'type',
}
return self._prepare_meta(allowed_fields, job)
......@@ -331,12 +333,13 @@ class TaskExporter(_ExporterBase, _TaskBackupBase):
job_serializer.fields.pop(field)
job_data = self._prepare_job_meta(job_serializer.data)
segment_serailizer = SegmentSerializer(db_segment)
segment_serailizer.fields.pop('jobs')
segment = segment_serailizer.data
segment_serializer = SegmentSerializer(db_segment)
segment_serializer.fields.pop('jobs')
segment = segment_serializer.data
segment_type = segment.pop("type")
segment.update(job_data)
if self._db_task.segment_size == 0:
if self._db_task.segment_size == 0 and segment_type == models.SegmentType.RANGE:
segment.update(serialize_custom_file_mapping(db_segment))
return segment
......@@ -510,6 +513,12 @@ class TaskImporter(_ImporterBase, _TaskBackupBase):
@staticmethod
def _calculate_segment_size(jobs):
# The type field will be missing in backups created before the GT jobs were introduced
jobs = [
j for j in jobs
if j.get("type", models.JobType.ANNOTATION) == models.JobType.ANNOTATION
]
segment_size = jobs[0]['stop_frame'] - jobs[0]['start_frame'] + 1
overlap = 0 if len(jobs) == 1 else jobs[0]['stop_frame'] - jobs[1]['start_frame'] + 1
......@@ -605,6 +614,9 @@ class TaskImporter(_ImporterBase, _TaskBackupBase):
data['job_file_mapping'] = job_file_mapping
_create_thread(self._db_task.pk, data.copy(), isBackupRestore=True)
self._db_task.refresh_from_db()
db_data.refresh_from_db()
db_data.start_frame = data['start_frame']
db_data.stop_frame = data['stop_frame']
db_data.frame_filter = data['frame_filter']
......@@ -612,10 +624,36 @@ class TaskImporter(_ImporterBase, _TaskBackupBase):
db_data.storage = StorageChoice.LOCAL
db_data.save(update_fields=['start_frame', 'stop_frame', 'frame_filter', 'storage', 'deleted_frames'])
# Recreate Ground Truth jobs (they won't be created automatically)
self._import_gt_jobs(jobs)
for db_job, job in zip(self._get_db_jobs(), jobs):
db_job.status = job['status']
db_job.save()
def _import_gt_jobs(self, jobs):
for job in jobs:
# The type field will be missing in backups created before the GT jobs were introduced
try:
raw_job_type = job.get("type", models.JobType.ANNOTATION.value)
job_type = models.JobType(raw_job_type)
except ValueError:
raise ValidationError(f"Unexpected job type {raw_job_type}")
if job_type == models.JobType.GROUND_TRUTH:
job_serializer = JobWriteSerializer(data={
'task_id': self._db_task.id,
'type': job_type.value,
'frame_selection_method': models.JobFrameSelectionMethod.MANUAL.value,
'frames': job['frames']
})
job_serializer.is_valid(raise_exception=True)
job_serializer.save()
elif job_type == models.JobType.ANNOTATION:
continue
else:
assert False
def _import_annotations(self):
db_jobs = self._get_db_jobs()
for db_job, annotations in zip(db_jobs, self._annotations):
......
......@@ -12,6 +12,7 @@ from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import cv2
import PIL.Image
import pytz
from django.conf import settings
from django.core.cache import caches
......@@ -28,7 +29,7 @@ from cvat.apps.engine.media_extractors import (ImageDatasetManifestReader,
ZipChunkWriter,
ZipCompressedChunkWriter)
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.models import (DataChoice, DimensionType, Image,
from cvat.apps.engine.models import (DataChoice, DimensionType, Job, Image,
StorageChoice, CloudStorage)
from cvat.apps.engine.utils import md5_hash
from utils.dataset_manifest import ImageManifestManager
......@@ -52,10 +53,18 @@ class MediaCache:
return item
def get_buf_chunk_with_mime(self, chunk_number, quality, db_data):
def get_task_chunk_data_with_mime(self, chunk_number, quality, db_data):
item = self._get_or_set_cache_item(
key=f'{db_data.id}_{chunk_number}_{quality}',
create_function=lambda: self._prepare_chunk_buff(db_data, quality, chunk_number),
create_function=lambda: self._prepare_task_chunk(db_data, quality, chunk_number),
)
return item
def get_selective_job_chunk_data_with_mime(self, chunk_number, quality, job):
item = self._get_or_set_cache_item(
key=f'{job.id}_{chunk_number}_{quality}',
create_function=lambda: self.prepare_selective_job_chunk(job, quality, chunk_number),
)
return item
......@@ -96,13 +105,13 @@ class MediaCache:
return item
@staticmethod
def _get_frame_provider():
def _get_frame_provider_class():
from cvat.apps.engine.frame_provider import \
FrameProvider # TODO: remove circular dependency
return FrameProvider
def _prepare_chunk_buff(self, db_data, quality, chunk_number):
FrameProvider = self._get_frame_provider()
def _prepare_task_chunk(self, db_data, quality, chunk_number):
FrameProvider = self._get_frame_provider_class()
writer_classes = {
FrameProvider.Quality.COMPRESSED : Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter,
......@@ -177,8 +186,66 @@ class MediaCache:
os.remove(image_path)
return buff, mime_type
def prepare_selective_job_chunk(self, db_job: Job, quality, chunk_number: int):
db_data = db_job.segment.task.data
FrameProvider = self._get_frame_provider_class()
frame_provider = FrameProvider(db_data, self._dimension)
frame_set = db_job.segment.frame_set
frame_step = db_data.get_frame_step()
chunk_frames = []
writer = ZipCompressedChunkWriter(db_data.image_quality, dimension=self._dimension)
dummy_frame = BytesIO()
PIL.Image.new('RGB', (1, 1)).save(dummy_frame, writer.IMAGE_EXT)
if hasattr(db_data, 'video'):
frame_size = (db_data.video.width, db_data.video.height)
else:
frame_size = None
for frame_idx in range(db_data.chunk_size):
frame_idx = (
db_data.start_frame + chunk_number * db_data.chunk_size + frame_idx * frame_step
)
if db_data.stop_frame < frame_idx:
break
frame_bytes = None
if frame_idx in frame_set:
frame_bytes = frame_provider.get_frame(frame_idx, quality=quality)[0]
if frame_size is not None:
# Decoded video frames can have a different size, restore the original one
frame = PIL.Image.open(frame_bytes)
if frame.size != frame_size:
frame = frame.resize(frame_size)
frame_bytes = BytesIO()
frame.save(frame_bytes, writer.IMAGE_EXT)
frame_bytes.seek(0)
else:
# Populate skipped frames with placeholder data. This is required
# by the video chunk decoding implementation in the UI.
frame_bytes = BytesIO(dummy_frame.getvalue())
if frame_bytes is not None:
chunk_frames.append((frame_bytes, None, None))
buff = BytesIO()
writer.save_as_chunk(chunk_frames, buff, compress_frames=False,
zip_compress_level=1 # there are likely to be many skips in SPECIFIC_FRAMES segments
)
buff.seek(0)
return buff, 'application/zip'
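# Illustration (hypothetical numbers): for a SPECIFIC_FRAMES job whose
# frame_set contains frames 4, 6 and 8, the resulting zip chunk holds real
# image data for those frames and 1x1 placeholder images for the skipped
# ones, so the UI can still decode the chunk as a dense frame sequence.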
def _prepare_local_preview(self, frame_number, db_data):
FrameProvider = self._get_frame_provider()
FrameProvider = self._get_frame_provider_class()
frame_provider = FrameProvider(db_data, self._dimension)
buff, mime_type = frame_provider.get_preview(frame_number)
......@@ -218,7 +285,7 @@ class MediaCache:
return buff, mime_type
def _prepare_context_image(self, db_data, frame_number):
zip_buffer = io.BytesIO()
zip_buffer = BytesIO()
try:
image = Image.objects.get(data_id=db_data.id, frame=frame_number)
except Image.DoesNotExist:
......
......@@ -58,7 +58,7 @@ class SearchFilter(filters.SearchFilter):
description=force_str(full_description)
)
)
]
] if search_fields else []
def get_schema_operation_parameters(self, view):
search_fields = getattr(view, 'search_fields', [])
......@@ -73,7 +73,7 @@ class SearchFilter(filters.SearchFilter):
'schema': {
'type': 'string',
},
}]
}] if search_fields else []
class OrderingFilter(filters.OrderingFilter):
ordering_param = 'sort'
......@@ -113,7 +113,7 @@ class OrderingFilter(filters.OrderingFilter):
description=force_str(full_description)
)
)
]
] if ordering_fields else []
def get_schema_operation_parameters(self, view):
ordering_fields = getattr(view, 'ordering_fields', [])
......@@ -128,7 +128,7 @@ class OrderingFilter(filters.OrderingFilter):
'schema': {
'type': 'string',
},
}]
}] if ordering_fields else []
class JsonLogicFilter(filters.BaseFilterBackend):
Rules = Dict[str, Any]
......@@ -219,7 +219,7 @@ class JsonLogicFilter(filters.BaseFilterBackend):
description=force_str(full_description)
)
)
]
] if filter_fields else []
def get_schema_operation_parameters(self, view):
filter_fields = getattr(view, 'filter_fields', [])
......@@ -235,7 +235,7 @@ class JsonLogicFilter(filters.BaseFilterBackend):
'type': 'string',
},
},
]
] if filter_fields else []
def _get_lookup_fields(self, view):
return get_lookup_fields(view)
......
......@@ -101,12 +101,12 @@ class FrameProvider:
self._loaders[self.Quality.COMPRESSED] = self.BuffChunkLoader(
reader_class[db_data.compressed_chunk_type],
cache.get_buf_chunk_with_mime,
cache.get_task_chunk_data_with_mime,
self.Quality.COMPRESSED,
self._db_data)
self._loaders[self.Quality.ORIGINAL] = self.BuffChunkLoader(
reader_class[db_data.original_chunk_type],
cache.get_buf_chunk_with_mime,
cache.get_task_chunk_data_with_mime,
self.Quality.ORIGINAL,
self._db_data)
else:
......
......@@ -661,18 +661,29 @@ class ZipChunkWriter(IChunkWriter):
return []
class ZipCompressedChunkWriter(IChunkWriter):
def save_as_chunk(self, images, chunk_path):
IMAGE_EXT = 'jpeg'
POINT_CLOUD_EXT = 'pcd'
def save_as_chunk(
self, images, chunk_path, *, compress_frames: bool = True, zip_compress_level: int = 0
):
image_sizes = []
with zipfile.ZipFile(chunk_path, 'x') as zip_chunk:
with zipfile.ZipFile(chunk_path, 'x', compresslevel=zip_compress_level) as zip_chunk:
for idx, (image, _, _) in enumerate(images):
if self._dimension == DimensionType.DIM_2D:
w, h, image_buf = self._compress_image(image, self._image_quality)
extension = "jpeg"
if compress_frames:
w, h, image_buf = self._compress_image(image, self._image_quality)
else:
assert isinstance(image, io.IOBase)
image_buf = io.BytesIO(image.read())
w, h = Image.open(image_buf).size
extension = self.IMAGE_EXT
else:
image_buf = open(image, "rb") if isinstance(image, str) else image
properties = ValidateDimension.get_pcd_properties(image_buf)
w, h = int(properties["WIDTH"]), int(properties["HEIGHT"])
extension = "pcd"
extension = self.POINT_CLOUD_EXT
image_buf.seek(0, 0)
image_buf = io.BytesIO(image_buf.read())
image_sizes.append((w, h))
......
import cvat.apps.engine.models
from django.db import migrations, models
import django.utils.timezone
def add_created_date_to_existing_jobs(apps, schema_editor):
Job = apps.get_model("engine", "Job")
jobs = Job.objects.prefetch_related('segment__task').all()
for job in jobs:
task = job.segment.task
job.created_date = task.created_date
Job.objects.bulk_update(jobs, fields=['created_date'])
class Migration(migrations.Migration):
dependencies = [
('engine', '0069_auto_20230608_1915'),
]
operations = [
migrations.AddField(
model_name='job',
name='type',
field=models.CharField(choices=[('annotation', 'ANNOTATION'), ('ground_truth', 'GROUND_TRUTH')], default='annotation', max_length=32),
),
migrations.AddField(
model_name='segment',
name='frames',
field=cvat.apps.engine.models.IntArrayField(blank=True, default=''),
),
migrations.AddField(
model_name='segment',
name='type',
field=models.CharField(choices=[('range', 'RANGE'), ('specific_frames', 'SPECIFIC_FRAMES')], default='range', max_length=32),
),
migrations.AddField(
model_name='job',
name='created_date',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now, null=True),
preserve_default=False,
),
migrations.RunPython(
code=add_created_date_to_existing_jobs,
),
migrations.AlterField(
model_name='job',
name='created_date',
field=models.DateTimeField(auto_now_add=True),
),
]
......@@ -3,16 +3,20 @@
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
import os
import re
import shutil
from enum import Enum
from typing import Optional
from functools import cached_property
from typing import Any, Dict, Optional, Sequence
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
from django.db import IntegrityError, models
from django.core.exceptions import ValidationError
from django.db import IntegrityError, models, transaction
from django.db.models.fields import FloatField
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import extend_schema_field
......@@ -154,6 +158,28 @@ class SortingMethod(str, Enum):
def __str__(self):
return self.value
class JobType(str, Enum):
ANNOTATION = 'annotation'
GROUND_TRUTH = 'ground_truth'
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
def __str__(self):
return self.value
class JobFrameSelectionMethod(str, Enum):
RANDOM_UNIFORM = 'random_uniform'
MANUAL = 'manual'
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
def __str__(self):
return self.value
class AbstractArrayField(models.TextField):
separator = ","
converter = lambda x: x
......@@ -213,6 +239,9 @@ class Data(models.Model):
match = re.search(r"step\s*=\s*([1-9]\d*)", self.frame_filter)
return int(match.group(1)) if match else 1
def get_valid_frame_indices(self):
return range(self.start_frame, self.stop_frame, self.get_frame_step())
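# Example (hypothetical values): with start_frame=2, stop_frame=10 and
# frame_filter="step=2", this returns range(2, 10, 2), i.e. frames 2, 4, 6, 8.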
def get_data_dirname(self):
return os.path.join(settings.MEDIA_DATA_ROOT, str(self.id))
......@@ -387,6 +416,13 @@ class Task(models.Model):
def get_tmp_dirname(self):
return os.path.join(self.get_dirname(), "tmp")
@cached_property
def gt_job(self) -> Optional[Job]:
try:
return Job.objects.get(segment__task=self, type=JobType.GROUND_TRUTH)
except Job.DoesNotExist:
return None
def __str__(self):
return self.name
......@@ -464,21 +500,130 @@ class RelatedFile(models.Model):
# https://github.com/opencv/cvat/pull/5083#discussion_r1038032715
ordering = ('id', )
class SegmentType(str, Enum):
RANGE = 'range'
SPECIFIC_FRAMES = 'specific_frames'
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
def __str__(self):
return self.value
class Segment(models.Model):
# Common fields
task = models.ForeignKey(Task, on_delete=models.CASCADE)
start_frame = models.IntegerField()
stop_frame = models.IntegerField()
type = models.CharField(choices=SegmentType.choices(), default=SegmentType.RANGE, max_length=32)
# TODO: try to reuse this field for custom task segments (aka job_file_mapping)
# SegmentType.SPECIFIC_FRAMES fields
frames = IntArrayField(store_sorted=True, unique_values=True, default='', blank=True)
def contains_frame(self, idx: int) -> bool:
return self.start_frame <= idx and idx <= self.stop_frame
return idx in self.frame_set
@property
def frame_count(self) -> int:
return len(self.frame_set)
@property
def frame_set(self) -> Sequence[int]:
data = self.task.data
data_start_frame = data.start_frame
data_stop_frame = data.stop_frame
step = data.get_frame_step()
frame_range = range(
data_start_frame + self.start_frame * step,
min(data_start_frame + self.stop_frame * step, data_stop_frame) + step,
step
)
if self.type == SegmentType.RANGE:
return frame_range
elif self.type == SegmentType.SPECIFIC_FRAMES:
return set(frame_range).intersection(self.frames or [])
else:
assert False
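# A worked example (hypothetical values): with data.start_frame=2,
# data.stop_frame=10 and frame_filter="step=2", a RANGE segment with
# start_frame=1 and stop_frame=3 yields
# range(2 + 1*2, min(2 + 3*2, 10) + 2, 2) == range(4, 10, 2),
# i.e. frames 4, 6 and 8; a SPECIFIC_FRAMES segment additionally
# intersects this range with its stored frames.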
def save(self, *args, **kwargs) -> None:
self.full_clean()
return super().save(*args, **kwargs)
def clean(self) -> None:
if not (self.type == SegmentType.RANGE) ^ bool(self.frames):
raise ValidationError(
f"frames and type == {SegmentType.SPECIFIC_FRAMES} can only be used together"
)
if self.stop_frame < self.start_frame:
raise ValidationError("stop_frame cannot be lesser than start_frame")
return super().clean()
@cache_deleted
def delete(self, using=None, keep_parents=False):
super().delete(using, keep_parents)
class Meta:
default_permissions = ()
class TaskGroundTruthJobsLimitError(ValidationError):
def __init__(self) -> None:
super().__init__("A task can have only 1 ground truth job")
class JobQuerySet(models.QuerySet):
@transaction.atomic
def create(self, **kwargs: Any):
self._validate_constraints(kwargs)
return super().create(**kwargs)
@transaction.atomic
def update(self, **kwargs: Any) -> int:
self._validate_constraints(kwargs)
return super().update(**kwargs)
@transaction.atomic
def get_or_create(self, *args, **kwargs: Any):
self._validate_constraints(kwargs)
return super().get_or_create(*args, **kwargs)
@transaction.atomic
def update_or_create(self, *args, **kwargs: Any):
self._validate_constraints(kwargs)
return super().update_or_create(*args, **kwargs)
def _validate_constraints(self, obj: Dict[str, Any]):
# Constraints can't be set on the related model fields.
# This method requires the save operation to be performed inside a transaction.
if obj['type'] == JobType.GROUND_TRUTH and self.filter(
segment__task=obj['segment'].task, type=JobType.GROUND_TRUTH.value
).count() != 0:
raise TaskGroundTruthJobsLimitError()
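# For illustration (hypothetical usage): once a task already has a Ground
# Truth job, creating another one through this manager, e.g.
#   Job.objects.create(segment=other_segment, type=JobType.GROUND_TRUTH),
# raises TaskGroundTruthJobsLimitError before any row is written.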
class Job(models.Model):
objects = JobQuerySet.as_manager()
segment = models.ForeignKey(Segment, on_delete=models.CASCADE)
assignee = models.ForeignKey(User, null=True, blank=True, on_delete=models.SET_NULL)
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now=True)
# TODO: it has to be deleted in Job, Task, Project and replaced by (stage, state)
# The stage field cannot be changed by an assignee, but state field can be. For
# now status is read only and it will be updated by (stage, state). Thus we don't
......@@ -490,6 +635,9 @@ class Job(models.Model):
state = models.CharField(max_length=32, choices=StateChoice.choices(),
default=StateChoice.NEW)
type = models.CharField(max_length=32, choices=JobType.choices(),
default=JobType.ANNOTATION)
def get_dirname(self):
return os.path.join(settings.JOBS_ROOT, str(self.id))
......@@ -526,6 +674,42 @@ class Job(models.Model):
class Meta:
default_permissions = ()
@transaction.atomic
def save(self, *args, **kwargs) -> None:
self.full_clean()
return super().save(*args, **kwargs)
def clean(self) -> None:
if not (self.type == JobType.GROUND_TRUTH) ^ (self.segment.type == SegmentType.RANGE):
raise ValidationError(
f"job type == {JobType.GROUND_TRUTH} and "
f"segment type == {SegmentType.SPECIFIC_FRAMES} "
"can only be used together"
)
return super().clean()
@cache_deleted
def delete(self, using=None, keep_parents=False):
if self.segment:
self.segment.delete(using=using, keep_parents=keep_parents)
super().delete(using, keep_parents)
self.delete_dirs()
def delete_dirs(self):
job_path = self.get_dirname()
if os.path.isdir(job_path):
shutil.rmtree(job_path)
def make_dirs(self):
job_path = self.get_dirname()
if os.path.isdir(job_path):
shutil.rmtree(job_path)
os.makedirs(job_path)
class InvalidLabel(ValueError):
pass
......
......@@ -544,6 +544,7 @@ class JobReadSerializer(serializers.ModelSerializer):
project_id = serializers.ReadOnlyField(source="get_project_id", allow_null=True)
start_frame = serializers.ReadOnlyField(source="segment.start_frame")
stop_frame = serializers.ReadOnlyField(source="segment.stop_frame")
frame_count = serializers.ReadOnlyField(source="segment.frame_count")
assignee = BasicUserSerializer(allow_null=True, read_only=True)
dimension = serializers.CharField(max_length=2, source='segment.task.dimension', read_only=True)
data_chunk_size = serializers.ReadOnlyField(source='segment.task.data.chunk_size')
......@@ -558,20 +559,138 @@ class JobReadSerializer(serializers.ModelSerializer):
class Meta:
model = models.Job
fields = ('url', 'id', 'task_id', 'project_id', 'assignee',
'dimension', 'bug_tracker', 'status', 'stage', 'state', 'mode',
'start_frame', 'stop_frame', 'data_chunk_size', 'organization', 'data_compressed_chunk_type',
'updated_date', 'issues', 'labels'
)
'dimension', 'bug_tracker', 'status', 'stage', 'state', 'mode', 'frame_count',
'start_frame', 'stop_frame', 'data_chunk_size', 'data_compressed_chunk_type',
'created_date', 'updated_date', 'issues', 'labels', 'type', 'organization')
read_only_fields = fields
class JobWriteSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
data = super().to_representation(instance)
if instance.segment.type == models.SegmentType.SPECIFIC_FRAMES:
data['data_compressed_chunk_type'] = models.DataChoice.IMAGESET
return data
class JobWriteSerializer(WriteOnceMixin, serializers.ModelSerializer):
assignee = serializers.IntegerField(allow_null=True, required=False)
# NOTE: Field variations can be expressed using serializer inheritance, but it is
# harder to use that way: we need to make a manual switch in get_serializer_class()
# and create an extra serializer type in the API schema.
# Need to investigate how it can be simplified.
type = serializers.ChoiceField(choices=models.JobType.choices())
task_id = serializers.IntegerField()
frame_selection_method = serializers.ChoiceField(
choices=models.JobFrameSelectionMethod.choices(), required=False)
frame_count = serializers.IntegerField(min_value=0, required=False,
help_text=textwrap.dedent("""\
The number of frames included in the job.
Applicable only to the random frame selection
"""))
seed = serializers.IntegerField(min_value=0, required=False,
help_text=textwrap.dedent("""\
The seed value for the random number generator.
The same value will produce the same frame sets.
Applicable only to the random frame selection.
By default, a random value is used.
"""))
frames = serializers.ListField(child=serializers.IntegerField(min_value=0),
required=False, help_text=textwrap.dedent("""\
The list of frame ids. Applicable only to the manual frame selection
"""))
class Meta:
model = models.Job
random_selection_params = ('frame_count', 'seed',)
manual_selection_params = ('frames',)
write_once_fields = ('type', 'task_id', 'frame_selection_method',) \
+ random_selection_params + manual_selection_params
fields = ('assignee', 'stage', 'state', ) + write_once_fields
def to_representation(self, instance):
# FIXME: deal with request/response separation
serializer = JobReadSerializer(instance, context=self.context)
return serializer.data
@transaction.atomic
def create(self, validated_data):
task_id = validated_data.pop('task_id')
task = models.Task.objects.get(pk=task_id)
if validated_data["type"] == models.JobType.GROUND_TRUTH:
if not task.data:
raise serializers.ValidationError(
"This task has no data attached yet. Please set up task data and try again"
)
if task.dimension != models.DimensionType.DIM_2D:
raise serializers.ValidationError(
"Ground Truth jobs can only be added in 2d tasks"
)
size = task.data.size
valid_frame_ids = task.data.get_valid_frame_indices()
frame_selection_method = validated_data.pop("frame_selection_method", None)
if frame_selection_method == models.JobFrameSelectionMethod.RANDOM_UNIFORM:
frame_count = validated_data.pop("frame_count")
if size <= frame_count:
raise serializers.ValidationError(
f"The number of frames requested ({frame_count}) must be lesser than "
f"the number of the task frames ({size})"
)
seed = validated_data.pop("seed", None)
# The RNG backend must not change to yield reproducible results,
# so here we specify it explicitly
from numpy import random
rng = random.Generator(random.MT19937(seed=seed))
frames = rng.choice(
list(valid_frame_ids), size=frame_count, shuffle=False, replace=False
).tolist()
elif frame_selection_method == models.JobFrameSelectionMethod.MANUAL:
frames = validated_data.pop("frames")
if not frames:
raise serializers.ValidationError("The list of frames cannot be empty")
unique_frames = set(frames)
if len(unique_frames) != len(frames):
raise serializers.ValidationError(f"Frames must not repeat")
invalid_ids = unique_frames.difference(valid_frame_ids)
if invalid_ids:
raise serializers.ValidationError(
"The following frames are not included "
f"in the task: {','.join(map(str, invalid_ids))}"
)
else:
raise serializers.ValidationError(
f"Unexpected frame selection method '{frame_selection_method}'"
)
segment = models.Segment.objects.create(
start_frame=0,
stop_frame=task.data.size - 1,
frames=frames,
task=task,
type=models.SegmentType.SPECIFIC_FRAMES,
)
else:
raise serializers.ValidationError(f"Unexpected job type '{validated_data['type']}'")
validated_data['segment'] = segment
try:
job = super().create(validated_data)
except models.TaskGroundTruthJobsLimitError as ex:
raise serializers.ValidationError(ex.message) from ex
job.make_dirs()
return job
def update(self, instance, validated_data):
state = validated_data.get('state')
stage = validated_data.get('stage')
......@@ -595,25 +714,21 @@ class JobWriteSerializer(serializers.ModelSerializer):
return instance
class Meta:
model = models.Job
fields = ('assignee', 'stage', 'state')
class SimpleJobSerializer(serializers.ModelSerializer):
assignee = BasicUserSerializer(allow_null=True)
class Meta:
model = models.Job
fields = ('url', 'id', 'assignee', 'status', 'stage', 'state')
fields = ('url', 'id', 'assignee', 'status', 'stage', 'state', 'type')
read_only_fields = fields
class SegmentSerializer(serializers.ModelSerializer):
jobs = SimpleJobSerializer(many=True, source='job_set')
frames = serializers.ListSerializer(child=serializers.IntegerField(), allow_empty=True)
class Meta:
model = models.Segment
fields = ('start_frame', 'stop_frame', 'jobs')
fields = ('start_frame', 'stop_frame', 'jobs', 'type', 'frames')
read_only_fields = fields
class ClientFileSerializer(serializers.ModelSerializer):
......@@ -666,7 +781,7 @@ class RqStatusSerializer(serializers.Serializer):
progress = serializers.FloatField(max_value=100, default=0)
class RqIdSerializer(serializers.Serializer):
rq_id = serializers.CharField()
rq_id = serializers.CharField(help_text="Request id")
class JobFiles(serializers.ListField):
......@@ -1254,6 +1369,11 @@ class DataMetaReadSerializer(serializers.ModelSerializer):
frames = FrameMetaSerializer(many=True, allow_null=True)
image_quality = serializers.IntegerField(min_value=0, max_value=100)
deleted_frames = serializers.ListField(child=serializers.IntegerField(min_value=0))
included_frames = serializers.ListField(
child=serializers.IntegerField(min_value=0), allow_null=True, required=False,
help_text=textwrap.dedent("""\
A list of valid frame ids. The None value means all frames are included.
"""))
class Meta:
model = models.Data
......@@ -1266,8 +1386,16 @@ class DataMetaReadSerializer(serializers.ModelSerializer):
'frame_filter',
'frames',
'deleted_frames',
'included_frames',
)
read_only_fields = fields
extra_kwargs = {
'size': {
'help_text': textwrap.dedent("""\
The number of frames included. Deleted frames do not affect this value.
""")
}
}
class DataMetaWriteSerializer(serializers.ModelSerializer):
deleted_frames = serializers.ListField(child=serializers.IntegerField(min_value=0))
......
# Copyright (C) 2019-2022 Intel Corporation
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import shutil
......@@ -7,8 +8,8 @@ from django.contrib.auth.models import User
from django.db.models.signals import post_delete, post_save
from django.dispatch import receiver
from .models import (CloudStorage, Data, Job, Profile, Project,
StatusChoice, Task)
from .models import CloudStorage, Data, Job, Profile, Project, StatusChoice, Task
# TODO: need to log any problems reported by shutil.rmtree when the new
# analytics feature is available. Now the log system can write information
......
......@@ -171,11 +171,7 @@ def _save_task_to_db(db_task: models.Task, *, job_file_mapping: Optional[JobFile
db_job = models.Job(segment=db_segment)
db_job.save()
job_path = db_job.get_dirname()
if os.path.isdir(job_path):
shutil.rmtree(job_path)
os.makedirs(job_path)
db_job.make_dirs()
db_task.data.save()
db_task.save()
......
......@@ -219,6 +219,9 @@ def reverse(viewname, *, args=None, kwargs=None,
return url
def get_server_url(request: HttpRequest) -> str:
return request.build_absolute_uri('/')
def build_field_filter_params(field: str, value: Any) -> Dict[str, str]:
"""
Builds a collection filter query params for a single field and value.
......
This diff is collapsed.
......@@ -17,9 +17,9 @@ from django.db.models import Q
from rest_framework.exceptions import PermissionDenied, ValidationError
from rest_framework.permissions import BasePermission
from cvat.apps.engine.models import (CloudStorage, Issue, Job, Label, Project,
Task)
from cvat.apps.engine.models import CloudStorage, Label, Project, Task, Job, Issue
from cvat.apps.organizations.models import Membership, Organization
from cvat.apps.quality_control.models import AnnotationConflict, QualityReport, QualitySettings
from cvat.apps.webhooks.models import WebhookTypeChoice
from cvat.utils.http import make_requests_session
......@@ -119,6 +119,8 @@ class OpenPolicyAgentPermission(metaclass=ABCMeta):
@classmethod
def create_base_perm(cls, request, view, scope, iam_context, obj=None, **kwargs):
if not iam_context and request:
iam_context = get_iam_context(request, obj)
return cls(
scope=scope,
obj=obj,
......@@ -126,9 +128,9 @@ class OpenPolicyAgentPermission(metaclass=ABCMeta):
@classmethod
def create_scope_list(cls, request, iam_context=None):
if iam_context:
return cls(**iam_context, scope='list')
return cls(**get_iam_context(request, None), scope='list')
if not iam_context and request:
iam_context = get_iam_context(request, None)
return cls(**iam_context, scope='list')
def __init__(self, **kwargs):
self.obj = None
......@@ -915,6 +917,19 @@ class TaskPermission(OpenPolicyAgentPermission):
return permissions
@classmethod
def create_scope_view(cls, request, task: Union[int, Task], iam_context=None):
if isinstance(task, int):
try:
task = Task.objects.get(id=task)
except Task.DoesNotExist as ex:
raise ValidationError(str(ex))
if not iam_context and request:
iam_context = get_iam_context(request, task)
return cls(**iam_context, obj=task, scope=__class__.Scopes.VIEW)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.url = settings.IAM_OPA_DATA_URL + '/tasks/allow'
......@@ -1167,7 +1182,10 @@ class WebhookPermission(OpenPolicyAgentPermission):
return data
class JobPermission(OpenPolicyAgentPermission):
task_id: Optional[int]
class Scopes(StrEnum):
CREATE = 'create'
LIST = 'list'
VIEW = 'view'
UPDATE = 'update'
......@@ -1193,8 +1211,25 @@ class JobPermission(OpenPolicyAgentPermission):
def create(cls, request, view, obj, iam_context):
permissions = []
if view.basename == 'job':
task_id = request.data.get('task_id')
for scope in cls.get_scopes(request, view, obj):
self = cls.create_base_perm(request, view, scope, iam_context, obj)
scope_params = {}
if scope == __class__.Scopes.CREATE:
scope_params['task_id'] = task_id
if task_id:
try:
task = Task.objects.get(id=task_id)
except Task.DoesNotExist as ex:
raise ValidationError(str(ex))
iam_context = get_iam_context(request, task)
permissions.append(TaskPermission.create_scope_view(
request, task, iam_context=iam_context
))
self = cls.create_base_perm(request, view, scope, iam_context, obj, **scope_params)
permissions.append(self)
if view.action == 'issues':
......@@ -1226,6 +1261,7 @@ class JobPermission(OpenPolicyAgentPermission):
return cls(**iam_context, obj=obj, scope='view:data')
def __init__(self, **kwargs):
self.task_id = kwargs.pop('task_id', None)
super().__init__(**kwargs)
self.url = settings.IAM_OPA_DATA_URL + '/jobs/allow'
......@@ -1233,10 +1269,10 @@ class JobPermission(OpenPolicyAgentPermission):
def get_scopes(request, view, obj):
Scopes = __class__.Scopes
scope = {
('list', 'GET'): Scopes.LIST, # TODO: need to add the method
('list', 'GET'): Scopes.LIST,
('create', 'POST'): Scopes.CREATE,
('retrieve', 'GET'): Scopes.VIEW,
('partial_update', 'PATCH'): Scopes.UPDATE,
('update', 'PUT'): Scopes.UPDATE, # TODO: do we need the method?
('destroy', 'DELETE'): Scopes.DELETE,
('annotations', 'GET'): Scopes.VIEW_ANNOTATIONS,
('annotations', 'PATCH'): Scopes.UPDATE_ANNOTATIONS,
......@@ -1313,6 +1349,29 @@ class JobPermission(OpenPolicyAgentPermission):
"assignee": { "id": getattr(self.obj.segment.task.project.assignee, 'id', None) }
} if self.obj.segment.task.project else None
}
elif self.scope == __class__.Scopes.CREATE:
if self.task_id is None:
raise ValidationError("task_id is not specified")
task = Task.objects.get(id=self.task_id)
if task.project:
organization = task.project.organization
else:
organization = task.organization
data = {
'organization': {
"id": getattr(organization, 'id', None)
},
"task": {
"owner": { "id": getattr(task.owner, 'id', None) },
"assignee": { "id": getattr(task.assignee, 'id', None) }
},
"project": {
"owner": { "id": getattr(task.project.owner, 'id', None) },
"assignee": { "id": getattr(task.project.assignee, 'id', None) }
} if task.project else None
}
return data
......@@ -1594,6 +1653,216 @@ class LabelPermission(OpenPolicyAgentPermission):
return data
class QualityReportPermission(OpenPolicyAgentPermission):
obj: Optional[QualityReport]
job_owner_id: Optional[int]
class Scopes(StrEnum):
LIST = 'list'
CREATE = 'create'
VIEW = 'view'
VIEW_STATUS = 'view:status'
@classmethod
def create_scope_check_status(cls, request, job_owner_id: int, iam_context=None):
if not iam_context and request:
iam_context = get_iam_context(request, None)
return cls(**iam_context, scope='view:status', job_owner_id=job_owner_id)
@classmethod
def create_scope_view(cls, request, report: Union[int, QualityReport], iam_context=None):
if isinstance(report, int):
try:
report = QualityReport.objects.get(id=report)
except QualityReport.DoesNotExist as ex:
raise ValidationError(str(ex))
# Access rights are the same as in the owning task
# This component doesn't define its own rules in this case
return TaskPermission.create_scope_view(request,
task=report.get_task(), iam_context=iam_context,
)
@classmethod
def create(cls, request, view, obj, iam_context):
Scopes = __class__.Scopes
permissions = []
if view.basename == 'quality_reports':
for scope in cls.get_scopes(request, view, obj):
if scope == Scopes.VIEW:
permissions.append(cls.create_scope_view(request, obj, iam_context=iam_context))
elif scope == Scopes.LIST and isinstance(obj, Task):
permissions.append(TaskPermission.create_scope_view(request, task=obj))
elif scope == Scopes.CREATE:
task_id = request.data.get('task_id')
if task_id is not None:
permissions.append(TaskPermission.create_scope_view(request, task_id))
permissions.append(cls.create_base_perm(request, view, scope, iam_context, obj))
else:
permissions.append(cls.create_base_perm(request, view, scope, iam_context, obj))
return permissions
def __init__(self, **kwargs):
if 'job_owner_id' in kwargs:
self.job_owner_id = int(kwargs.pop('job_owner_id'))
super().__init__(**kwargs)
self.url = settings.IAM_OPA_DATA_URL + '/quality_reports/allow'
@staticmethod
def get_scopes(request, view, obj):
Scopes = __class__.Scopes
return [{
'list': Scopes.LIST,
'create': Scopes.CREATE,
'retrieve': Scopes.VIEW,
'data': Scopes.VIEW,
}.get(view.action, None)]
def get_resource(self):
data = None
if self.obj:
task = self.obj.get_task()
if task.project:
organization = task.project.organization
else:
organization = task.organization
data = {
"id": self.obj.id,
'organization': {
"id": getattr(organization, 'id', None)
},
"task": {
"owner": { "id": getattr(task.owner, 'id', None) },
"assignee": { "id": getattr(task.assignee, 'id', None) }
} if task else None,
"project": {
"owner": { "id": getattr(task.project.owner, 'id', None) },
"assignee": { "id": getattr(task.project.assignee, 'id', None) }
} if task.project else None,
}
elif self.scope == self.Scopes.VIEW_STATUS:
data = { "owner": self.job_owner_id }
return data
class AnnotationConflictPermission(OpenPolicyAgentPermission):
obj: Optional[AnnotationConflict]
class Scopes(StrEnum):
LIST = 'list'
@classmethod
def create(cls, request, view, obj, iam_context):
permissions = []
if view.basename == 'annotation_conflicts':
for scope in cls.get_scopes(request, view, obj):
if scope == cls.Scopes.LIST and isinstance(obj, QualityReport):
permissions.append(QualityReportPermission.create_scope_view(
request, obj, iam_context=iam_context,
))
else:
permissions.append(cls.create_base_perm(request, view, scope, iam_context, obj))
return permissions
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.url = settings.IAM_OPA_DATA_URL + '/conflicts/allow'
@staticmethod
def get_scopes(request, view, obj):
Scopes = __class__.Scopes
return [{
'list': Scopes.LIST,
}.get(view.action, None)]
def get_resource(self):
return None
class QualitySettingPermission(OpenPolicyAgentPermission):
obj: Optional[QualitySettings]
class Scopes(StrEnum):
LIST = 'list'
VIEW = 'view'
UPDATE = 'update'
@classmethod
def create(cls, request, view, obj, iam_context):
Scopes = __class__.Scopes
permissions = []
if view.basename == 'quality_settings':
for scope in cls.get_scopes(request, view, obj):
if scope in [Scopes.VIEW, Scopes.UPDATE]:
obj = cast(QualitySettings, obj)
if scope == Scopes.VIEW:
task_scope = TaskPermission.Scopes.VIEW
elif scope == Scopes.UPDATE:
task_scope = TaskPermission.Scopes.UPDATE_DESC
else:
assert False
# Access rights are the same as in the owning task
# This component doesn't define its own rules in this case
permissions.append(TaskPermission.create_base_perm(
request, view, iam_context=iam_context, scope=task_scope, obj=obj.task
))
else:
permissions.append(cls.create_base_perm(request, view, scope, iam_context, obj))
return permissions
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.url = settings.IAM_OPA_DATA_URL + '/quality_settings/allow'
@staticmethod
def get_scopes(request, view, obj):
Scopes = __class__.Scopes
return [{
'list': Scopes.LIST,
'retrieve': Scopes.VIEW,
'partial_update': Scopes.UPDATE,
}.get(view.action, None)]
def get_resource(self):
data = None
if self.obj:
task = self.obj.task
if task.project:
organization = task.project.organization
else:
organization = task.organization
data = {
"id": self.obj.id,
'organization': {
"id": getattr(organization, 'id', None)
},
"task": {
"owner": { "id": getattr(task.owner, 'id', None) },
"assignee": { "id": getattr(task.assignee, 'id', None) }
} if task else None,
"project": {
"owner": { "id": getattr(task.project.owner, 'id', None) },
"assignee": { "id": getattr(task.project.assignee, 'id', None) }
} if task.project else None,
}
return data
class PolicyEnforcer(BasePermission):
# pylint: disable=no-self-use
def check_permission(self, request, view, obj):
......
package conflicts
import future.keywords.if
import future.keywords.in
import data.utils
import data.organizations
# input: {
# "scope": <"list"> or null,
# "auth": {
# "user": {
# "id": <num>,
# "privilege": <"admin"|"business"|"user"|"worker"> or null
# },
# "organization": {
# "id": <num>,
# "owner": {
# "id": <num>
# },
# "user": {
# "role": <"owner"|"maintainer"|"supervisor"|"worker"> or null
# }
# } or null,
# },
# "resource": {
# "id": <num>,
# "owner": { "id": <num> },
# "organization": { "id": <num> } or null,
# "task": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# "project": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# }
# }
default allow = false
allow {
utils.is_admin
}
allow {
input.scope == utils.LIST
utils.is_sandbox
}
allow {
input.scope == utils.LIST
organizations.is_member
}
filter = [] { # Django Q object to filter list of entries
utils.is_admin
utils.is_sandbox
} else = qobject {
utils.is_admin
utils.is_organization
org := input.auth.organization
qobject := [
{"report__job__segment__task__organization": org.id},
{"report__job__segment__task__project__organization": org.id}, "|",
{"report__task__organization": org.id}, "|",
{"report__task__project__organization": org.id}, "|",
]
} else = qobject {
utils.is_sandbox
user := input.auth.user
qobject := [
{"report__job__segment__task__owner_id": user.id},
{"report__job__segment__task__assignee_id": user.id}, "|",
{"report__job__segment__task__project__owner_id": user.id}, "|",
{"report__job__segment__task__project__assignee_id": user.id}, "|",
{"report__task__owner_id": user.id}, "|",
{"report__task__assignee_id": user.id}, "|",
{"report__task__project__owner_id": user.id}, "|",
{"report__task__project__assignee_id": user.id}, "|",
]
} else = qobject {
utils.is_organization
utils.has_perm(utils.USER)
organizations.has_perm(organizations.MAINTAINER)
org := input.auth.organization
qobject := [
{"report__job__segment__task__organization": org.id},
{"report__job__segment__task__project__organization": org.id}, "|",
{"report__task__organization": org.id}, "|",
{"report__task__project__organization": org.id}, "|",
]
} else = qobject {
organizations.has_perm(organizations.WORKER)
user := input.auth.user
org := input.auth.organization
qobject := [
{"report__job__segment__task__organization": org.id},
{"report__job__segment__task__project__organization": org.id}, "|",
{"report__task__organization": org.id}, "|",
{"report__task__project__organization": org.id}, "|",
{"report__job__segment__task__owner_id": user.id},
{"report__job__segment__task__assignee_id": user.id}, "|",
{"report__job__segment__task__project__owner_id": user.id}, "|",
{"report__job__segment__task__project__assignee_id": user.id}, "|",
{"report__task__owner_id": user.id}, "|",
{"report__task__assignee_id": user.id}, "|",
{"report__task__project__owner_id": user.id}, "|",
{"report__task__project__assignee_id": user.id}, "|",
"&"
]
}
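The qobject lists above are written in reverse Polish notation: every dict is a Django Q() operand, and "|" / "&" combine the two most recently produced entries. A minimal Python sketch of the evaluation, assuming it mirrors the filter reducer in cvat/apps/iam/permissions.py (the helper name to_q is hypothetical):

from django.db.models import Q

def to_q(rules: list) -> Q:
    # Operands are pushed onto a stack; "|" and "&" pop the two most
    # recent entries and combine them with OR / AND respectively.
    stack = []
    for item in rules:
        if item == "|":
            right, left = stack.pop(), stack.pop()
            stack.append(left | right)
        elif item == "&":
            right, left = stack.pop(), stack.pop()
            stack.append(left & right)
        else:
            stack.append(Q(**item))
    return stack[0]

For example, [{"task__owner_id": 1}, {"task__assignee_id": 1}, "|"] evaluates to Q(task__owner_id=1) | Q(task__assignee_id=1).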
......@@ -3,7 +3,7 @@ import data.utils
import data.organizations
# input: {
# "scope": <"view"|"list"|"update:state"|"update:stage"|"update:assignee""delete"|
# "scope": <"create"|"view"|"list"|"update:state"|"update:stage"|"update:assignee""delete"|
# "view:annotations"|"update:annotations"|"delete:annotations"|"view:data"|
# "export:annotations" | "export:dataset" |> or null,
# "auth": {
......@@ -140,20 +140,44 @@ filter = [] { # Django Q object to filter list of entries
}
allow {
{ utils.VIEW, utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS, utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS }[input.scope]
{ utils.CREATE, utils.DELETE }[input.scope]
utils.has_perm(utils.USER)
utils.is_sandbox
is_task_staff
}
allow {
{ utils.CREATE, utils.DELETE }[input.scope]
input.auth.organization.id == input.resource.organization.id
organizations.has_perm(organizations.SUPERVISOR)
utils.has_perm(utils.USER)
is_task_staff
}
allow {
{ utils.VIEW,
utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS,
utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS
}[input.scope]
utils.is_sandbox
is_job_staff
}
allow {
{ utils.VIEW, utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS, utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS }[input.scope]
{ utils.CREATE, utils.DELETE, utils.VIEW,
utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS,
utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS
}[input.scope]
input.auth.organization.id == input.resource.organization.id
utils.has_perm(utils.USER)
organizations.has_perm(organizations.MAINTAINER)
}
allow {
{ utils.VIEW, utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS, utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS }[input.scope]
{ utils.VIEW,
utils.EXPORT_DATASET, utils.EXPORT_ANNOTATIONS,
utils.VIEW_ANNOTATIONS, utils.VIEW_DATA, utils.VIEW_METADATA, utils.VIEW_COMMITS
}[input.scope]
input.auth.organization.id == input.resource.organization.id
organizations.has_perm(organizations.WORKER)
is_job_staff
......
package quality_reports
import future.keywords.if
import future.keywords.in
import data.utils
import data.organizations
# input: {
# "scope": <"view"|"list"|"create"|"view:status"> or null,
# "auth": {
# "user": {
# "id": <num>,
# "privilege": <"admin"|"business"|"user"|"worker"> or null
# },
# "organization": {
# "id": <num>,
# "owner": {
# "id": <num>
# },
# "user": {
# "role": <"owner"|"maintainer"|"supervisor"|"worker"> or null
# }
# } or null,
# },
# "resource": {
# "id": <num>,
# "owner": { "id": <num> },
# "organization": { "id": <num> } or null,
# "task": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# "project": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# }
# }
default allow = false
allow {
utils.is_admin
}
allow {
input.scope == utils.LIST
utils.is_sandbox
}
allow {
input.scope == utils.LIST
organizations.is_member
}
filter = [] { # Django Q object to filter list of entries
utils.is_admin
utils.is_sandbox
} else = qobject {
utils.is_admin
utils.is_organization
org := input.auth.organization
qobject := [
{"job__segment__task__organization": org.id},
{"job__segment__task__project__organization": org.id}, "|",
{"task__organization": org.id}, "|",
{"task__project__organization": org.id}, "|",
]
} else = qobject {
utils.is_sandbox
user := input.auth.user
qobject := [
{"job__segment__task__owner_id": user.id},
{"job__segment__task__assignee_id": user.id}, "|",
{"job__segment__task__project__owner_id": user.id}, "|",
{"job__segment__task__project__assignee_id": user.id}, "|",
{"task__owner_id": user.id}, "|",
{"task__assignee_id": user.id}, "|",
{"task__project__owner_id": user.id}, "|",
{"task__project__assignee_id": user.id}, "|",
]
} else = qobject {
utils.is_organization
utils.has_perm(utils.USER)
organizations.has_perm(organizations.MAINTAINER)
org := input.auth.organization
qobject := [
{"job__segment__task__organization": org.id},
{"job__segment__task__project__organization": org.id}, "|",
{"task__organization": org.id}, "|",
{"task__project__organization": org.id}, "|",
]
} else = qobject {
organizations.has_perm(organizations.WORKER)
user := input.auth.user
org := input.auth.organization
qobject := [
{"job__segment__task__organization": org.id},
{"job__segment__task__project__organization": org.id}, "|",
{"task__organization": org.id}, "|",
{"task__project__organization": org.id}, "|",
{"job__segment__task__owner_id": user.id},
{"job__segment__task__assignee_id": user.id}, "|",
{"job__segment__task__project__owner_id": user.id}, "|",
{"job__segment__task__project__assignee_id": user.id}, "|",
{"task__owner_id": user.id}, "|",
{"task__assignee_id": user.id}, "|",
{"task__project__owner_id": user.id}, "|",
{"task__project__assignee_id": user.id}, "|",
"&"
]
}
package quality_settings
import future.keywords.if
import future.keywords.in
import data.utils
import data.organizations
# input: {
# "scope": <"view"> or null,
# "auth": {
# "user": {
# "id": <num>,
# "privilege": <"admin"|"business"|"user"|"worker"> or null
# },
# "organization": {
# "id": <num>,
# "owner": {
# "id": <num>
# },
# "user": {
# "role": <"owner"|"maintainer"|"supervisor"|"worker"> or null
# }
# } or null,
# },
# "resource": {
# "id": <num>,
# "owner": { "id": <num> },
# "organization": { "id": <num> } or null,
# "task": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# "project": {
# "id": <num>,
# "owner": { "id": <num> },
# "assignee": { "id": <num> },
# "organization": { "id": <num> } or null,
# } or null,
# }
# }
default allow = false
allow {
utils.is_admin
}
allow {
input.scope == utils.LIST
utils.is_sandbox
}
allow {
input.scope == utils.LIST
organizations.is_member
}
filter = [] { # Django Q object to filter list of entries
utils.is_admin
utils.is_sandbox
} else = qobject {
utils.is_admin
utils.is_organization
org := input.auth.organization
qobject := [
{"task__organization": org.id},
{"task__project__organization": org.id}, "|",
]
} else = qobject {
utils.is_sandbox
user := input.auth.user
qobject := [
{"task__owner_id": user.id},
{"task__assignee_id": user.id}, "|",
{"task__project__owner_id": user.id}, "|",
{"task__project__assignee_id": user.id}, "|",
]
} else = qobject {
utils.is_organization
utils.has_perm(utils.USER)
organizations.has_perm(organizations.MAINTAINER)
org := input.auth.organization
qobject := [
{"task__organization": org.id},
{"task__project__organization": org.id}, "|",
]
} else = qobject {
organizations.has_perm(organizations.WORKER)
user := input.auth.user
org := input.auth.organization
qobject := [
{"task__organization": org.id}, "|",
{"task__project__organization": org.id}, "|",
{"task__owner_id": user.id},
{"task__assignee_id": user.id}, "|",
{"task__project__owner_id": user.id}, "|",
{"task__project__assignee_id": user.id}, "|",
"&"
]
}
......@@ -57,3 +57,13 @@ import:annotations,Job,Sandbox,None,,PUT,/jobs/{id}/annotations?format=,Admin,N/
import:annotations,Job,Sandbox,"Project:owner, Project:assignee, Task:owner, Task:assignee, Assignee",,PUT,/jobs/{id}/annotations?format=,Worker,N/A
import:annotations,Job,Organization,None,,PUT,/jobs/{id}/annotations?format=,User,Maintainer
import:annotations,Job,Organization,"Project:owner, Project:assignee, Task:owner, Task:assignee, Assignee",,PUT,/jobs/{id}/annotations?format=,Worker,Worker
create,Job,Sandbox,None,,POST,/jobs,Admin,N/A
create,Job,Sandbox,"Project:owner, Project:assignee, Task:owner, Task:assignee",,POST,/jobs/{id},User,N/A
create,Job,Organization,None,,POST,/jobs,User,Maintainer
create,Job,Organization,"Project:owner, Project:assignee, Task:owner, Task:assignee",,POST,/jobs/{id},User,Supervisor
create,Job,Organization,"Project:owner, Project:assignee, Task:owner, Task:assignee, Assignee",,POST,/jobs/{id},User,Maintainer
delete,Job,Sandbox,None,,DELETE,/jobs/{id},Admin,N/A
delete,Job,Sandbox,"Project:owner, Project:assignee, Task:owner, Task:assignee",,DELETE,/jobs/{id},User,N/A
delete,Job,Organization,None,,DELETE,/jobs/{id},User,Maintainer
delete,Job,Organization,"Project:owner, Project:assignee, Task:owner, Task:assignee",,DELETE,/jobs/{id},User,Supervisor
delete,Job,Organization,"Project:owner, Project:assignee, Task:owner, Task:assignee, Assignee",,DELETE,/jobs/{id},User,Maintainer
# Copyright (C) 2022 Intel Corporation
# Copyright (C) 2022 CVAT.ai Corporation
# Copyright (C) 2022-2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
......@@ -10,6 +10,8 @@ from drf_spectacular.authentication import SessionScheme, TokenScheme
from drf_spectacular.extensions import OpenApiAuthenticationExtension
from drf_spectacular.openapi import AutoSchema
from rest_framework import serializers
class SignatureAuthenticationScheme(OpenApiAuthenticationExtension):
"""
......@@ -80,6 +82,9 @@ class CookieAuthenticationScheme(SessionScheme):
class CustomAutoSchema(AutoSchema):
def get_operation_id(self):
# Change style of operation ids to [viewset _ action _ object]
# This form is simpler to handle during SDK generation
tokenized_path = self._tokenize_path()
# replace dashes as they can be problematic later in code generation
tokenized_path = [t.replace('-', '_') for t in tokenized_path]
......@@ -97,3 +102,15 @@ class CustomAutoSchema(AutoSchema):
return '_'.join([tokenized_path[0]] + [action] + tokenized_path[1:])
def _get_request_for_media_type(self, serializer, *args, **kwargs):
# Enables support for required=False serializers in request body specification
# in drf-spectacular. Doesn't block other extensions on the target serializer.
# This is supported by OpenAPI and by SDK generator, but not by drf-spectacular
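# Example from this patch: QualityReportViewSet.create declares
# request=QualityReportCreateSerializer(required=False), which marks the
# request body as optional in the generated schema.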
schema, required = super()._get_request_for_media_type(serializer, *args, **kwargs)
if isinstance(serializer, serializers.Serializer):
if not serializer.required:
required = False
return schema, required
......@@ -3,11 +3,12 @@
#
# SPDX-License-Identifier: MIT
import json
from collections import OrderedDict
from itertools import groupby
from io import BytesIO
from typing import Dict, Optional
from unittest import mock, skip
import json
import os
import requests
......@@ -78,15 +79,15 @@ class _LambdaTestCaseBase(APITestCase):
def setUp(self):
self.client = APIClient()
http_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', side_effect = self.__get_data_from_lambda_manager_http)
http_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway._http', side_effect = self._get_data_from_lambda_manager_http)
self.addCleanup(http_patcher.stop)
http_patcher.start()
invoke_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway.invoke', side_effect = self.__invoke_function)
invoke_patcher = mock.patch('cvat.apps.lambda_manager.views.LambdaGateway.invoke', side_effect = self._invoke_function)
self.addCleanup(invoke_patcher.stop)
invoke_patcher.start()
def __get_data_from_lambda_manager_http(self, **kwargs):
def _get_data_from_lambda_manager_http(self, **kwargs):
url = kwargs["url"]
if url == "/api/functions":
return functions["positive"]
......@@ -104,7 +105,7 @@ class _LambdaTestCaseBase(APITestCase):
r.response = HttpResponseNotFound()
raise r # raise 404 Not Found error
def __invoke_function(self, func, payload):
def _invoke_function(self, func, payload):
data = []
func_id = func.id
type_function = functions["positive"][func_id]["metadata"]["annotations"]["type"]
......@@ -170,15 +171,15 @@ class _LambdaTestCaseBase(APITestCase):
cls.user = user_dummy
def _create_task(self, data, image_data, *, owner=None, org_id=None):
def _create_task(self, task_spec, data, *, owner=None, org_id=None):
with ForceLogin(owner or self.admin, self.client):
response = self.client.post('/api/tasks', data=data, format="json",
response = self.client.post('/api/tasks', data=task_spec, format="json",
QUERY_STRING=f'org_id={org_id}' if org_id is not None else None)
assert response.status_code == status.HTTP_201_CREATED, response.status_code
tid = response.data["id"]
response = self.client.post("/api/tasks/%s/data" % tid,
data=image_data,
data=data,
QUERY_STRING=f'org_id={org_id}' if org_id is not None else None)
assert response.status_code == status.HTTP_202_ACCEPTED, response.status_code
......@@ -1042,6 +1043,163 @@ class LambdaTestCases(_LambdaTestCaseBase):
self.assertEqual(response.status_code, status.HTTP_500_INTERNAL_SERVER_ERROR)
class TestComplexFrameSetupCases(_LambdaTestCaseBase):
def _invoke_function(self, func, payload):
data = []
func_id = func.id
type_function = functions["positive"][func_id]["metadata"]["annotations"]["type"]
if type_function == "reid":
if func_id == id_function_reid_response_data:
data = [0, 1]
else:
data = []
elif type_function == "tracker":
data = {
"shape": [12.34, 34.0, 35.01, 41.99],
"state": {"key": "value"},
}
elif type_function == "interactor":
data = [
[8, 12],
[34, 56],
[77, 77],
]
elif type_function == "detector":
data = [
{
"confidence": "0.9959098",
"label": "car",
"points": [3, 3, 15, 15],
"type": "rectangle",
},
]
return data
def setUp(self):
super().setUp()
image_count = 50
frame_step = 5
start_frame = 3
stop_frame = image_count - 4
data = self._generate_task_images(image_count)
data["frame_filter"] = f"step={frame_step}"
data["start_frame"] = start_frame
data["stop_frame"] = stop_frame
self.task = self._create_task(
task_spec={
'name': 'test_task',
'labels': [{'name': 'car'}],
'segment_size': 2
},
data=data,
owner=self.user
)
self.task_rel_frame_range = range(len(range(start_frame, stop_frame, frame_step)))
self.jobs = get_paginated_collection(lambda page:
self._get_request(
f"/api/jobs?task_id={self.task['id']}&page={page}",
self.admin
)
)
self.function_id = id_function_detector
self.common_request_data = {
"task": self.task['id'],
"cleanup": True,
}
def _run_function(self, function_id, data, user):
data["function"] = function_id
response = self._post_request(LAMBDA_REQUESTS_PATH, user, data)
self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)
request_id = response.json()["id"]
request_status = self._wait_request(request_id)
self.assertEqual(request_status, "finished")
def _wait_request(self, request_id: str) -> str:
request_status = "started"
while request_status != "finished" and request_status != "failed":
response = self._get_request(f'{LAMBDA_REQUESTS_PATH}/{request_id}', self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
request_status = response.json().get("status")
return request_status
def _run_interactor(self, function_id, data, user):
response = self._post_request(f'{LAMBDA_FUNCTIONS_PATH}/{function_id}', user, data)
return response
def test_can_run_function_on_whole_task(self):
data = self.common_request_data.copy()
self._run_function(self.function_id, data, self.user)
response = self._get_request(f'/api/tasks/{self.task["id"]}/annotations', self.admin)
self.assertEqual(response.status_code, status.HTTP_200_OK)
annotations = response.json()
self.assertEqual(len(annotations["tags"]), 0)
self.assertEqual(len(annotations["tracks"]), 0)
requested_frame_range = self.task_rel_frame_range
self.assertEqual(
{
frame: 1 for frame in requested_frame_range
},
{
frame: len(list(group))
for frame, group in groupby(annotations["shapes"], key=lambda a: a["frame"])
}
)
def test_can_run_interactor_on_valid_task_frame(self):
data = self.common_request_data.copy()
requested_frame = self.task_rel_frame_range[4]
data["frame"] = requested_frame
response = self._run_interactor(self.function_id, data, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
annotations = response.json()
self.assertEqual(1, len(annotations))
def test_can_run_interactor_on_invalid_task_frame(self):
data = self.common_request_data.copy()
requested_frame = self.task_rel_frame_range[-1] + 1
data["frame"] = requested_frame
response = self._run_interactor(self.function_id, data, self.user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_run_interactor_on_valid_job_frame(self):
data = self.common_request_data.copy()
job = self.jobs[2]
requested_frame = job["start_frame"] + 1
data["frame"] = requested_frame
data["job"] = job["id"]
response = self._run_interactor(self.function_id, data, self.user)
self.assertEqual(response.status_code, status.HTTP_200_OK)
annotations = response.json()
self.assertEqual(1, len(annotations))
def test_can_run_interactor_on_invalid_job_frame(self):
data = self.common_request_data.copy()
job = self.jobs[2]
requested_frame = job["stop_frame"] + 1
data["frame"] = requested_frame
data["job"] = job["id"]
response = self._run_interactor(self.function_id, data, self.user)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
class Issue4996_Cases(_LambdaTestCaseBase):
# Check regressions for https://github.com/opencv/cvat/issues/4996#issuecomment-1266123032
# We need to check that job assignee can call functions in the assigned jobs
......@@ -1083,14 +1241,16 @@ class Issue4996_Cases(_LambdaTestCaseBase):
assert response.status_code == status.HTTP_200_OK
def setUp(self):
super().setUp()
self.org = self._create_org(owner=self.admin, members={self.user: 'worker'})
task = self._create_task(data={
task = self._create_task(task_spec={
'name': 'test_task',
'labels': [{'name': 'cat'}],
'segment_size': 2
},
image_data=self._generate_task_images(6),
data=self._generate_task_images(6),
owner=self.admin,
org_id=self.org['id'],
)
......@@ -1104,7 +1264,7 @@ class Issue4996_Cases(_LambdaTestCaseBase):
)
self.job = jobs[1]
self.common_data = {
self.common_request_data = {
"task": self.task['id'],
"frame": 0,
"cleanup": True,
......@@ -1113,81 +1273,79 @@ class Issue4996_Cases(_LambdaTestCaseBase):
},
}
self.function_name = f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}"
return super().setUp()
self.function_url = f"{LAMBDA_FUNCTIONS_PATH}/{id_function_detector}"
def _get_valid_job_params(self):
return {
def _get_valid_job_request_data(self):
data = self.common_request_data.copy()
data.update({
"job": self.job['id'],
"frame": 2
}
})
return data
def _get_invalid_job_params(self):
return {
def _get_invalid_job_request_data(self):
data = self.common_request_data.copy()
data.update({
"job": self.job['id'],
"frame": 0
}
})
return data
def test_can_call_function_for_job_worker_in_org__deny_unassigned_worker_with_task_request(self):
data = self.common_data.copy()
data = self.common_request_data.copy()
with self.subTest(job=None, assignee=None):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__deny_unassigned_worker_with_job_request(self):
data = self.common_data.copy()
data.update(self._get_valid_job_params())
data = self._get_valid_job_request_data()
with self.subTest(job='defined', assignee=None):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__allow_task_assigned_worker_with_task_request(self):
self._set_task_assignee(self.task['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data = self.common_request_data.copy()
with self.subTest(job=None, assignee='task'):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_call_function_for_job_worker_in_org__deny_job_assigned_worker_with_task_request(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data = self.common_request_data.copy()
with self.subTest(job=None, assignee='job'):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_can_call_function_for_job_worker_in_org__allow_job_assigned_worker_with_job_request(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_valid_job_params())
data = self._get_valid_job_request_data()
with self.subTest(job='defined', assignee='job'):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_can_check_job_boundaries_in_function_call__fail_for_frame_outside_job(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_invalid_job_params())
data = self._get_invalid_job_request_data()
with self.subTest(job='defined', frame='outside'):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_can_check_job_boundaries_in_function_call__ok_for_frame_inside_job(self):
self._set_job_assignee(self.job['id'], self.user.id, org_id=self.org['id'])
data = self.common_data.copy()
data.update(self._get_valid_job_params())
data = self._get_valid_job_request_data()
with self.subTest(job='defined', frame='inside'):
response = self._post_request(self.function_name, self.user, data,
response = self._post_request(self.function_url, self.user, data,
org_id=self.org['id'])
self.assertEqual(response.status_code, status.HTTP_200_OK)
......@@ -260,14 +260,24 @@ class LambdaFunction:
supported_attrs[func_label].update({ attr["name"]: task_attributes[mapped_label][mapped_attr] })
# Check job frame boundaries
for key, desc in (
('frame', 'frame'),
('frame0', 'start frame'),
('frame1', 'end frame'),
):
if key in data and db_job and not db_job.segment.contains_frame(data[key]):
raise ValidationError(f"The {desc} is outside the job range",
code=status.HTTP_400_BAD_REQUEST)
if db_job:
task_data = db_task.data
data_start_frame = task_data.start_frame
step = task_data.get_frame_step()
for key, desc in (
('frame', 'frame'),
('frame0', 'start frame'),
('frame1', 'end frame'),
):
if key not in data:
continue
abs_frame_id = data_start_frame + data[key] * step
if not db_job.segment.contains_frame(abs_frame_id):
raise ValidationError(f"The {desc} is outside the job range",
code=status.HTTP_400_BAD_REQUEST)
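# A worked example of the relative -> absolute frame mapping above, for a
# hypothetical task with start_frame=3 and frame_filter="step=5":
#   relative frame 0 -> 3 + 0 * 5 = 3 (absolute)
#   relative frame 4 -> 3 + 4 * 5 = 23 (absolute)
# Lambda requests pass relative frame ids, while job segments are defined
# over absolute ids, so the conversion must happen before the boundary check.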
if self.kind == LambdaType.DETECTOR:
payload.update({
......
# Copyright (C) 2023 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.apps import AppConfig
class QualityControlConfig(AppConfig):
name = "cvat.apps.quality_control"
def ready(self) -> None:
from django.conf import settings
from . import default_settings
for key in dir(default_settings):
if key.isupper() and not hasattr(settings, key):
setattr(settings, key, getattr(default_settings, key))
# Required to define signals in the application
from . import signals # pylint: disable=unused-import
# Copyright (C) 2023 Intel Corporation
#
# SPDX-License-Identifier: MIT
import os
QUALITY_CHECK_JOB_DELAY = int(os.getenv("CVAT_QUALITY_CHECK_JOB_DELAY", 15 * 60))
"The delay before the next quality check job is queued, in seconds"
# Generated by Django 4.2.1 on 2023-06-08 12:31
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
("engine", "0070_add_job_type_created_date"),
]
operations = [
migrations.CreateModel(
name="AnnotationConflict",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("frame", models.PositiveIntegerField()),
(
"type",
models.CharField(
choices=[
("missing_annotation", "MISSING_ANNOTATION"),
("extra_annotation", "EXTRA_ANNOTATION"),
("mismatching_label", "MISMATCHING_LABEL"),
("low_overlap", "LOW_OVERLAP"),
("mismatching_direction", "MISMATCHING_DIRECTION"),
("mismatching_attributes", "MISMATCHING_ATTRIBUTES"),
("mismatching_groups", "MISMATCHING_GROUPS"),
("covered_annotation", "COVERED_ANNOTATION"),
],
max_length=32,
),
),
(
"severity",
models.CharField(
choices=[("warning", "WARNING"), ("error", "ERROR")], max_length=32
),
),
],
),
migrations.CreateModel(
name="QualitySettings",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("iou_threshold", models.FloatField()),
("oks_sigma", models.FloatField()),
("line_thickness", models.FloatField()),
("low_overlap_threshold", models.FloatField()),
("compare_line_orientation", models.BooleanField()),
("line_orientation_threshold", models.FloatField()),
("compare_groups", models.BooleanField()),
("group_match_threshold", models.FloatField()),
("check_covered_annotations", models.BooleanField()),
("object_visibility_threshold", models.FloatField()),
("panoptic_comparison", models.BooleanField()),
("compare_attributes", models.BooleanField()),
(
"task",
models.OneToOneField(
on_delete=django.db.models.deletion.CASCADE,
related_name="quality_settings",
to="engine.task",
),
),
],
),
migrations.CreateModel(
name="QualityReport",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("created_date", models.DateTimeField(auto_now_add=True)),
("target_last_updated", models.DateTimeField()),
("gt_last_updated", models.DateTimeField()),
("data", models.JSONField()),
(
"job",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="quality_reports",
to="engine.job",
),
),
(
"parent",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="quality_control.qualityreport",
),
),
(
"task",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="quality_reports",
to="engine.task",
),
),
],
),
migrations.CreateModel(
name="AnnotationId",
fields=[
(
"id",
models.AutoField(
auto_created=True, primary_key=True, serialize=False, verbose_name="ID"
),
),
("obj_id", models.PositiveIntegerField()),
("job_id", models.PositiveIntegerField()),
(
"type",
models.CharField(
choices=[("tag", "TAG"), ("shape", "SHAPE"), ("track", "TRACK")],
max_length=32,
),
),
(
"shape_type",
models.CharField(
choices=[
("rectangle", "RECTANGLE"),
("polygon", "POLYGON"),
("polyline", "POLYLINE"),
("points", "POINTS"),
("ellipse", "ELLIPSE"),
("cuboid", "CUBOID"),
("mask", "MASK"),
("skeleton", "SKELETON"),
],
default=None,
max_length=32,
null=True,
),
),
(
"conflict",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="annotation_ids",
to="quality_control.annotationconflict",
),
),
],
),
migrations.AddField(
model_name="annotationconflict",
name="report",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="conflicts",
to="quality_control.qualityreport",
),
),
]
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import Any, Sequence
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.models import model_to_dict
from cvat.apps.engine.models import Job, ShapeType, Task
class AnnotationConflictType(str, Enum):
MISSING_ANNOTATION = "missing_annotation"
EXTRA_ANNOTATION = "extra_annotation"
MISMATCHING_LABEL = "mismatching_label"
LOW_OVERLAP = "low_overlap"
MISMATCHING_DIRECTION = "mismatching_direction"
MISMATCHING_ATTRIBUTES = "mismatching_attributes"
MISMATCHING_GROUPS = "mismatching_groups"
COVERED_ANNOTATION = "covered_annotation"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class AnnotationConflictSeverity(str, Enum):
WARNING = "warning"
ERROR = "error"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class MismatchingAnnotationKind(str, Enum):
ATTRIBUTE = "attribute"
LABEL = "label"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class QualityReportTarget(str, Enum):
JOB = "job"
TASK = "task"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class QualityReport(models.Model):
job = models.ForeignKey(
Job, on_delete=models.CASCADE, related_name="quality_reports", null=True, blank=True
)
task = models.ForeignKey(
Task, on_delete=models.CASCADE, related_name="quality_reports", null=True, blank=True
)
parent = models.ForeignKey(
"self", on_delete=models.CASCADE, related_name="children", null=True, blank=True
)
children: Sequence[QualityReport]
created_date = models.DateTimeField(auto_now_add=True)
target_last_updated = models.DateTimeField()
gt_last_updated = models.DateTimeField()
data = models.JSONField()
conflicts: Sequence[AnnotationConflict]
@property
def target(self) -> QualityReportTarget:
if self.job:
return QualityReportTarget.JOB
elif self.task:
return QualityReportTarget.TASK
else:
assert False
def _parse_report(self):
from cvat.apps.quality_control.quality_reports import ComparisonReport
return ComparisonReport.from_json(self.data)
@property
def summary(self):
report = self._parse_report()
return report.comparison_summary
def get_task(self) -> Task:
if self.task is not None:
return self.task
else:
return self.job.segment.task
def get_json_report(self) -> str:
return self.data
def clean(self):
if not (self.job is not None) ^ (self.task is not None):
raise ValidationError("One of the 'job' and 'task' fields must be set")
@property
def organization_id(self):
if task := self.get_task():
return getattr(task.organization, "id", None)
return None
class AnnotationConflict(models.Model):
report = models.ForeignKey(QualityReport, on_delete=models.CASCADE, related_name="conflicts")
frame = models.PositiveIntegerField()
type = models.CharField(max_length=32, choices=AnnotationConflictType.choices())
severity = models.CharField(max_length=32, choices=AnnotationConflictSeverity.choices())
annotation_ids: Sequence[AnnotationId]
@property
def organization_id(self):
return self.report.organization_id
class AnnotationType(str, Enum):
TAG = "tag"
SHAPE = "shape"
TRACK = "track"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class AnnotationId(models.Model):
conflict = models.ForeignKey(
AnnotationConflict, on_delete=models.CASCADE, related_name="annotation_ids"
)
obj_id = models.PositiveIntegerField()
job_id = models.PositiveIntegerField()
type = models.CharField(max_length=32, choices=AnnotationType.choices())
shape_type = models.CharField(
max_length=32, choices=ShapeType.choices(), null=True, default=None
)
def clean(self) -> None:
if self.type in [AnnotationType.SHAPE, AnnotationType.TRACK]:
if not self.shape_type:
raise ValidationError("Annotation kind must be specified")
elif self.type == AnnotationType.TAG:
if self.shape_type:
raise ValidationError("Annotation kind must be empty")
else:
raise ValidationError(f"Unexpected type value '{self.type}'")
class QualitySettings(models.Model):
task = models.OneToOneField(Task, on_delete=models.CASCADE, related_name="quality_settings")
iou_threshold = models.FloatField()
oks_sigma = models.FloatField()
line_thickness = models.FloatField()
low_overlap_threshold = models.FloatField()
compare_line_orientation = models.BooleanField()
line_orientation_threshold = models.FloatField()
compare_groups = models.BooleanField()
group_match_threshold = models.FloatField()
check_covered_annotations = models.BooleanField()
object_visibility_threshold = models.FloatField()
panoptic_comparison = models.BooleanField()
compare_attributes = models.BooleanField()
def __init__(self, *args: Any, **kwargs: Any) -> None:
defaults = deepcopy(self.get_defaults())
for field in self._meta.fields:
if field.name in defaults:
field.default = defaults[field.name]
super().__init__(*args, **kwargs)
@classmethod
def get_defaults(cls) -> dict:
import cvat.apps.quality_control.quality_reports as qc
default_settings = qc.DatasetComparator.DEFAULT_SETTINGS.to_dict()
existing_fields = {f.name for f in cls._meta.fields}
return {k: v for k, v in default_settings.items() if k in existing_fields}
def to_dict(self):
return model_to_dict(self)
@property
def organization_id(self):
return getattr(self.task.organization, "id", None)
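A minimal usage sketch of the defaults mechanism above (the task instance is hypothetical; the concrete values come from DatasetComparator.DEFAULT_SETTINGS in quality_reports.py):

settings = QualitySettings(task=task)  # hypothetical, unsaved instance
# every field left unspecified is pre-filled with the comparator default
assert settings.iou_threshold == QualitySettings.get_defaults()["iou_threshold"]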
[tool.isort]
profile = "black"
forced_separate = ["tests"]
line_length = 100
skip_gitignore = true # align tool behavior with Black
known_first_party = ["cvat"]
# Can't just use a pyproject in the root dir, so duplicate
# https://github.com/psf/black/issues/2863
[tool.black]
line-length = 100
target-version = ['py38']
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import textwrap
from rest_framework import serializers
from cvat.apps.quality_control import models
class AnnotationIdSerializer(serializers.ModelSerializer):
class Meta:
model = models.AnnotationId
fields = ("obj_id", "job_id", "type", "shape_type")
read_only_fields = fields
class AnnotationConflictSerializer(serializers.ModelSerializer):
annotation_ids = AnnotationIdSerializer(many=True)
class Meta:
model = models.AnnotationConflict
fields = ("id", "frame", "type", "annotation_ids", "report_id", "severity")
read_only_fields = fields
class QualityReportSummarySerializer(serializers.Serializer):
frame_count = serializers.IntegerField()
frame_share = serializers.FloatField()
conflict_count = serializers.IntegerField()
warning_count = serializers.IntegerField()
error_count = serializers.IntegerField()
conflicts_by_type = serializers.DictField(child=serializers.IntegerField())
# This set is enough for basic characteristics, such as
# DS_unmatched, GT_unmatched, accuracy, precision and recall
valid_count = serializers.IntegerField(source="annotations.valid_count")
ds_count = serializers.IntegerField(source="annotations.ds_count")
gt_count = serializers.IntegerField(source="annotations.gt_count")
class QualityReportSerializer(serializers.ModelSerializer):
target = serializers.ChoiceField(models.QualityReportTarget.choices())
summary = QualityReportSummarySerializer()
class Meta:
model = models.QualityReport
fields = (
"id",
"job_id",
"task_id",
"parent_id",
"target",
"summary",
"created_date",
"target_last_updated",
"gt_last_updated",
)
read_only_fields = fields
class QualityReportCreateSerializer(serializers.Serializer):
task_id = serializers.IntegerField(write_only=True)
class QualitySettingsSerializer(serializers.ModelSerializer):
class Meta:
model = models.QualitySettings
fields = (
"id",
"task_id",
"iou_threshold",
"oks_sigma",
"line_thickness",
"low_overlap_threshold",
"compare_line_orientation",
"line_orientation_threshold",
"compare_groups",
"group_match_threshold",
"check_covered_annotations",
"object_visibility_threshold",
"panoptic_comparison",
"compare_attributes",
)
read_only_fields = (
"id",
"task_id",
)
extra_kwargs = {k: {"required": False} for k in fields}
for field_name, help_text in {
"iou_threshold": "Used for distinction between matched / unmatched shapes",
"low_overlap_threshold": """
Used for distinction between strong / weak (low_overlap) matches
""",
"oks_sigma": """
Like IoU threshold, but for points.
The percent of the bbox area used as the radius of the circle around the GT point,
inside which the checked point is expected to be.
Read more: https://cocodataset.org/#keypoints-eval
""",
"line_thickness": """
Thickness of polylines, relatively to the (image area) ^ 0.5.
The distance to the boundary around the GT line,
inside of which the checked line points should be
""",
"compare_line_orientation": "Enables or disables polyline orientation comparison",
"line_orientation_threshold": """
The minimal gain in the GT IoU between the given and reversed line directions
to consider the line inverted.
Only used when the 'compare_line_orientation' parameter is true
""",
"compare_groups": "Enables or disables annotation group checks",
"group_match_threshold": """
Minimal IoU for groups to be considered matching.
Only used when the 'compare_groups' parameter is true
""",
"check_covered_annotations": """
Check for partially-covered annotations, useful in segmentation tasks
""",
"object_visibility_threshold": """
Minimal visible area percent of the spatial annotations (polygons, masks)
for reporting covered annotations.
Only used when the 'check_covered_annotations' parameter is true
""",
"panoptic_comparison": """
Use only the visible part of the masks and polygons in comparisons
""",
"compare_attributes": "Enables or disables annotation attribute comparison",
}.items():
extra_kwargs.setdefault(field_name, {}).setdefault(
"help_text", textwrap.dedent(help_text.lstrip("\n"))
)
def validate(self, attrs):
for k, v in attrs.items():
if k.endswith("_threshold") or k in ["oks_sigma", "line_thickness"]:
if not 0 <= v <= 1:
raise serializers.ValidationError(f"{k} must be in the range [0; 1]")
return super().validate(attrs)
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from django.db.models.signals import post_save
from django.dispatch import receiver
from cvat.apps.engine.models import Annotation, Job, Project, Task
from cvat.apps.quality_control import quality_reports as qc
from cvat.apps.quality_control.models import QualitySettings
@receiver(post_save, sender=Job, dispatch_uid=__name__ + ".save_job-update_quality_metrics")
@receiver(post_save, sender=Task, dispatch_uid=__name__ + ".save_task-update_quality_metrics")
@receiver(post_save, sender=Project, dispatch_uid=__name__ + ".save_project-update_quality_metrics")
@receiver(
post_save, sender=Annotation, dispatch_uid=__name__ + ".save_annotation-update_quality_metrics"
)
@receiver(
post_save,
sender=QualitySettings,
dispatch_uid=__name__ + ".save_settings-update_quality_metrics",
)
def __save_job__update_quality_metrics(instance, created, **kwargs):
tasks = []
if isinstance(instance, Project):
tasks += list(instance.tasks.all())
elif isinstance(instance, Task):
tasks.append(instance)
elif isinstance(instance, Job):
tasks.append(instance.segment.task)
elif isinstance(instance, Annotation):
tasks.append(instance.job.segment.task)
elif isinstance(instance, QualitySettings):
tasks.append(instance.task)
else:
assert False
for task in tasks:
qc.QualityReportUpdateManager().schedule_quality_autoupdate_job(task)
@receiver(post_save, sender=Task, dispatch_uid=__name__ + ".save_task-initialize_quality_settings")
@receiver(post_save, sender=Job, dispatch_uid=__name__ + ".save_job-initialize_quality_settings")
def __save_task__initialize_quality_settings(instance, created, **kwargs):
# Initializes default quality settings for the task.
# This is done in a signal to decouple this component from the engine app.
if created:
if isinstance(instance, Task):
task = instance
elif isinstance(instance, Job):
task = instance.segment.task
else:
assert False
QualitySettings.objects.get_or_create(task=task)
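A hypothetical trace of the wiring above: saving any of the connected models funnels into a single task-level scheduling call.

job.save()  # hypothetical Job instance; post_save fires the receiver above
# -> QualityReportUpdateManager().schedule_quality_autoupdate_job(job.segment.task)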
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from django.urls import include, path
from rest_framework import routers
from cvat.apps.quality_control import views
router = routers.DefaultRouter(trailing_slash=False)
router.register("reports", views.QualityReportViewSet, basename="quality_reports")
router.register("conflicts", views.QualityConflictsViewSet, basename="annotation_conflicts")
router.register("settings", views.QualitySettingsViewSet, basename="quality_settings")
urlpatterns = [
# entry point for API
path("quality/", include(router.urls)),
]
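# Together with the 'api/' include added to cvat/urls.py in this patch, the
# routes above resolve to /api/quality/reports, /api/quality/conflicts and
# /api/quality/settings.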
# Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import textwrap
from django.db.models import Q
from django.http import HttpResponse
from drf_spectacular.types import OpenApiTypes
from drf_spectacular.utils import (
OpenApiParameter,
OpenApiResponse,
extend_schema,
extend_schema_view,
)
from rest_framework import mixins, status, viewsets
from rest_framework.decorators import action
from rest_framework.exceptions import NotFound, ValidationError
from rest_framework.response import Response
from cvat.apps.engine.mixins import PartialUpdateModelMixin
from cvat.apps.engine.models import Task
from cvat.apps.engine.serializers import RqIdSerializer
from cvat.apps.engine.utils import get_server_url
from cvat.apps.iam.permissions import (
AnnotationConflictPermission,
QualityReportPermission,
QualitySettingPermission,
)
from cvat.apps.quality_control import quality_reports as qc
from cvat.apps.quality_control.models import (
AnnotationConflict,
QualityReport,
QualityReportTarget,
QualitySettings,
)
from cvat.apps.quality_control.serializers import (
AnnotationConflictSerializer,
QualityReportCreateSerializer,
QualityReportSerializer,
QualitySettingsSerializer,
)
@extend_schema(tags=["quality"])
@extend_schema_view(
list=extend_schema(
summary="Method returns a paginated list of annotation conflicts",
parameters=[
# These filters are implemented differently from others
OpenApiParameter(
"report_id",
type=OpenApiTypes.INT,
description="A simple equality filter for report id",
),
],
responses={
"200": AnnotationConflictSerializer(many=True),
},
),
)
class QualityConflictsViewSet(viewsets.GenericViewSet, mixins.ListModelMixin):
queryset = (
AnnotationConflict.objects.select_related(
"report",
"report__parent",
"report__job",
"report__job__segment",
"report__job__segment__task",
"report__job__segment__task__organization",
"report__task",
"report__task__organization",
)
.prefetch_related(
"annotation_ids",
)
.all()
)
iam_organization_field = [
"report__job__segment__task__organization",
"report__task__organization",
]
search_fields = []
filter_fields = list(search_fields) + ["id", "frame", "type", "job_id", "task_id", "severity"]
simple_filters = set(filter_fields) - {"id"}
lookup_fields = {
"job_id": "report__job__id",
"task_id": "report__job__segment__task__id", # task reports do not contain own conflicts
}
ordering_fields = list(filter_fields)
ordering = "-id"
serializer_class = AnnotationConflictSerializer
def get_queryset(self):
queryset = super().get_queryset()
if self.action == "list":
if report_id := self.request.query_params.get("report_id", None):
# NOTE: This filter is too complex to be implemented by other means,
# it has a dependency on the report type
try:
report = QualityReport.objects.get(id=report_id)
except QualityReport.DoesNotExist as ex:
raise NotFound(f"Report {report_id} does not exist") from ex
self.check_object_permissions(self.request, report)
if report.target == QualityReportTarget.TASK:
queryset = queryset.filter(
Q(report=report) | Q(report__parent=report)
).distinct()
elif report.target == QualityReportTarget.JOB:
queryset = queryset.filter(report=report)
else:
assert False
else:
perm = AnnotationConflictPermission.create_scope_list(self.request)
queryset = perm.filter(queryset)
return queryset
@extend_schema(tags=["quality"])
@extend_schema_view(
retrieve=extend_schema(
operation_id="quality_retrieve_report", # the default produces the plural
summary="Method returns details of a quality report",
responses={
"200": QualityReportSerializer,
},
),
list=extend_schema(
summary="Method returns a paginated list of quality reports",
parameters=[
# These filters are implemented differently from others
OpenApiParameter(
"task_id", type=OpenApiTypes.INT, description="A simple equality filter for task id"
),
OpenApiParameter(
"target", type=OpenApiTypes.STR, description="A simple equality filter for target"
),
],
responses={
"200": QualityReportSerializer(many=True),
},
),
)
class QualityReportViewSet(
viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
mixins.CreateModelMixin,
):
queryset = QualityReport.objects.prefetch_related(
"job",
"job__segment",
"job__segment__task",
"job__segment__task__organization",
"task",
"task__organization",
).all()
iam_organization_field = ["job__segment__task__organization", "task__organization"]
search_fields = []
filter_fields = list(search_fields) + [
"id",
"job_id",
"created_date",
"gt_last_updated",
"target_last_updated",
"parent_id",
]
simple_filters = list(
set(filter_fields) - {"id", "created_date", "gt_last_updated", "target_last_updated"}
)
ordering_fields = list(filter_fields)
ordering = "id"
def get_serializer_class(self):
# a separate method is required for drf-spectacular to work
return QualityReportSerializer
def get_queryset(self):
queryset = super().get_queryset()
if self.action == "list":
if task_id := self.request.query_params.get("task_id", None):
# NOTE: This filter is too complex to be implemented by other means
try:
task = Task.objects.get(id=task_id)
except Task.DoesNotExist as ex:
raise NotFound(f"Task {task_id} does not exist") from ex
self.check_object_permissions(self.request, task)
queryset = queryset.filter(
Q(job__segment__task__id=task_id) | Q(task__id=task_id)
).distinct()
else:
perm = QualityReportPermission.create_scope_list(self.request)
queryset = perm.filter(queryset)
if target := self.request.query_params.get("target", None):
if target == QualityReportTarget.JOB:
queryset = queryset.filter(job__isnull=False)
elif target == QualityReportTarget.TASK:
queryset = queryset.filter(task__isnull=False)
else:
raise ValidationError(
"Unexpected 'target' filter value '{}'. Valid values are: {}".format(
target, ", ".join(m[0] for m in QualityReportTarget.choices())
)
)
return queryset
CREATE_REPORT_RQ_ID_PARAMETER = "rq_id"
@extend_schema(
operation_id="quality_create_report",
summary="Creates a quality report asynchronously and allows to check request status",
parameters=[
OpenApiParameter(
CREATE_REPORT_RQ_ID_PARAMETER,
type=str,
description=textwrap.dedent(
"""\
The report creation request id. Can be specified to check the report
creation status.
"""
),
)
],
request=QualityReportCreateSerializer(required=False),
responses={
"201": QualityReportSerializer,
"202": OpenApiResponse(
RqIdSerializer,
description=textwrap.dedent(
"""\
A quality report request has been enqueued, the request id is returned.
The request status can be checked at this endpoint by passing the {}
as the query parameter. If the request id is specified, this response
means the quality report request is queued or is being processed.
""".format(
CREATE_REPORT_RQ_ID_PARAMETER
)
),
),
"400": OpenApiResponse(
description="Invalid or failed request, check the response data for details"
),
},
)
def create(self, request, *args, **kwargs):
self.check_permissions(request)
rq_id = request.query_params.get(self.CREATE_REPORT_RQ_ID_PARAMETER, None)
if rq_id is None:
input_serializer = QualityReportCreateSerializer(data=request.data)
input_serializer.is_valid(raise_exception=True)
task_id = input_serializer.validated_data["task_id"]
try:
task = Task.objects.get(pk=task_id)
except Task.DoesNotExist as ex:
raise NotFound(f"Task {task_id} does not exist") from ex
try:
rq_id = qc.QualityReportUpdateManager().schedule_quality_check_job(
task, user_id=request.user.id
)
serializer = RqIdSerializer({"rq_id": rq_id})
return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
except qc.QualityReportUpdateManager.QualityReportsNotAvailable as ex:
raise ValidationError(str(ex))
else:
serializer = RqIdSerializer(data={"rq_id": rq_id})
serializer.is_valid(raise_exception=True)
rq_id = serializer.validated_data["rq_id"]
report_manager = qc.QualityReportUpdateManager()
rq_job = report_manager.get_quality_check_job(rq_id)
if (
not rq_job
or not QualityReportPermission.create_scope_check_status(
request, job_owner_id=rq_job.meta["user_id"]
)
.check_access()
.allow
):
# We should not provide job existence information to unauthorized users
raise NotFound("Unknown request id")
if rq_job.is_failed:
message = str(rq_job.exc_info)
rq_job.delete()
raise ValidationError(message)
elif rq_job.is_queued or rq_job.is_started:
return Response(status=status.HTTP_202_ACCEPTED)
elif rq_job.is_finished:
return_value = rq_job.return_value
rq_job.delete()
if not return_value:
raise ValidationError("No report has been computed")
report = self.get_queryset().get(pk=return_value)
report_serializer = QualityReportSerializer(instance=report)
return Response(
data=report_serializer.data,
status=status.HTTP_201_CREATED,
headers=self.get_success_headers(report_serializer.data),
)
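# A hypothetical client-side sketch of the two-phase protocol implemented
# above (base URL, credentials and the task id are assumptions, not part
# of this patch):
#
#   import time
#   import requests
#
#   session = requests.Session()
#   session.auth = ("user", "password")
#   url = "https://cvat.example.com/api/quality/reports"
#
#   response = session.post(url, json={"task_id": 42})
#   assert response.status_code == 202
#   rq_id = response.json()["rq_id"]
#
#   while True:
#       response = session.post(url, params={"rq_id": rq_id})
#       if response.status_code == 201:
#           report = response.json()  # the finished report
#           break
#       assert response.status_code == 202  # queued or still running
#       time.sleep(1)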
@extend_schema(
operation_id="quality_retrieve_report_data",
summary="Retrieve full contents of the report in JSON format",
responses={"200": OpenApiTypes.OBJECT},
)
@action(detail=True, methods=["GET"], url_path="data", serializer_class=None)
def data(self, request, pk):
report = self.get_object() # check permissions
json_report = qc.prepare_report_for_downloading(report, host=get_server_url(request))
return HttpResponse(json_report.encode())
@extend_schema(tags=["quality"])
@extend_schema_view(
list=extend_schema(
summary="Method returns a paginated list of quality settings instances",
responses={
"200": QualitySettingsSerializer(many=True),
},
),
retrieve=extend_schema(
summary="Method returns details of the quality settings instance",
parameters=[
OpenApiParameter(
"id",
type=OpenApiTypes.INT,
location="path",
description="An id of a quality settings instance",
)
],
responses={
"200": QualitySettingsSerializer,
},
),
partial_update=extend_schema(
summary="Methods does a partial update of chosen fields in the quality settings instance",
parameters=[
OpenApiParameter(
"id",
type=OpenApiTypes.INT,
location="path",
description="An id of a quality settings instance",
)
],
request=QualitySettingsSerializer(partial=True),
responses={
"200": QualitySettingsSerializer,
},
),
)
class QualitySettingsViewSet(
viewsets.GenericViewSet,
mixins.ListModelMixin,
mixins.RetrieveModelMixin,
PartialUpdateModelMixin,
):
queryset = QualitySettings.objects.select_related("task", "task__organization").all()
iam_organization_field = "task__organization"
search_fields = []
filter_fields = ["id", "task_id"]
simple_filters = ["task_id"]
ordering_fields = ["id"]
ordering = "id"
serializer_class = QualitySettingsSerializer
def get_queryset(self):
queryset = super().get_queryset()
if self.action == "list":
permissions = QualitySettingPermission.create_scope_list(self.request)
queryset = permissions.filter(queryset)
return queryset
......@@ -6,4 +6,5 @@ pylint-plugin-utils==0.7
rope==0.17.0
django-extensions==3.0.8
snakeviz==2.1.0
django-silk==5.0.3
\ No newline at end of file
django-silk==5.0.3
black==23.3.0
\ No newline at end of file
# SHA1:4ea6010ac7e1df247f335663badaef30aabd8a11
# SHA1:c976e8cb5c96afe9ebe284d9b4dcf8019f55140e
#
# This file is autogenerated by pip-compile-multi
# To update, run:
......@@ -12,6 +12,8 @@ astroid==2.11.7
# via pylint
autopep8==2.0.2
# via django-silk
black==23.3.0
# via -r cvat/requirements/development.in
dill==0.3.6
# via pylint
django-extensions==3.0.8
......@@ -26,8 +28,14 @@ lazy-object-proxy==1.9.0
# via astroid
mccabe==0.7.0
# via pylint
mypy-extensions==1.0.0
# via black
pathspec==0.11.1
# via black
platformdirs==3.5.1
# via pylint
# via
# black
# pylint
pycodestyle==2.10.0
# via autopep8
pylint==2.14.5
......@@ -48,6 +56,7 @@ snakeviz==2.1.0
tomli==2.0.1
# via
# autopep8
# black
# pylint
tomlkit==0.11.8
# via pylint
......
......@@ -141,6 +141,7 @@ INSTALLED_APPS = [
'cvat.apps.webhooks',
'cvat.apps.health',
'cvat.apps.events',
'cvat.apps.quality_control',
]
SITE_ID = 1
......@@ -293,6 +294,7 @@ class CVAT_QUEUES(Enum):
AUTO_ANNOTATION = 'annotation'
WEBHOOKS = 'webhooks'
NOTIFICATIONS = 'notifications'
QUALITY_REPORTS = 'quality_reports'
CLEANING = 'cleaning'
RQ_QUEUES = {
......@@ -326,6 +328,12 @@ RQ_QUEUES = {
'DB': 0,
'DEFAULT_TIMEOUT': '1h'
},
CVAT_QUEUES.QUALITY_REPORTS.value: {
'HOST': 'localhost',
'PORT': 6379,
'DB': 0,
'DEFAULT_TIMEOUT': '1h',
},
CVAT_QUEUES.CLEANING.value: {
'HOST': 'localhost',
'PORT': 6379,
......@@ -624,6 +632,8 @@ SPECTACULAR_SETTINGS = {
'StorageMethod': 'cvat.apps.engine.models.StorageMethodChoice',
'JobStatus': 'cvat.apps.engine.models.StatusChoice',
'JobStage': 'cvat.apps.engine.models.StageChoice',
'JobType': 'cvat.apps.engine.models.JobType',
'QualityReportTarget': 'cvat.apps.quality_control.models.QualityReportTarget',
'StorageType': 'cvat.apps.engine.models.StorageChoice',
'SortingMethod': 'cvat.apps.engine.models.SortingMethod',
'WebhookType': 'cvat.apps.webhooks.models.WebhookTypeChoice',
......
......@@ -63,3 +63,5 @@ SILKY_MAX_RECORDED_REQUESTS = 10**4
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES['default']['HOST'] = os.getenv('CVAT_POSTGRES_HOST', 'localhost')
QUALITY_CHECK_JOB_DELAY = 5
......@@ -11,4 +11,9 @@ PASSWORD_HASHERS = [
"django.contrib.auth.hashers.MD5PasswordHasher",
]
# Avoid quality updates during test runs.
# Note that DB initialization triggers server signals,
# so quality report updates are scheduled for applicable jobs.
QUALITY_CHECK_JOB_DELAY = 10000
IMPORT_CACHE_CLEAN_DELAY = timedelta(seconds=30)
......@@ -46,6 +46,9 @@ if apps.is_installed('cvat.apps.opencv'):
if apps.is_installed('cvat.apps.webhooks'):
urlpatterns.append(path('api/', include('cvat.apps.webhooks.urls')))
if apps.is_installed('cvat.apps.quality_control'):
urlpatterns.append(path('api/', include('cvat.apps.quality_control.urls')))
if apps.is_installed('silk'):
urlpatterns.append(path('profiler/', include('silk.urls')))
......
......@@ -17,7 +17,7 @@ fi
# The commands must be run on each module directory separately,
# otherwise tools confuse the "current" module
for paths in "cvat-sdk" "cvat-cli" "tests/python/"; do
for paths in "cvat-sdk" "cvat-cli" "tests/python/" "cvat/apps/quality_control"; do
${BLACK} -- ${paths}
${ISORT} -- ${paths}
done
#!/usr/bin/env python
# This script adds access to the Django env and settings in the default rqscheduler
# implementation. This is required for correct work with CVAT queue settings and
# their access options such as login and password.
# Required to initialize Django settings correctly
from cvat.asgi import application # pylint: disable=unused-import
from rq_scheduler.scripts import rqscheduler
if __name__ == '__main__':
rqscheduler.main()
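# A hypothetical invocation (the flags are the standard rq-scheduler CLI
# options): `python rqscheduler.py -i 1` polls the scheduler every second.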