Commit 7d6dcd1c authored by Nikita Manovich

Release 0.4.0

[bandit]
# B101 : assert_used
# B102 : exec_used
# B320 : xml_bad_etree
# B404 : import_subprocess
# B406 : import_xml_sax
# B410 : import_lxml
skips: B101,B102,B320,B404,B406,B410
exclude_paths:
- '**/3rdparty/**'
/*
* Copyright (C) 2018 Intel Corporation
*
* SPDX-License-Identifier: MIT
*/
module.exports = {
"env": {
"node": false,
"browser": true,
"es6": true,
"jquery": true,
"qunit": true,
},
"parserOptions": {
"sourceType": "script",
},
"plugins": [
"security",
"no-unsanitized",
"no-unsafe-innerhtml",
],
"extends": [
"eslint:recommended",
"plugin:security/recommended",
"plugin:no-unsanitized/DOM",
"airbnb",
],
"rules": {
"no-new": [0],
"class-methods-use-this": [0],
"no-restricted-properties": [0, {
"object": "Math",
"property": "pow",
}],
"no-param-reassign": [0],
"no-underscore-dangle": ["error", { "allowAfterThis": true }],
"no-restricted-syntax": [0, {"selector": "ForOfStatement"}],
"no-continue": [0],
"no-unsafe-innerhtml/no-unsafe-innerhtml": 1,
// This rule is mainly relevant for user input data in the Node.js environment.
"security/detect-object-injection": 0,
"indent": ["warn", 4],
},
};
......@@ -8,6 +8,10 @@
/logs
/components/openvino/*.tgz
/profiles
/ssh/*
!/ssh/README.md
node_modules
# Ignore temporary files
docker-compose.override.yml
......
exports.settings = {bullet: '*', paddedTable: false}
exports.plugins = [
'remark-preset-lint-recommended',
'remark-preset-lint-consistent',
['remark-preset-lint-markdown-style-guide', 'mixed'],
['remark-lint-no-dead-urls', { skipOffline: true }],
['remark-lint-maximum-line-length', 120],
['remark-lint-maximum-heading-length', 120],
['remark-lint-strong-marker', "*"],
['remark-lint-emphasis-marker', "_"],
['remark-lint-unordered-list-marker-style', "-"],
['remark-lint-ordered-list-marker-style', "."],
['remark-lint-no-file-name-irregular-characters', false],
['remark-lint-list-item-spacing', false],
]
sudo: required
language: python
python:
- "3.5"
services:
- docker
before_script:
- docker-compose -f docker-compose.yml -f docker-compose.ci.yml up --build -d
script:
- docker exec -it cvat /bin/bash -c 'tests/node_modules/.bin/karma start tests/karma.conf.js'
......@@ -5,29 +5,24 @@
"version": "0.2.0",
"configurations": [
{
"name": "CVAT Server",
"name": "server",
"type": "python",
"request": "launch",
"stopOnEntry": false,
"debugStdLib": true,
"justMyCode": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
"runserver",
"--noreload",
"--nothreading",
"--insecure",
"127.0.0.1:7000"
],
"debugOptions": [
"RedirectOutput",
"DjangoDebugging"
],
"cwd": "${workspaceFolder}",
"envFile": "${workspaceFolder}/.env",
"django": true,
"cwd": "${workspaceFolder}"
},
{
"name": "CVAT Client",
"name": "client",
"type": "chrome",
"request": "launch",
"url": "http://localhost:7000/",
......@@ -40,11 +35,11 @@
}
},
{
"name": "CVAT RQ - default",
"name": "RQ - default",
"type": "python",
"request": "launch",
"stopOnEntry": false,
"debugStdLib": true,
"justMyCode": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
......@@ -53,20 +48,15 @@
"--worker-class",
"cvat.simpleworker.SimpleWorker",
],
"debugOptions": [
"RedirectOutput",
"DjangoDebugging"
],
"django": true,
"cwd": "${workspaceFolder}",
"env": {},
"envFile": "${workspaceFolder}/.env",
"env": {}
},
{
"name": "CVAT RQ - low",
"name": "RQ - low",
"type": "python",
"request": "launch",
"debugStdLib": true,
"justMyCode": false,
"stopOnEntry": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
......@@ -76,23 +66,68 @@
"--worker-class",
"cvat.simpleworker.SimpleWorker",
],
"debugOptions": [
"RedirectOutput",
"DjangoDebugging"
"django": true,
"cwd": "${workspaceFolder}",
"env": {}
},
{
"name": "git",
"type": "python",
"request": "launch",
"justMyCode": false,
"stopOnEntry": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
"update_git_states"
],
"django": true,
"cwd": "${workspaceFolder}",
"env": {}
},
{
"name": "migrate",
"type": "python",
"request": "launch",
"justMyCode": false,
"stopOnEntry": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
"migrate"
],
"django": true,
"cwd": "${workspaceFolder}",
"env": {}
},
{
"name": "tests",
"type": "python",
"request": "launch",
"justMyCode": false,
"stopOnEntry": false,
"pythonPath": "${config:python.pythonPath}",
"program": "${workspaceRoot}/manage.py",
"args": [
"test",
"--settings",
"cvat.settings.testing",
"cvat/apps/engine",
],
"django": true,
"cwd": "${workspaceFolder}",
"env": {},
"envFile": "${workspaceFolder}/.env",
"env": {}
},
],
"compounds": [
{
"name": "CVAT Debugging",
"name": "debugging",
"configurations": [
"CVAT Client",
"CVAT Server",
"CVAT RQ - default",
"CVAT RQ - low",
"client",
"server",
"RQ - default",
"RQ - low",
"git",
]
}
]
......
......@@ -4,6 +4,34 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- OpenVINO auto annotation: it is possible to upload a custom model and annotate images automatically.
- Ability to rotate images/video in the client part (Ctrl+R, Shift+Ctrl+R shortcuts) (#305)
- The ReID application for automatic bounding box merging has been added (#299)
- Keyboard shortcuts to switch to the next/previous default shape type (box, polygon, etc.) [Alt + <, Alt + >] (#316)
- Converter for VOC now supports interpolation tracks
- REST API (/api/v1/*, /api/docs)
- Semi-automatic semantic segmentation with the [Deep Extreme Cut](http://www.vision.ee.ethz.ch/~cvlsegmentation/dextr/) work
### Changed
- Propagation setup has been moved from the settings to the bottom player panel
- Additional events like "Debug Info" or "Fit Image" have been added for analytics
- Optional use of LFS for git annotation storage (#314)
### Deprecated
- "Flip images" flag in the create task dialog will be removed. Rotation functionality in client part have been added instead.
### Removed
-
### Fixed
- Django 2.1.5 (security fix, https://nvd.nist.gov/vuln/detail/CVE-2019-3498)
- Several scenarios which caused HTTP 400 errors after undo/redo/save have been fixed (#315)
### Security
-
## [0.3.0] - 2018-12-29
### Added
- Ability to copy Object URL and Frame URL via object context menu and player context menu respectively.
......
......@@ -57,6 +57,11 @@ $ code .
You are done! Now it is possible to insert breakpoints and debug the server and the client of the tool.
## JavaScript coding style
We use the [Airbnb JavaScript Style Guide](https://github.com/airbnb/javascript) for JavaScript code with a
little exception: we prefer 4 spaces for indentation of nested blocks and statements.
## Branching model
The project uses [a successful Git branching model](https://nvie.com/posts/a-successful-git-branching-model).
......
......@@ -28,3 +28,11 @@
- **[Sebastián Yonekura](https://github.com/syonekura)**
* [convert_to_voc.py](utils/voc) - a utility for converting CVAT XML to the PASCAL VOC data annotation format.
- **[ITLab Team](https://github.com/itlab-vision/cvat):**
**[Vasily Danilin](https://github.com/DanVev)**,
**[Eugene Shashkin](https://github.com/EvgenyShashkin)**,
**[Dmitry Silenko](https://github.com/DimaSilenko)**,
**[Alina Bykovskaya](https://github.com/alinaut)**,
**[Yanina Koltushkina](https://github.com/YaniKolt)**
* Integrating CI tools such as Travis CI, Codacy and Coveralls.io
......@@ -3,11 +3,13 @@ FROM ubuntu:16.04
ARG http_proxy
ARG https_proxy
ARG no_proxy
ARG socks_proxy
ENV TERM=xterm \
http_proxy=${http_proxy} \
https_proxy=${https_proxy} \
no_proxy=${no_proxy}
no_proxy=${no_proxy} \
socks_proxy=${socks_proxy}
ENV LANG='C.UTF-8' \
LC_ALL='C.UTF-8'
......@@ -89,6 +91,7 @@ RUN if [ "$WITH_TESTS" = "yes" ]; then \
eslint-detailed-reporter \
karma \
karma-chrome-launcher \
karma-coveralls \
karma-coverage \
karma-junit-reporter \
karma-qunit \
......@@ -100,10 +103,43 @@ RUN if [ "$WITH_TESTS" = "yes" ]; then \
COPY cvat/requirements/ /tmp/requirements/
COPY supervisord.conf mod_wsgi.conf wait-for-it.sh manage.py ${HOME}/
RUN pip3 install --no-cache-dir -r /tmp/requirements/${DJANGO_CONFIGURATION}.txt
# Install git application dependencies
RUN apt-get update && \
apt-get install -y ssh netcat-openbsd git curl zip && \
wget -qO /dev/stdout https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh | bash && \
apt-get install -y git-lfs && \
git lfs install && \
rm -rf /var/lib/apt/lists/* && \
if [ -z ${socks_proxy} ]; then \
echo export "GIT_SSH_COMMAND=\"ssh -o StrictHostKeyChecking=no -o ConnectTimeout=30\"" >> ${HOME}/.bashrc; \
else \
echo export "GIT_SSH_COMMAND=\"ssh -o StrictHostKeyChecking=no -o ConnectTimeout=30 -o ProxyCommand='nc -X 5 -x ${socks_proxy} %h %p'\"" >> ${HOME}/.bashrc; \
fi
# Download model for re-identification app
ENV REID_MODEL_DIR=${HOME}/reid
RUN if [ "$OPENVINO_TOOLKIT" = "yes" ]; then \
mkdir ${HOME}/reid && \
wget https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/person-reidentification-retail-0079/FP32/person-reidentification-retail-0079.xml -O reid/reid.xml && \
wget https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/person-reidentification-retail-0079/FP32/person-reidentification-retail-0079.bin -O reid/reid.bin; \
fi
# TODO: CHANGE URL
ARG WITH_DEXTR
ENV WITH_DEXTR=${WITH_DEXTR}
ENV DEXTR_MODEL_DIR=${HOME}/models/dextr
RUN if [ "$WITH_DEXTR" = "yes" ]; then \
mkdir ${DEXTR_MODEL_DIR} -p && \
wget https://download.01.org/openvinotoolkit/models_contrib/cvat/dextr_model_v1.zip -O ${DEXTR_MODEL_DIR}/dextr.zip && \
unzip ${DEXTR_MODEL_DIR}/dextr.zip -d ${DEXTR_MODEL_DIR} && rm ${DEXTR_MODEL_DIR}/dextr.zip; \
fi
COPY ssh ${HOME}/.ssh
COPY cvat/ ${HOME}/cvat
COPY tests ${HOME}/tests
RUN patch -p1 < ${HOME}/cvat/apps/engine/static/engine/js/3rdparty.patch
RUN chown -R ${USER}:${USER} .
# RUN all commands below as 'django' user
USER ${USER}
......
# Computer Vision Annotation Tool (CVAT)
[![Build Status](https://travis-ci.org/opencv/cvat.svg?branch=develop)](https://travis-ci.org/opencv/cvat)
[![Codacy Badge](https://api.codacy.com/project/badge/Grade/840351da141e4eaeac6476fd19ec0a33)](https://app.codacy.com/app/nmanovic/cvat?utm_source=github.com&utm_medium=referral&utm_content=opencv/cvat&utm_campaign=Badge_Grade_Settings)
[![Gitter chat](https://badges.gitter.im/opencv-cvat/gitter.png)](https://gitter.im/opencv-cvat)
CVAT is a completely re-designed and re-implemented version of the [Video Annotation Tool from Irvine, California](http://carlvondrick.com/vatic/). It is a free, online, interactive video and image annotation tool for computer vision. It is used by our team to annotate millions of objects with different properties. Many UI and UX decisions are based on feedback from a professional data annotation team.
......@@ -8,18 +10,25 @@ CVAT is completely re-designed and re-implemented version of [Video Annotation T
## Documentation
- [User's guide](cvat/apps/documentation/user_guide.md)
- [XML annotation format](cvat/apps/documentation/xml_format.md)
- [AWS Deployment Guide](cvat/apps/documentation/AWS-Deployment-Guide.md)
- [Questions](#questions)
- [User's guide](cvat/apps/documentation/user_guide.md)
- [XML annotation format](cvat/apps/documentation/xml_format.md)
- [AWS Deployment Guide](cvat/apps/documentation/AWS-Deployment-Guide.md)
- [Questions](#questions)
## Screencasts
- [Annotation mode](https://youtu.be/6h7HxGL6Ct4)
- [Interpolation mode](https://youtu.be/U3MYDhESHo4)
- [Attribute mode](https://youtu.be/UPNfWl8Egd8)
- [Segmentation mode](https://youtu.be/6IJ0QN7PBKo)
- [Tutorial for polygons](https://www.youtube.com/watch?v=XTwfXDh4clI)
- [Introduction](https://youtu.be/L9_IvUIHGwM)
- [Annotation mode](https://youtu.be/6h7HxGL6Ct4)
- [Interpolation mode](https://youtu.be/U3MYDhESHo4)
- [Attribute mode](https://youtu.be/UPNfWl8Egd8)
- [Segmentation mode](https://youtu.be/Fh8oKuSUIPs)
- [Tutorial for polygons](https://www.youtube.com/watch?v=XTwfXDh4clI)
- [Semi-automatic segmentation](https://www.youtube.com/watch?v=vnqXZ-Z-VTQ)
## Links
- [Intel AI blog: New Computer Vision Tool Accelerates Annotation of Digital Images and Video](https://www.intel.ai/introducing-cvat)
- [Intel Software: Computer Vision Annotation Tool: A Universal Approach to Data Annotation](https://software.intel.com/en-us/articles/computer-vision-annotation-tool-a-universal-approach-to-data-annotation)
- [VentureBeat: Intel open-sources CVAT, a toolkit for data labeling](https://venturebeat.com/2019/03/05/intel-open-sources-cvat-a-toolkit-for-data-labeling/)
## Online Demo
......@@ -27,8 +36,8 @@ CVAT is completely re-designed and re-implemented version of [Video Annotation T
After you click the link above:
- Click on "GO TO WORKSPACE" and the CVAT environment will load up
- The environment is backed by a K80 GPU
- Click on "GO TO WORKSPACE" and the CVAT environment will load up
- The environment is backed by a K80 GPU
If you have any questions, please contact Onepanel directly at support@onepanel.io. If you are in the Onepanel application, you can also use the chat icon in the bottom right corner.
......@@ -68,7 +77,13 @@ docker-compose -f docker-compose.yml -f components/cuda/docker-compose.cuda.yml
docker-compose -f docker-compose.yml -f components/cuda/docker-compose.cuda.yml -f components/openvino/docker-compose.openvino.yml up -d
```
For details please see [components section](components/README.md).
### Additional optional components
- [Auto annotation using DL models in OpenVINO toolkit format](cvat/apps/auto_annotation/README.md)
- [Analytics: management and monitoring of data annotation team](components/analytics/README.md)
- [TF Object Detection API: auto annotation](components/tf_annotation/README.md)
- [Support for NVIDIA GPUs](components/cuda/README.md)
- [Semi-automatic segmentation with Deep Extreme Cut](cvat/apps/dextr_segmentation/README.md)
### Create superuser account
......
### There are some additional components for CVAT
* [NVIDIA CUDA](cuda/README.md)
* [OpenVINO](openvino/README.md)
* [Tensorflow Object Detector](tf_annotation/README.md)
* [Analytics](analytics/README.md)
## Analytics for Computer Vision Annotation Tool (CVAT)
![](/cvat/apps/documentation/static/documentation/images/image097.jpg)
It is possible to proxy annotation logs from the client to ELK. To do that, run the command below:
### Build docker image
......
......@@ -10,34 +10,48 @@ filter {
# 1. Decode the event from json in 'message' field
# 2. Remove unnecessary field from it
# 3. Type it as client
mutate {
rename => { "message" => "source_message" }
}
json {
source => "message"
source => "source_message"
}
date {
match => ["timestamp", "UNIX", "UNIX_MS"]
remove_field => "timestamp"
match => ["time", "ISO8601"]
remove_field => "time"
}
if [event] == "Send exception" {
if [payload] {
ruby {
code => "
event.get('payload').each { |key, value|
event.set(key, value)
}
"
}
}
if [name] == "Send exception" {
aggregate {
task_id => "%{userid}_%{application}_%{message}_%{filename}_%{line}"
task_id => "%{username}_%{message}_%{filename}_%{line}"
code => "
require 'time'
map['userid'] ||= event.get('userid');
map['application'] ||= event.get('application');
map['username'] ||= event.get('username');
map['error'] ||= event.get('message');
map['filename'] ||= event.get('filename');
map['line'] ||= event.get('line');
map['task'] ||= event.get('task');
map['task_id'] ||= event.get('task_id');
map['job_id'] ||= event.get('job_id');
map['error_count'] ||= 0;
map['error_count'] += 1;
map['aggregated_stack'] ||= '';
map['aggregated_stack'] += event.get('stack') + '\n\n\n';"
map['aggregated_stack'] += event.get('stack') + '\n\n\n';
"
timeout => 3600
timeout_tags => ['aggregated_exception']
push_map_as_event_on_timeout => true
......@@ -45,12 +59,17 @@ filter {
}
prune {
blacklist_names => ["level", "host", "logger_name", "message", "path",
"port", "stack_info"]
blacklist_names => ["level", "host", "logger_name", "path",
"port", "stack_info", "payload", "source_message"]
}
mutate {
replace => { "type" => "client" }
copy => {
"job_id" => "task"
"username" => "userid"
"name" => "event"
}
}
} else if [logger_name] =~ /cvat.server/ {
# 1. Remove 'logger_name' field and create 'task' field
......@@ -58,14 +77,14 @@ filter {
# 3. Type it as server
if [logger_name] =~ /cvat\.server\.task_[0-9]+/ {
mutate {
rename => { "logger_name" => "task" }
gsub => [ "task", "cvat.server.task_", "" ]
rename => { "logger_name" => "task_id" }
gsub => [ "task_id", "cvat.server.task_", "" ]
}
# Need to split the mutate because otherwise the conversion
# doesn't work.
mutate {
convert => { "task" => "integer" }
convert => { "task_id" => "integer" }
}
}
......
......@@ -6,7 +6,7 @@
### Preparation
* Download latest [OpenVINO toolkit](https://software.intel.com/en-us/openvino-toolkit) installer (offline or online) for Linux platform. It should be .tgz archive. Minimum required version is 2018 R3.
* Download [OpenVINO toolkit 2018R5](https://software.intel.com/en-us/openvino-toolkit) .tgz installer (offline or online) for Ubuntu platforms.
* Put downloaded file into ```components/openvino```.
* Accept EULA in the eula.cfg file.
......
......@@ -12,7 +12,7 @@ if [[ `lscpu | grep -o "GenuineIntel"` != "GenuineIntel" ]]; then
fi
if [[ `lscpu | grep -o "sse4" | head -1` != "sse4" ]] && [[ `lscpu | grep -o "avx2" | head -1` != "avx2" ]]; then
echo "You Intel CPU should support sse4 or avx2 instruction if you want use OpenVINO"
echo "OpenVINO expects your CPU to support SSE4 or AVX2 instructions"
exit 1
fi
......@@ -23,12 +23,18 @@ tar -xzf `ls | grep "openvino_toolkit"`
cd `ls -d */ | grep "openvino_toolkit"`
apt-get update && apt-get install -y sudo cpio && \
./install_cv_sdk_dependencies.sh && SUDO_FORCE_REMOVE=yes apt-get remove -y sudo
if [ -f "install_cv_sdk_dependencies.sh" ]; then ./install_cv_sdk_dependencies.sh; \
else ./install_openvino_dependencies.sh; fi && SUDO_FORCE_REMOVE=yes apt-get remove -y sudo
cat ../eula.cfg >> silent.cfg
./install.sh -s silent.cfg
cd /tmp/components && rm openvino -r
echo "source /opt/intel/computer_vision_sdk/bin/setupvars.sh" >> ${HOME}/.bashrc
echo -e '\nexport IE_PLUGINS_PATH=${IE_PLUGINS_PATH}' >> /opt/intel/computer_vision_sdk/bin/setupvars.sh
if [ -f "/opt/intel/computer_vision_sdk/bin/setupvars.sh" ]; then
echo "source /opt/intel/computer_vision_sdk/bin/setupvars.sh" >> ${HOME}/.bashrc;
echo -e '\nexport IE_PLUGINS_PATH=${IE_PLUGINS_PATH}' >> /opt/intel/computer_vision_sdk/bin/setupvars.sh;
else
echo "source /opt/intel/openvino/bin/setupvars.sh" >> ${HOME}/.bashrc;
echo -e '\nexport IE_PLUGINS_PATH=${IE_PLUGINS_PATH}' >> /opt/intel/openvino/bin/setupvars.sh;
fi
......@@ -16,17 +16,5 @@ if [[ "$CUDA_SUPPORT" = "yes" ]]
then
pip3 install --no-cache-dir tensorflow-gpu==1.7.0
else
if [[ "$OPENVINO_TOOLKIT" = "yes" ]]
then
pip3 install -r ${INTEL_CVSDK_DIR}/deployment_tools/model_optimizer/requirements.txt && \
cd ${HOME}/rcnn/ && \
${INTEL_CVSDK_DIR}/deployment_tools/model_optimizer/mo.py --framework tf \
--data_type FP32 --input_shape [1,600,600,3] \
--input image_tensor --output detection_scores,detection_boxes,num_detections \
--tensorflow_use_custom_operations_config ${INTEL_CVSDK_DIR}/deployment_tools/model_optimizer/extensions/front/tf/faster_rcnn_support.json \
--tensorflow_object_detection_api_pipeline_config pipeline.config --input_model inference_graph.pb && \
rm inference_graph.pb
else
pip3 install --no-cache-dir tensorflow==1.7.0
fi
pip3 install --no-cache-dir tensorflow==1.7.0
fi
......@@ -3,3 +3,8 @@
#
# SPDX-License-Identifier: MIT
from cvat.utils.version import get_version
VERSION = (0, 4, 0, 'final')
__version__ = get_version(VERSION)
......@@ -4,8 +4,10 @@
import os
from django.conf import settings
from django.db.models import Q
import rules
from . import AUTH_ROLE
from rest_framework.permissions import BasePermission
def register_signals():
from django.db.models.signals import post_migrate, post_save
......@@ -67,6 +69,11 @@ def is_job_annotator(db_user, db_job):
return has_rights
# AUTH PERMISSIONS RULES
rules.add_perm('engine.role.user', has_user_role)
rules.add_perm('engine.role.admin', has_admin_role)
rules.add_perm('engine.role.annotator', has_annotator_role)
rules.add_perm('engine.role.observer', has_observer_role)
rules.add_perm('engine.task.create', has_admin_role | has_user_role)
rules.add_perm('engine.task.access', has_admin_role | has_observer_role |
is_task_owner | is_task_annotator)
......@@ -78,3 +85,64 @@ rules.add_perm('engine.job.access', has_admin_role | has_observer_role |
is_job_owner | is_job_annotator)
rules.add_perm('engine.job.change', has_admin_role | is_job_owner |
is_job_annotator)
class AdminRolePermission(BasePermission):
# pylint: disable=no-self-use
def has_permission(self, request, view):
return request.user.has_perm("engine.role.admin")
class UserRolePermission(BasePermission):
# pylint: disable=no-self-use
def has_permission(self, request, view):
return request.user.has_perm("engine.role.user")
class AnnotatorRolePermission(BasePermission):
# pylint: disable=no-self-use
def has_permission(self, request, view):
return request.user.has_perm("engine.role.annotator")
class ObserverRolePermission(BasePermission):
# pylint: disable=no-self-use
def has_permission(self, request, view):
return request.user.has_perm("engine.role.observer")
class TaskCreatePermission(BasePermission):
# pylint: disable=no-self-use
def has_permission(self, request, view):
return request.user.has_perm("engine.task.create")
class TaskAccessPermission(BasePermission):
# pylint: disable=no-self-use
def has_object_permission(self, request, view, obj):
return request.user.has_perm("engine.task.access", obj)
class TaskGetQuerySetMixin(object):
def get_queryset(self):
queryset = super().get_queryset()
user = self.request.user
# Don't filter queryset for admin, observer and detail methods
if has_admin_role(user) or has_observer_role(user) or self.detail:
return queryset
else:
return queryset.filter(Q(owner=user) | Q(assignee=user) |
Q(segment__job__assignee=user) | Q(assignee=None)).distinct()
class TaskChangePermission(BasePermission):
# pylint: disable=no-self-use
def has_object_permission(self, request, view, obj):
return request.user.has_perm("engine.task.change", obj)
class TaskDeletePermission(BasePermission):
# pylint: disable=no-self-use
def has_object_permission(self, request, view, obj):
return request.user.has_perm("engine.task.delete", obj)
class JobAccessPermission(BasePermission):
# pylint: disable=no-self-use
def has_object_permission(self, request, view, obj):
return request.user.has_perm("engine.job.access", obj)
class JobChangePermission(BasePermission):
# pylint: disable=no-self-use
def has_object_permission(self, request, view, obj):
return request.user.has_perm("engine.job.change", obj)
## Auto annotation
### Description
The application will be enabled automatically if the
[OpenVINO&trade; component](../../../components/openvino)
is installed. It allows using custom models for auto annotation. Only models in the
OpenVINO&trade; toolkit format are supported. If you would like to annotate a
task with a custom model, please convert it to the intermediate representation
(IR) format via the model optimizer tool. See the [OpenVINO documentation](https://software.intel.com/en-us/articles/OpenVINO-InferEngine) for details.
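Once a model has been converted, a quick way to sanity-check the resulting `.xml`/`.bin` pair
before uploading it is to load it with the same inference engine helpers this application uses.
The snippet below is only an illustrative sketch: the file names are placeholders and it assumes
the `IE_PLUGINS_PATH` environment variable is set (as it is inside the CVAT container).
```python
# Illustrative sketch only: verify that a converted IR pair loads into the
# inference engine. "my_model.xml" and "my_model.bin" are placeholder names.
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network

plugin = make_plugin()  # raises OSError if IE_PLUGINS_PATH is not set
network = make_network("my_model.xml", "my_model.bin")
exec_net = plugin.load(network=network)  # fails if the device does not support some layers
print("inputs:", list(network.inputs), "outputs:", list(network.outputs))
```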
### Usage
To annotate a task with a custom model, you need to prepare 4 files:
1. __Model config__ (*.xml) - a text file with network configuration.
1. __Model weights__ (*.bin) - a binary file with trained weights.
1. __Label map__ (*.json) - a simple JSON file with a `label_map` dictionary-like
object that maps label numbers to string values.
Example:
```json
{
"label_map": {
"0": "background",
"1": "aeroplane",
"2": "bicycle",
"3": "bird",
"4": "boat",
"5": "bottle",
"6": "bus",
"7": "car",
"8": "cat",
"9": "chair",
"10": "cow",
"11": "diningtable",
"12": "dog",
"13": "horse",
"14": "motorbike",
"15": "person",
"16": "pottedplant",
"17": "sheep",
"18": "sofa",
"19": "train",
"20": "tvmonitor"
}
}
```
1. __Interpretation script__ (*.py) - a file used to convert the network output layer
to a predefined structure which can be processed by CVAT. This code will be run
inside a restricted Python environment, but it is possible to use some
built-in functions like __str, int, float, max, min, range__.
Also, two variables are available in the scope:
- __detections__ - a list of dictionaries with detections for each frame:
* __frame_id__ - frame number
* __frame_height__ - frame height
* __frame_width__ - frame width
* __detections__ - output np.ndarray (See [ExecutableNetwork.infer](https://software.intel.com/en-us/articles/OpenVINO-InferEngine#inpage-nav-11-6-3) for details).
- __results__ - an instance of a Python class that accumulates the converted results.
The following methods should be used to add shapes:
```python
# xtl, ytl, xbr, ybr - expected values are float or int
# label - expected value is int
# frame_number - expected value is int
# attributes - dictionary of attribute_name: attribute_value pairs, for example {"confidence": "0.83"}
add_box(self, xtl, ytl, xbr, ybr, label, frame_number, attributes=None)
# points - list of (x, y) pairs of float or int, for example [(57.3, 100), (67, 102.7)]
# label - expected value is int
# frame_number - expected value is int
# attributes - dictionary of attribute_name: attribute_value pairs, for example {"confidence": "0.83"}
add_points(self, points, label, frame_number, attributes=None)
add_polygon(self, points, label, frame_number, attributes=None)
add_polyline(self, points, label, frame_number, attributes=None)
```
### Examples
#### [Person-vehicle-bike-detection-crossroad-0078](https://github.com/opencv/open_model_zoo/blob/2018/intel_models/person-vehicle-bike-detection-crossroad-0078/description/person-vehicle-bike-detection-crossroad-0078.md) (OpenVINO toolkit)
__Links__
- [person-vehicle-bike-detection-crossroad-0078.xml](https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/person-vehicle-bike-detection-crossroad-0078/FP32/person-vehicle-bike-detection-crossroad-0078.xml)
- [person-vehicle-bike-detection-crossroad-0078.bin](https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/person-vehicle-bike-detection-crossroad-0078/FP32/person-vehicle-bike-detection-crossroad-0078.bin)
__Task labels__: person vehicle non-vehicle
__label_map.json__:
```json
{
"label_map": {
"1": "person",
"2": "vehicle",
"3": "non-vehicle"
}
}
```
__Interpretation script for SSD based networks__:
```python
def clip(value):
return max(min(1.0, value), 0.0)
for frame_results in detections:
frame_height = frame_results["frame_height"]
frame_width = frame_results["frame_width"]
frame_number = frame_results["frame_id"]
for i in range(frame_results["detections"].shape[2]):
confidence = frame_results["detections"][0, 0, i, 2]
if confidence < 0.5:
continue
results.add_box(
xtl=clip(frame_results["detections"][0, 0, i, 3]) * frame_width,
ytl=clip(frame_results["detections"][0, 0, i, 4]) * frame_height,
xbr=clip(frame_results["detections"][0, 0, i, 5]) * frame_width,
ybr=clip(frame_results["detections"][0, 0, i, 6]) * frame_height,
label=int(frame_results["detections"][0, 0, i, 1]),
frame_number=frame_number,
attributes={
"confidence": "{:.2f}".format(confidence),
},
)
```
#### [Landmarks-regression-retail-0009](https://github.com/opencv/open_model_zoo/blob/2018/intel_models/landmarks-regression-retail-0009/description/landmarks-regression-retail-0009.md) (OpenVINO toolkit)
__Links__
- [landmarks-regression-retail-0009.xml](https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.xml)
- [landmarks-regression-retail-0009.bin](https://download.01.org/openvinotoolkit/2018_R5/open_model_zoo/landmarks-regression-retail-0009/FP32/landmarks-regression-retail-0009.bin)
__Task labels__: left_eye right_eye tip_of_nose left_lip_corner right_lip_corner
__label_map.json__:
```json
{
"label_map": {
"0": "left_eye",
"1": "right_eye",
"2": "tip_of_nose",
"3": "left_lip_corner",
"4": "right_lip_corner"
}
}
```
__Interpretation script__:
```python
def clip(value):
return max(min(1.0, value), 0.0)
for frame_results in detections:
frame_height = frame_results["frame_height"]
frame_width = frame_results["frame_width"]
frame_number = frame_results["frame_id"]
for i in range(0, frame_results["detections"].shape[1], 2):
x = frame_results["detections"][0, i, 0, 0]
y = frame_results["detections"][0, i + 1, 0, 0]
results.add_points(
points=[(clip(x) * frame_width, clip(y) * frame_height)],
label=i // 2, # see label map and model output specification,
frame_number=frame_number,
)
```
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from cvat.settings.base import JS_3RDPARTY, CSS_3RDPARTY
default_app_config = 'cvat.apps.auto_annotation.apps.AutoAnnotationConfig'
JS_3RDPARTY['dashboard'] = JS_3RDPARTY.get('dashboard', []) + ['auto_annotation/js/dashboardPlugin.js']
CSS_3RDPARTY['dashboard'] = CSS_3RDPARTY.get('dashboard', []) + ['auto_annotation/stylesheet.css']
......@@ -2,8 +2,3 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.contrib import admin
# Register your models here.
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.apps import AppConfig
class AutoAnnotationConfig(AppConfig):
name = "cvat.apps.auto_annotation"
def ready(self):
from .permissions import setup_permissions
setup_permissions()
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import cv2
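# Wraps a list of image paths; iterating over the loader lazily reads each frame with OpenCV (cv2.imread).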
class ImageLoader():
def __init__(self, image_list):
self.image_list = image_list
def __getitem__(self, i):
return self.image_list[i]
def __iter__(self):
for imagename in self.image_list:
yield self._load_image(imagename)
def __len__(self):
return len(self.image_list)
@staticmethod
def _load_image(path_to_image):
return cv2.imread(path_to_image)
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from openvino.inference_engine import IENetwork, IEPlugin
import subprocess
import os
_IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH", None)
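# Returns True if `lscpu` reports the given CPU instruction set (e.g. "avx2" or "sse4").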
def _check_instruction(instruction):
return instruction == str.strip(
subprocess.check_output(
'lscpu | grep -o "{}" | head -1'.format(instruction), shell=True
).decode('utf-8')
)
def make_plugin():
if _IE_PLUGINS_PATH is None:
raise OSError('Inference engine plugin path env not found in the system.')
plugin = IEPlugin(device='CPU', plugin_dirs=[_IE_PLUGINS_PATH])
if (_check_instruction('avx2')):
plugin.add_cpu_extension(os.path.join(_IE_PLUGINS_PATH, 'libcpu_extension_avx2.so'))
elif (_check_instruction('sse4')):
plugin.add_cpu_extension(os.path.join(_IE_PLUGINS_PATH, 'libcpu_extension_sse4.so'))
else:
raise Exception('Inference engine requires a support of avx2 or sse4.')
return plugin
def make_network(model, weights):
return IENetwork.from_ir(model = model, weights = weights)
# Generated by Django 2.1.3 on 2019-01-24 14:05
import cvat.apps.auto_annotation.models
from django.conf import settings
import django.core.files.storage
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='AnnotationModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', cvat.apps.auto_annotation.models.SafeCharField(max_length=256)),
('created_date', models.DateTimeField(auto_now_add=True)),
('updated_date', models.DateTimeField(auto_now_add=True)),
('model_file', models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=cvat.apps.auto_annotation.models.upload_path_handler)),
('weights_file', models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=cvat.apps.auto_annotation.models.upload_path_handler)),
('labelmap_file', models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=cvat.apps.auto_annotation.models.upload_path_handler)),
('interpretation_file', models.FileField(storage=django.core.files.storage.FileSystemStorage(), upload_to=cvat.apps.auto_annotation.models.upload_path_handler)),
('shared', models.BooleanField(default=False)),
('primary', models.BooleanField(default=False)),
('framework', models.CharField(default=cvat.apps.auto_annotation.models.FrameworkChoice('openvino'), max_length=32)),
('owner', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'default_permissions': (),
},
),
]
......@@ -3,7 +3,3 @@
#
# SPDX-License-Identifier: MIT
from django.db import models
# Create your models here.
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import json
import cv2
import os
import subprocess
from cvat.apps.auto_annotation.inference_engine import make_plugin, make_network
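# Loads an OpenVINO IR network (model + weights) on the CPU plugin, verifies that all layers are supported, and runs per-frame inference.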
class ModelLoader():
def __init__(self, model, weights):
self._model = model
self._weights = weights
IE_PLUGINS_PATH = os.getenv("IE_PLUGINS_PATH")
if not IE_PLUGINS_PATH:
raise OSError("Inference engine plugin path env not found in the system.")
plugin = make_plugin()
network = make_network(self._model, self._weights)
supported_layers = plugin.get_supported_layers(network)
not_supported_layers = [l for l in network.layers.keys() if l not in supported_layers]
if len(not_supported_layers) != 0:
raise Exception("Following layers are not supported by the plugin for specified device {}:\n {}".
format(plugin.device, ", ".join(not_supported_layers)))
self._input_blob_name = next(iter(network.inputs))
self._output_blob_name = next(iter(network.outputs))
self._net = plugin.load(network=network, num_requests=2)
input_type = network.inputs[self._input_blob_name]
self._input_layout = input_type if isinstance(input_type, list) else input_type.shape
def infer(self, image):
_, _, h, w = self._input_layout
in_frame = image if image.shape[:-1] == (h, w) else cv2.resize(image, (w, h))
in_frame = in_frame.transpose((2, 0, 1)) # Change data layout from HWC to CHW
return self._net.infer(inputs={self._input_blob_name: in_frame})[self._output_blob_name].copy()
def load_label_map(labels_path):
with open(labels_path, "r") as f:
return json.load(f)["label_map"]
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import django_rq
import fnmatch
import numpy as np
import os
import rq
import shutil
import tempfile
from django.db import transaction
from django.utils import timezone
from django.conf import settings
from cvat.apps.engine.log import slogger
from cvat.apps.engine.models import Task as TaskModel
from cvat.apps.engine.serializers import LabeledDataSerializer
from cvat.apps.engine.annotation import put_task_data, patch_task_data
from .models import AnnotationModel, FrameworkChoice
from .model_loader import ModelLoader
from .image_loader import ImageLoader
def _remove_old_file(model_file_field):
if model_file_field and os.path.exists(model_file_field.name):
os.remove(model_file_field.name)
def _update_dl_model_thread(dl_model_id, name, is_shared, model_file, weights_file, labelmap_file,
interpretation_file, run_tests, is_local_storage, delete_if_test_fails):
def _get_file_content(filename):
return os.path.basename(filename), open(filename, "rb")
def _delete_source_files():
for f in [model_file, weights_file, labelmap_file, interpretation_file]:
if f:
os.remove(f)
def _run_test(model_file, weights_file, labelmap_file, interpretation_file):
test_image = np.ones((1024, 1980, 3), np.uint8) * 255
try:
_run_inference_engine_annotation(
data=[test_image,],
model_file=model_file,
weights_file=weights_file,
labels_mapping=labelmap_file,
attribute_spec={},
convertation_file=interpretation_file,
)
except Exception as e:
return False, str(e)
return True, ""
job = rq.get_current_job()
job.meta["progress"] = "Saving data"
job.save_meta()
with transaction.atomic():
dl_model = AnnotationModel.objects.select_for_update().get(pk=dl_model_id)
test_res = True
message = ""
if run_tests:
job.meta["progress"] = "Test started"
job.save_meta()
test_res, message = _run_test(
model_file=model_file or dl_model.model_file.name,
weights_file=weights_file or dl_model.weights_file.name,
labelmap_file=labelmap_file or dl_model.labelmap_file.name,
interpretation_file=interpretation_file or dl_model.interpretation_file.name,
)
if not test_res:
job.meta["progress"] = "Test failed"
if delete_if_test_fails:
shutil.rmtree(dl_model.get_dirname(), ignore_errors=True)
dl_model.delete()
else:
job.meta["progress"] = "Test passed"
job.save_meta()
# update DL model
if test_res:
if model_file:
_remove_old_file(dl_model.model_file)
dl_model.model_file.save(*_get_file_content(model_file))
if weights_file:
_remove_old_file(dl_model.weights_file)
dl_model.weights_file.save(*_get_file_content(weights_file))
if labelmap_file:
_remove_old_file(dl_model.labelmap_file)
dl_model.labelmap_file.save(*_get_file_content(labelmap_file))
if interpretation_file:
_remove_old_file(dl_model.interpretation_file)
dl_model.interpretation_file.save(*_get_file_content(interpretation_file))
if name:
dl_model.name = name
if is_shared is not None:
dl_model.shared = is_shared
dl_model.updated_date = timezone.now()
dl_model.save()
if is_local_storage:
_delete_source_files()
if not test_res:
raise Exception("Model was not properly created/updated. Test failed: {}".format(message))
def create_or_update(dl_model_id, name, model_file, weights_file, labelmap_file, interpretation_file, owner, storage, is_shared):
def get_abs_path(share_path):
if not share_path:
return share_path
share_root = settings.SHARE_ROOT
relpath = os.path.normpath(share_path).lstrip('/')
if '..' in relpath.split(os.path.sep):
raise Exception('Permission denied')
abspath = os.path.abspath(os.path.join(share_root, relpath))
if os.path.commonprefix([share_root, abspath]) != share_root:
raise Exception('Bad file path on share: ' + abspath)
return abspath
def save_file_as_tmp(data):
if not data:
return None
fd, filename = tempfile.mkstemp()
with open(filename, 'wb') as tmp_file:
for chunk in data.chunks():
tmp_file.write(chunk)
os.close(fd)
return filename
is_create_request = dl_model_id is None
if is_create_request:
dl_model_id = create_empty(owner=owner)
run_tests = bool(model_file or weights_file or labelmap_file or interpretation_file)
if storage != "local":
model_file = get_abs_path(model_file)
weights_file = get_abs_path(weights_file)
labelmap_file = get_abs_path(labelmap_file)
interpretation_file = get_abs_path(interpretation_file)
else:
model_file = save_file_as_tmp(model_file)
weights_file = save_file_as_tmp(weights_file)
labelmap_file = save_file_as_tmp(labelmap_file)
interpretation_file = save_file_as_tmp(interpretation_file)
rq_id = "auto_annotation.create.{}".format(dl_model_id)
queue = django_rq.get_queue("default")
queue.enqueue_call(
func=_update_dl_model_thread,
args=(
dl_model_id,
name,
is_shared,
model_file,
weights_file,
labelmap_file,
interpretation_file,
run_tests,
storage == "local",
is_create_request,
),
job_id=rq_id
)
return rq_id
@transaction.atomic
def create_empty(owner, framework=FrameworkChoice.OPENVINO):
db_model = AnnotationModel(
owner=owner,
)
db_model.save()
model_path = db_model.get_dirname()
if os.path.isdir(model_path):
shutil.rmtree(model_path)
os.mkdir(model_path)
return db_model.id
@transaction.atomic
def delete(dl_model_id):
dl_model = AnnotationModel.objects.select_for_update().get(pk=dl_model_id)
if dl_model:
if dl_model.primary:
raise Exception("Can not delete primary model {}".format(dl_model_id))
shutil.rmtree(dl_model.get_dirname(), ignore_errors=True)
dl_model.delete()
else:
raise Exception("Requested DL model {} doesn't exist".format(dl_model_id))
def get_image_data(path_to_data):
def get_image_key(item):
return int(os.path.splitext(os.path.basename(item))[0])
image_list = []
for root, _, filenames in os.walk(path_to_data):
for filename in fnmatch.filter(filenames, "*.jpg"):
image_list.append(os.path.join(root, filename))
image_list.sort(key=get_image_key)
return ImageLoader(image_list)
class Results():
def __init__(self):
self._results = {
"shapes": [],
"tracks": []
}
def add_box(self, xtl, ytl, xbr, ybr, label, frame_number, attributes=None):
self.get_shapes().append({
"label": label,
"frame": frame_number,
"points": [xtl, ytl, xbr, ybr],
"type": "rectangle",
"attributes": attributes or {},
})
def add_points(self, points, label, frame_number, attributes=None):
points = self._create_polyshape(points, label, frame_number, attributes)
points["type"] = "points"
self.get_shapes().append(points)
def add_polygon(self, points, label, frame_number, attributes=None):
polygon = self._create_polyshape(points, label, frame_number, attributes)
polygon["type"] = "polygon"
self.get_shapes().append(polygon)
def add_polyline(self, points, label, frame_number, attributes=None):
polyline = self._create_polyshape(points, label, frame_number, attributes)
polyline["type"] = "polyline"
self.get_shapes().append(polyline)
def get_shapes(self):
return self._results["shapes"]
def get_tracks(self):
return self._results["tracks"]
@staticmethod
def _create_polyshape(points, label, frame_number, attributes=None):
return {
"label": label,
"frame": frame_number,
"points": " ".join("{},{}".format(pair[0], pair[1]) for pair in points),
"attributes": attributes or {},
}
def _process_detections(detections, path_to_conv_script):
results = Results()
global_vars = {
"__builtins__": {
"str": str,
"int": int,
"float": float,
"max": max,
"min": min,
"range": range,
},
}
local_vars = {
"detections": detections,
"results": results,
}
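# Run the user-provided interpretation script with only the whitelisted builtins and the documented 'detections' and 'results' variables in scope.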
exec (open(path_to_conv_script).read(), global_vars, local_vars)
return results
def _run_inference_engine_annotation(data, model_file, weights_file,
labels_mapping, attribute_spec, convertation_file, job=None, update_progress=None):
def process_attributes(shape_attributes, label_attr_spec):
attributes = []
for attr_text, attr_value in shape_attributes.items():
if attr_text in label_attr_spec:
attributes.append({
"id": label_attr_spec[attr_text],
"value": attr_value,
})
return attributes
def add_shapes(shapes, target_container):
for shape in shapes:
if shape["label"] not in labels_mapping:
continue
db_label = labels_mapping[shape["label"]]
target_container.append({
"label_id": db_label,
"frame": shape["frame"],
"points": shape["points"],
"type": shape["type"],
"z_order": 0,
"group": None,
"occluded": False,
"attributes": process_attributes(shape["attributes"], attribute_spec[db_label]),
})
result = {
"shapes": [],
"tracks": [],
"tags": [],
"version": 0
}
data_len = len(data)
model = ModelLoader(model=model_file, weights=weights_file)
frame_counter = 0
detections = []
for frame in data:
orig_rows, orig_cols = frame.shape[:2]
detections.append({
"frame_id": frame_counter,
"frame_height": orig_rows,
"frame_width": orig_cols,
"detections": model.infer(frame),
})
frame_counter += 1
if job and update_progress and not update_progress(job, frame_counter * 100 / data_len):
return None
processed_detections = _process_detections(detections, convertation_file)
add_shapes(processed_detections.get_shapes(), result["shapes"])
return result
def run_inference_thread(tid, model_file, weights_file, labels_mapping, attributes, convertation_file, reset, user):
def update_progress(job, progress):
job.refresh()
if "cancel" in job.meta:
del job.meta["cancel"]
job.save()
return False
job.meta["progress"] = progress
job.save_meta()
return True
try:
job = rq.get_current_job()
job.meta["progress"] = 0
job.save_meta()
db_task = TaskModel.objects.get(pk=tid)
result = None
slogger.glob.info("auto annotation with openvino toolkit for task {}".format(tid))
result = _run_inference_engine_annotation(
data=get_image_data(db_task.get_data_dirname()),
model_file=model_file,
weights_file=weights_file,
labels_mapping=labels_mapping,
attribute_spec=attributes,
convertation_file= convertation_file,
job=job,
update_progress=update_progress,
)
if result is None:
slogger.glob.info("auto annotation for task {} canceled by user".format(tid))
return
serializer = LabeledDataSerializer(data = result)
if serializer.is_valid(raise_exception=True):
if reset:
put_task_data(tid, user, result)
else:
patch_task_data(tid, user, result, "create")
slogger.glob.info("auto annotation for task {} done".format(tid))
except Exception as e:
try:
slogger.task[tid].exception("exception was occurred during auto annotation of the task", exc_info=True)
except Exception as ex:
slogger.glob.exception("exception was occurred during auto annotation of the task {}: {}".format(tid, str(ex)), exc_info=True)
raise ex
raise e
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from enum import Enum
from django.db import models
from django.conf import settings
from django.contrib.auth.models import User
from django.core.files.storage import FileSystemStorage
fs = FileSystemStorage()
def upload_path_handler(instance, filename):
return "{models_root}/{id}/{file}".format(models_root=settings.MODELS_ROOT, id=instance.id, file=filename)
class FrameworkChoice(Enum):
OPENVINO = 'openvino'
TENSORFLOW = 'tensorflow'
PYTORCH = 'pytorch'
def __str__(self):
return self.value
class SafeCharField(models.CharField):
def get_prep_value(self, value):
value = super().get_prep_value(value)
if value:
return value[:self.max_length]
return value
class AnnotationModel(models.Model):
name = SafeCharField(max_length=256)
owner = models.ForeignKey(User, null=True, blank=True,
on_delete=models.SET_NULL)
created_date = models.DateTimeField(auto_now_add=True)
updated_date = models.DateTimeField(auto_now_add=True)
model_file = models.FileField(upload_to=upload_path_handler, storage=fs)
weights_file = models.FileField(upload_to=upload_path_handler, storage=fs)
labelmap_file = models.FileField(upload_to=upload_path_handler, storage=fs)
interpretation_file = models.FileField(upload_to=upload_path_handler, storage=fs)
shared = models.BooleanField(default=False)
primary = models.BooleanField(default=False)
framework = models.CharField(max_length=32, default=FrameworkChoice.OPENVINO)
class Meta:
default_permissions = ()
def get_dirname(self):
return "{models_root}/{id}".format(models_root=settings.MODELS_ROOT, id=self.id)
def __str__(self):
return self.name
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import rules
from cvat.apps.authentication.auth import has_admin_role, has_user_role
@rules.predicate
def is_model_owner(db_user, db_dl_model):
return db_dl_model.owner == db_user
@rules.predicate
def is_shared_model(_, db_dl_model):
return db_dl_model.shared
@rules.predicate
def is_primary_model(_, db_dl_model):
return db_dl_model.primary
def setup_permissions():
rules.add_perm('auto_annotation.model.create', has_admin_role | has_user_role)
rules.add_perm('auto_annotation.model.update', (has_admin_role | is_model_owner) & ~is_primary_model)
rules.add_perm('auto_annotation.model.delete', (has_admin_role | is_model_owner) & ~is_primary_model)
rules.add_perm('auto_annotation.model.access', has_admin_role | is_model_owner |
is_shared_model | is_primary_model)
#annotatorManagerContent, #annotatorRunnerContent {
width: 800px;
height: 300px;
}
#annotatorManagerButton {
padding: 7px;
margin-left: 4px;
}
.modelsTable {
width: 100%;
color:#666;
text-shadow: 1px 1px 0px #fff;
background:#D2D3D4;
border:#ccc 1px solid;
border-radius: 3px;
box-shadow: 0 1px 2px black;
}
.modelsTable th {
border-top: 1px solid #fafafa;
border-bottom: 1px solid #e0e0e0;
background: #ededed;
}
.modelsTable th:first-child {
text-align: left;
padding-left:20px;
}
.modelsTable tr:first-child th:first-child {
border-top-left-radius:3px;
}
.modelsTable tr:first-child th:last-child {
border-top-right-radius:3px;
}
.modelsTable tr {
text-align: center;
padding-left: 20px;
}
.modelsTable td:first-child {
text-align: left;
padding-left: 20px;
border-left: 0;
}
.modelsTable td {
padding: 18px;
border-top: 1px solid #ffffff;
border-bottom:1px solid #e0e0e0;
border-left: 1px solid #e0e0e0;
background: #fafafa;
}
.modelsTable tr.even td {
background: #f6f6f6;
}
.modelsTable tr:last-child td {
border-bottom:0;
}
.modelsTable tr:last-child td:first-child {
border-bottom-left-radius:3px;
}
.modelsTable tr:last-child td:last-child {
border-bottom-right-radius:3px;
}
.modelsTable tr:hover td {
background: #f2f2f2;
}
#annotatorManagerUploadModel {
float: left;
padding-left: 3%;
width: 40%;
}
......@@ -2,8 +2,3 @@
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.test import TestCase
# Create your tests here.
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from django.urls import path
from . import views
urlpatterns = [
path("create", views.create_model),
path("update/<int:mid>", views.update_model),
path("delete/<int:mid>", views.delete_model),
path("start/<int:mid>/<int:tid>", views.start_annotation),
path("check/<str:rq_id>", views.check),
path("cancel/<int:tid>", views.cancel),
path("meta/get", views.get_meta_info),
]
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import django_rq
import json
import os
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest
from django.db.models import Q
from rules.contrib.views import permission_required, objectgetter
from cvat.apps.authentication.decorators import login_required
from cvat.apps.engine.models import Task as TaskModel
from cvat.apps.authentication.auth import has_admin_role
from cvat.apps.engine.log import slogger
from .model_loader import load_label_map
from . import model_manager
from .models import AnnotationModel
@login_required
@permission_required(perm=["engine.task.change"],
fn=objectgetter(TaskModel, "tid"), raise_exception=True)
def cancel(request, tid):
try:
queue = django_rq.get_queue("low")
job = queue.fetch_job("auto_annotation.run.{}".format(tid))
if job is None or job.is_finished or job.is_failed:
raise Exception("Task is not being annotated currently")
elif "cancel" not in job.meta:
job.meta["cancel"] = True
job.save()
except Exception as ex:
try:
slogger.task[tid].exception("cannot cancel auto annotation for task #{}".format(tid), exc_info=True)
except Exception as logger_ex:
slogger.glob.exception("exception was occured during cancel auto annotation request for task {}: {}".format(tid, str(logger_ex)), exc_info=True)
return HttpResponseBadRequest(str(ex))
return HttpResponse()
@login_required
@permission_required(perm=["auto_annotation.model.create"], raise_exception=True)
def create_model(request):
if request.method != 'POST':
return HttpResponseBadRequest("Only POST requests are accepted")
try:
params = request.POST
storage = params["storage"]
name = params["name"]
is_shared = params["shared"].lower() == "true"
if is_shared and not has_admin_role(request.user):
raise Exception("Only admin can create shared models")
files = request.FILES if storage == "local" else params
model = files["xml"]
weights = files["bin"]
labelmap = files["json"]
interpretation_script = files["py"]
owner = request.user
rq_id = model_manager.create_or_update(
dl_model_id=None,
name=name,
model_file=model,
weights_file=weights,
labelmap_file=labelmap,
interpretation_file=interpretation_script,
owner=owner,
storage=storage,
is_shared=is_shared,
)
return JsonResponse({"id": rq_id})
except Exception as e:
return HttpResponseBadRequest(str(e))
@login_required
@permission_required(perm=["auto_annotation.model.update"],
fn=objectgetter(AnnotationModel, "mid"), raise_exception=True)
def update_model(request, mid):
if request.method != 'POST':
return HttpResponseBadRequest("Only POST requests are accepted")
try:
params = request.POST
storage = params["storage"]
name = params.get("name")
is_shared = params.get("shared")
is_shared = is_shared.lower() == "true" if is_shared else None
if is_shared and not has_admin_role(request.user):
raise Exception("Only admin can create shared models")
files = request.FILES
model = files.get("xml")
weights = files.get("bin")
labelmap = files.get("json")
interpretation_script = files.get("py")
rq_id = model_manager.create_or_update(
dl_model_id=mid,
name=name,
model_file=model,
weights_file=weights,
labelmap_file=labelmap,
interpretation_file=interpretation_script,
owner=None,
storage=storage,
is_shared=is_shared,
)
return JsonResponse({"id": rq_id})
except Exception as e:
return HttpResponseBadRequest(str(e))
@login_required
@permission_required(perm=["auto_annotation.model.delete"],
fn=objectgetter(AnnotationModel, "mid"), raise_exception=True)
def delete_model(request, mid):
if request.method != 'DELETE':
return HttpResponseBadRequest("Only DELETE requests are accepted")
model_manager.delete(mid)
return HttpResponse()
@login_required
def get_meta_info(request):
try:
tids = json.loads(request.body.decode('utf-8'))
response = {
"admin": has_admin_role(request.user),
"models": [],
"run": {},
}
dl_model_list = list(AnnotationModel.objects.filter(Q(owner=request.user) | Q(primary=True) | Q(shared=True)).order_by('-created_date'))
for dl_model in dl_model_list:
labels = []
if dl_model.labelmap_file and os.path.exists(dl_model.labelmap_file.name):
with dl_model.labelmap_file.open('r') as f:
labels = list(json.load(f)["label_map"].values())
response["models"].append({
"id": dl_model.id,
"name": dl_model.name,
"primary": dl_model.primary,
"uploadDate": dl_model.created_date,
"updateDate": dl_model.updated_date,
"labels": labels,
})
queue = django_rq.get_queue("low")
for tid in tids:
rq_id = "auto_annotation.run.{}".format(tid)
job = queue.fetch_job(rq_id)
if job is not None:
response["run"][tid] = {
"status": job.get_status(),
"rq_id": rq_id,
}
return JsonResponse(response)
except Exception as e:
return HttpResponseBadRequest(str(e))
@login_required
@permission_required(perm=["engine.task.change"],
fn=objectgetter(TaskModel, "tid"), raise_exception=True)
@permission_required(perm=["auto_annotation.model.access"],
fn=objectgetter(AnnotationModel, "mid"), raise_exception=True)
def start_annotation(request, mid, tid):
slogger.glob.info("auto annotation create request for task {} via DL model {}".format(tid, mid))
try:
db_task = TaskModel.objects.get(pk=tid)
queue = django_rq.get_queue("low")
job = queue.fetch_job("auto_annotation.run.{}".format(tid))
if job is not None and (job.is_started or job.is_queued):
raise Exception("The process is already running")
data = json.loads(request.body.decode('utf-8'))
should_reset = data["reset"]
user_defined_labels_mapping = data["labels"]
dl_model = AnnotationModel.objects.get(pk=mid)
model_file_path = dl_model.model_file.name
weights_file_path = dl_model.weights_file.name
labelmap_file = dl_model.labelmap_file.name
convertation_file_path = dl_model.interpretation_file.name
db_labels = db_task.label_set.prefetch_related("attributespec_set").all()
db_attributes = {db_label.id:
{db_attr.name: db_attr.id for db_attr in db_label.attributespec_set.all()} for db_label in db_labels}
db_labels = {db_label.name:db_label.id for db_label in db_labels}
model_labels = {value: key for key, value in load_label_map(labelmap_file).items()}
labels_mapping = {}
for user_model_label, user_db_label in user_defined_labels_mapping.items():
if user_model_label in model_labels and user_db_label in db_labels:
labels_mapping[int(model_labels[user_model_label])] = db_labels[user_db_label]
if not labels_mapping:
raise Exception("No labels found for annotation")
rq_id="auto_annotation.run.{}".format(tid)
queue.enqueue_call(func=model_manager.run_inference_thread,
args=(
tid,
model_file_path,
weights_file_path,
labels_mapping,
db_attributes,
convertation_file_path,
should_reset,
request.user,
),
job_id = rq_id,
timeout=604800) # 7 days
slogger.task[tid].info("auto annotation job enqueued")
except Exception as ex:
try:
slogger.task[tid].exception("exception was occurred during annotation request", exc_info=True)
except Exception as logger_ex:
slogger.glob.exception("exception was occurred during create auto annotation request for task {}: {}".format(tid, str(logger_ex)), exc_info=True)
return HttpResponseBadRequest(str(ex))
return JsonResponse({"id": rq_id})
@login_required
def check(request, rq_id):
try:
target_queue = "low" if "auto_annotation.run" in rq_id else "default"
queue = django_rq.get_queue(target_queue)
job = queue.fetch_job(rq_id)
if job is not None and "cancel" in job.meta:
return JsonResponse({"status": "finished"})
data = {}
if job is None:
data["status"] = "unknown"
elif job.is_queued:
data["status"] = "queued"
elif job.is_started:
data["status"] = "started"
data["progress"] = job.meta["progress"] if "progress" in job.meta else ""
elif job.is_finished:
data["status"] = "finished"
job.delete()
else:
data["status"] = "failed"
data["error"] = job.exc_info
job.delete()
    except Exception:
        # If fetching the job fails (e.g. the queue is unavailable), report an
        # unknown status instead of referencing an unassigned "data" variable.
        data = {"status": "unknown"}
return JsonResponse(data)
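# Illustrative client-side sketch (not part of this commit): one way to drive
# the start_annotation() and check() views above over HTTP. The URL paths and
# the authenticated `session` object are assumptions for the example; the real
# routes and CSRF handling live elsewhere in the application.
import json
import time

import requests


def run_auto_annotation(session, base_url, tid, mid, labels_mapping, reset=True):
    # start_annotation() expects a JSON body with "reset" and "labels" keys.
    resp = session.post(
        "{}/auto_annotation/start/{}/{}".format(base_url, mid, tid),  # assumed route
        data=json.dumps({"reset": reset, "labels": labels_mapping}),
    )
    resp.raise_for_status()
    rq_id = resp.json()["id"]  # e.g. "auto_annotation.run.<tid>"

    # Poll check() until the RQ job leaves the "queued"/"started" states.
    while True:
        status = session.get(
            "{}/auto_annotation/check/{}".format(base_url, rq_id),  # assumed route
        ).json()
        if status["status"] not in ("queued", "started"):
            return status
        time.sleep(3)


# Hypothetical usage:
#   session = requests.Session()  # log in first, as the views require it
#   result = run_auto_annotation(session, "http://localhost:8080", tid=42, mid=1,
#                                labels_mapping={"person": "person"})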
......@@ -5,7 +5,9 @@
from django.apps import AppConfig
class DashboardConfig(AppConfig):
name = 'dashboard'
name = 'cvat.apps.dashboard'
def ready(self):
# plugin registration
pass
\ No newline at end of file
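# Illustrative sketch (an assumption, not code from this commit): the empty
# ready() hook above is the natural place for one-off plugin registration.
# A plugin app could, for example, append its client-side script to the
# JS_3RDPARTY setting, mirroring what the dextr_segmentation module does
# further below; the app and file names here are hypothetical.
from django.apps import AppConfig


class HypotheticalPluginConfig(AppConfig):
    name = 'cvat.apps.hypothetical_plugin'

    def ready(self):
        # Imported lazily so that settings are fully initialized first.
        from cvat.settings.base import JS_3RDPARTY
        JS_3RDPARTY['dashboard'] = JS_3RDPARTY.get('dashboard', []) + \
            ['hypothetical_plugin/js/dashboardPlugin.js']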
.paginationjs{line-height:1.6;font-family:Marmelad,"Lucida Grande",Arial,"Hiragino Sans GB",Georgia,sans-serif;font-size:14px;box-sizing:initial}.paginationjs:after{display:table;content:" ";clear:both}.paginationjs .paginationjs-pages{float:left}.paginationjs .paginationjs-pages ul{float:left;margin:0;padding:0}.paginationjs .paginationjs-go-button,.paginationjs .paginationjs-go-input,.paginationjs .paginationjs-nav{float:left;margin-left:10px;font-size:14px}.paginationjs .paginationjs-pages li{float:left;border:1px solid #aaa;border-right:none;list-style:none}.paginationjs .paginationjs-pages li>a{min-width:30px;height:28px;line-height:28px;display:block;background:#fff;font-size:14px;color:#333;text-decoration:none;text-align:center}.paginationjs .paginationjs-pages li>a:hover{background:#eee}.paginationjs .paginationjs-pages li.active{border:none}.paginationjs .paginationjs-pages li.active>a{height:30px;line-height:30px;background:#aaa;color:#fff}.paginationjs .paginationjs-pages li.disabled>a{opacity:.3}.paginationjs .paginationjs-pages li.disabled>a:hover{background:0 0}.paginationjs .paginationjs-pages li:first-child,.paginationjs .paginationjs-pages li:first-child>a{border-radius:3px 0 0 3px}.paginationjs .paginationjs-pages li:last-child{border-right:1px solid #aaa;border-radius:0 3px 3px 0}.paginationjs .paginationjs-pages li:last-child>a{border-radius:0 3px 3px 0}.paginationjs .paginationjs-go-input>input[type=text]{width:30px;height:28px;background:#fff;border-radius:3px;border:1px solid #aaa;padding:0;font-size:14px;text-align:center;vertical-align:baseline;outline:0;box-shadow:none;box-sizing:initial}.paginationjs .paginationjs-go-button>input[type=button]{min-width:40px;height:30px;line-height:28px;background:#fff;border-radius:3px;border:1px solid #aaa;text-align:center;padding:0 8px;font-size:14px;vertical-align:baseline;outline:0;box-shadow:none;color:#333;cursor:pointer;vertical-align:middle\9}.paginationjs.paginationjs-theme-blue .paginationjs-go-input>input[type=text],.paginationjs.paginationjs-theme-blue .paginationjs-pages li{border-color:#289de9}.paginationjs .paginationjs-go-button>input[type=button]:hover{background-color:#f8f8f8}.paginationjs .paginationjs-nav{height:30px;line-height:30px}.paginationjs .paginationjs-go-button,.paginationjs .paginationjs-go-input{margin-left:5px\9}.paginationjs.paginationjs-small{font-size:12px}.paginationjs.paginationjs-small .paginationjs-pages li>a{min-width:26px;height:24px;line-height:24px;font-size:12px}.paginationjs.paginationjs-small .paginationjs-pages li.active>a{height:26px;line-height:26px}.paginationjs.paginationjs-small .paginationjs-go-input{font-size:12px}.paginationjs.paginationjs-small .paginationjs-go-input>input[type=text]{width:26px;height:24px;font-size:12px}.paginationjs.paginationjs-small .paginationjs-go-button{font-size:12px}.paginationjs.paginationjs-small .paginationjs-go-button>input[type=button]{min-width:30px;height:26px;line-height:24px;padding:0 6px;font-size:12px}.paginationjs.paginationjs-small .paginationjs-nav{height:26px;line-height:26px;font-size:12px}.paginationjs.paginationjs-big{font-size:16px}.paginationjs.paginationjs-big .paginationjs-pages li>a{min-width:36px;height:34px;line-height:34px;font-size:16px}.paginationjs.paginationjs-big .paginationjs-pages li.active>a{height:36px;line-height:36px}.paginationjs.paginationjs-big .paginationjs-go-input{font-size:16px}.paginationjs.paginationjs-big 
.paginationjs-go-input>input[type=text]{width:36px;height:34px;font-size:16px}.paginationjs.paginationjs-big .paginationjs-go-button{font-size:16px}.paginationjs.paginationjs-big .paginationjs-go-button>input[type=button]{min-width:50px;height:36px;line-height:34px;padding:0 12px;font-size:16px}.paginationjs.paginationjs-big .paginationjs-nav{height:36px;line-height:36px;font-size:16px}.paginationjs.paginationjs-theme-blue .paginationjs-pages li>a{color:#289de9}.paginationjs.paginationjs-theme-blue .paginationjs-pages li>a:hover{background:#e9f4fc}.paginationjs.paginationjs-theme-blue .paginationjs-pages li.active>a{background:#289de9;color:#fff}.paginationjs.paginationjs-theme-blue .paginationjs-pages li.disabled>a:hover{background:0 0}.paginationjs.paginationjs-theme-blue .paginationjs-go-button>input[type=button]{background:#289de9;border-color:#289de9;color:#fff}.paginationjs.paginationjs-theme-green .paginationjs-go-input>input[type=text],.paginationjs.paginationjs-theme-green .paginationjs-pages li{border-color:#449d44}.paginationjs.paginationjs-theme-blue .paginationjs-go-button>input[type=button]:hover{background-color:#3ca5ea}.paginationjs.paginationjs-theme-green .paginationjs-pages li>a{color:#449d44}.paginationjs.paginationjs-theme-green .paginationjs-pages li>a:hover{background:#ebf4eb}.paginationjs.paginationjs-theme-green .paginationjs-pages li.active>a{background:#449d44;color:#fff}.paginationjs.paginationjs-theme-green .paginationjs-pages li.disabled>a:hover{background:0 0}.paginationjs.paginationjs-theme-green .paginationjs-go-button>input[type=button]{background:#449d44;border-color:#449d44;color:#fff}.paginationjs.paginationjs-theme-yellow .paginationjs-go-input>input[type=text],.paginationjs.paginationjs-theme-yellow .paginationjs-pages li{border-color:#ec971f}.paginationjs.paginationjs-theme-green .paginationjs-go-button>input[type=button]:hover{background-color:#55a555}.paginationjs.paginationjs-theme-yellow .paginationjs-pages li>a{color:#ec971f}.paginationjs.paginationjs-theme-yellow .paginationjs-pages li>a:hover{background:#fdf5e9}.paginationjs.paginationjs-theme-yellow .paginationjs-pages li.active>a{background:#ec971f;color:#fff}.paginationjs.paginationjs-theme-yellow .paginationjs-pages li.disabled>a:hover{background:0 0}.paginationjs.paginationjs-theme-yellow .paginationjs-go-button>input[type=button]{background:#ec971f;border-color:#ec971f;color:#fff}.paginationjs.paginationjs-theme-red .paginationjs-go-input>input[type=text],.paginationjs.paginationjs-theme-red .paginationjs-pages li{border-color:#c9302c}.paginationjs.paginationjs-theme-yellow .paginationjs-go-button>input[type=button]:hover{background-color:#eea135}.paginationjs.paginationjs-theme-red .paginationjs-pages li>a{color:#c9302c}.paginationjs.paginationjs-theme-red .paginationjs-pages li>a:hover{background:#faeaea}.paginationjs.paginationjs-theme-red .paginationjs-pages li.active>a{background:#c9302c;color:#fff}.paginationjs.paginationjs-theme-red .paginationjs-pages li.disabled>a:hover{background:0 0}.paginationjs.paginationjs-theme-red .paginationjs-go-button>input[type=button]{background:#c9302c;border-color:#c9302c;color:#fff}.paginationjs.paginationjs-theme-red .paginationjs-go-button>input[type=button]:hover{background-color:#ce4541}.paginationjs .paginationjs-pages li.paginationjs-next{border-right:1px solid #aaa\9}.paginationjs .paginationjs-go-input>input[type=text]{line-height:28px\9;vertical-align:middle\9}.paginationjs.paginationjs-big .paginationjs-pages 
li>a{line-height:36px\9}.paginationjs.paginationjs-big .paginationjs-go-input>input[type=text]{height:36px\9;line-height:36px\9}
\ No newline at end of file
/*
* pagination.js 2.1.4
* A jQuery plugin to provide simple yet fully customisable pagination
* https://github.com/superRaytin/paginationjs
* Homepage: http://pagination.js.org
*
* Copyright 2014-2100, superRaytin
* Released under the MIT license.
*/
!function(a,b){function c(a){throw new Error("Pagination: "+a)}function d(a){a.dataSource||c('"dataSource" is required.'),"string"==typeof a.dataSource?void 0===a.totalNumberLocator?void 0===a.totalNumber?c('"totalNumber" is required.'):b.isNumeric(a.totalNumber)||c('"totalNumber" is incorrect. (Number)'):b.isFunction(a.totalNumberLocator)||c('"totalNumberLocator" should be a Function.'):i.isObject(a.dataSource)&&(void 0===a.locator?c('"dataSource" is an Object, please specify "locator".'):"string"==typeof a.locator||b.isFunction(a.locator)||c(a.locator+" is incorrect. (String | Function)")),void 0===a.formatResult||b.isFunction(a.formatResult)||c('"formatResult" should be a Function.')}function e(a){var c=["go","previous","next","disable","enable","refresh","show","hide","destroy"];b.each(c,function(b,c){a.off(h+c)}),a.data("pagination",{}),b(".paginationjs",a).remove()}function f(a,b){return("object"==(b=typeof a)?null==a&&"null"||Object.prototype.toString.call(a).slice(8,-1):b).toLowerCase()}void 0===b&&c("Pagination requires jQuery.");var g="pagination",h="__pagination-";b.fn.pagination&&(g="pagination2"),b.fn[g]=function(f){if(void 0===f)return this;var j=b(this),k=b.extend({},b.fn[g].defaults,f),l={initialize:function(){var a=this;if(j.data("pagination")||j.data("pagination",{}),!1!==a.callHook("beforeInit")){j.data("pagination").initialized&&b(".paginationjs",j).remove(),a.disabled=!!k.disabled;var c=a.model={pageRange:k.pageRange,pageSize:k.pageSize};a.parseDataSource(k.dataSource,function(b){if(a.isAsync=i.isString(b),i.isArray(b)&&(c.totalNumber=k.totalNumber=b.length),a.isDynamicTotalNumber=a.isAsync&&k.totalNumberLocator,!(k.hideWhenLessThanOnePage&&a.getTotalPage()<=1)){var d=a.render(!0);k.className&&d.addClass(k.className),c.el=d,j["bottom"===k.position?"append":"prepend"](d),a.observer(),j.data("pagination").initialized=!0,a.callHook("afterInit",d)}})}},render:function(a){var c=this,d=c.model,e=d.el||b('<div class="paginationjs"></div>'),f=!0!==a;c.callHook("beforeRender",f);var g=d.pageNumber||k.pageNumber,h=k.pageRange,i=c.getTotalPage(),j=g-h,l=g+h;return l>i&&(l=i,j=i-2*h,j=j<1?1:j),j<=1&&(j=1,l=Math.min(2*h+1,i)),e.html(c.generateHTML({currentPage:g,pageRange:h,rangeStart:j,rangeEnd:l})),c.callHook("afterRender",f),e},generateHTML:function(a){var c,d,e=this,f=a.currentPage,g=e.getTotalPage(),h=a.rangeStart,i=a.rangeEnd,j=e.getTotalNumber(),l=k.showPrevious,m=k.showNext,n=k.showPageNumbers,o=k.showNavigator,p=k.showGoInput,q=k.showGoButton,r=k.pageLink,s=k.prevText,t=k.nextText,u=k.ellipsisText,v=k.goButtonText,w=k.classPrefix,x=k.activeClassName,y=k.disableClassName,z=k.ulClassName,A="",B='<input type="text" class="J-paginationjs-go-pagenumber">',C='<input type="button" class="J-paginationjs-go-button" value="'+v+'">',D=b.isFunction(k.formatNavigator)?k.formatNavigator(f,g,j):k.formatNavigator,E=b.isFunction(k.formatGoInput)?k.formatGoInput(B,f,g,j):k.formatGoInput,F=b.isFunction(k.formatGoButton)?k.formatGoButton(C,f,g,j):k.formatGoButton,G=b.isFunction(k.autoHidePrevious)?k.autoHidePrevious():k.autoHidePrevious,H=b.isFunction(k.autoHideNext)?k.autoHideNext():k.autoHideNext,I=b.isFunction(k.header)?k.header(f,g,j):k.header,J=b.isFunction(k.footer)?k.footer(f,g,j):k.footer;if(I&&(c=e.replaceVariables(I,{currentPage:f,totalPage:g,totalNumber:j}),A+=c),l||n||m){if(A+='<div class="paginationjs-pages">',A+=z?'<ul class="'+z+'">':"<ul>",l&&(f<=1?G||(A+='<li class="'+w+"-prev "+y+'"><a>'+s+"</a></li>"):A+='<li class="'+w+'-prev J-paginationjs-previous" data-num="'+(f-1)+'" 
title="Previous page"><a href="'+r+'">'+s+"</a></li>"),n){if(h<=3)for(d=1;d<h;d++)A+=d==f?'<li class="'+w+"-page J-paginationjs-page "+x+'" data-num="'+d+'"><a>'+d+"</a></li>":'<li class="'+w+'-page J-paginationjs-page" data-num="'+d+'"><a href="'+r+'">'+d+"</a></li>";else k.showFirstOnEllipsisShow&&(A+='<li class="'+w+"-page "+w+'-first J-paginationjs-page" data-num="1"><a href="'+r+'">1</a></li>'),A+='<li class="'+w+"-ellipsis "+y+'"><a>'+u+"</a></li>";for(d=h;d<=i;d++)A+=d==f?'<li class="'+w+"-page J-paginationjs-page "+x+'" data-num="'+d+'"><a>'+d+"</a></li>":'<li class="'+w+'-page J-paginationjs-page" data-num="'+d+'"><a href="'+r+'">'+d+"</a></li>";if(i>=g-2)for(d=i+1;d<=g;d++)A+='<li class="'+w+'-page J-paginationjs-page" data-num="'+d+'"><a href="'+r+'">'+d+"</a></li>";else A+='<li class="'+w+"-ellipsis "+y+'"><a>'+u+"</a></li>",k.showLastOnEllipsisShow&&(A+='<li class="'+w+"-page "+w+'-last J-paginationjs-page" data-num="'+g+'"><a href="'+r+'">'+g+"</a></li>")}m&&(f>=g?H||(A+='<li class="'+w+"-next "+y+'"><a>'+t+"</a></li>"):A+='<li class="'+w+'-next J-paginationjs-next" data-num="'+(f+1)+'" title="Next page"><a href="'+r+'">'+t+"</a></li>"),A+="</ul></div>"}return o&&D&&(c=e.replaceVariables(D,{currentPage:f,totalPage:g,totalNumber:j}),A+='<div class="'+w+'-nav J-paginationjs-nav">'+c+"</div>"),p&&E&&(c=e.replaceVariables(E,{currentPage:f,totalPage:g,totalNumber:j,input:B}),A+='<div class="'+w+'-go-input">'+c+"</div>"),q&&F&&(c=e.replaceVariables(F,{currentPage:f,totalPage:g,totalNumber:j,button:C}),A+='<div class="'+w+'-go-button">'+c+"</div>"),J&&(c=e.replaceVariables(J,{currentPage:f,totalPage:g,totalNumber:j}),A+=c),A},findTotalNumberFromRemoteResponse:function(a){this.model.totalNumber=k.totalNumberLocator(a)},go:function(a,c){function d(a){if(!1===e.callHook("beforePaging",g))return!1;if(f.direction=void 0===f.pageNumber?0:g>f.pageNumber?1:-1,f.pageNumber=g,e.render(),e.disabled&&e.isAsync&&e.enable(),j.data("pagination").model=f,k.formatResult){var d=b.extend(!0,[],a);i.isArray(a=k.formatResult(d))||(a=d)}j.data("pagination").currentPageData=a,e.doCallback(a,c),e.callHook("afterPaging",g),1==g&&e.callHook("afterIsFirstPage"),g==e.getTotalPage()&&e.callHook("afterIsLastPage")}var e=this,f=e.model;if(!e.disabled){var g=a;if((g=parseInt(g))&&!(g<1)){var h=k.pageSize,l=e.getTotalNumber(),m=e.getTotalPage();if(!(l>0&&g>m)){if(!e.isAsync)return void d(e.getDataFragment(g));var n={},o=k.alias||{};n[o.pageSize?o.pageSize:"pageSize"]=h,n[o.pageNumber?o.pageNumber:"pageNumber"]=g;var p=b.isFunction(k.ajax)?k.ajax():k.ajax,q={type:"get",cache:!1,data:{},contentType:"application/x-www-form-urlencoded; charset=UTF-8",dataType:"json",async:!0};b.extend(!0,q,p),b.extend(q.data,n),q.url=k.dataSource,q.success=function(a){e.isDynamicTotalNumber?e.findTotalNumberFromRemoteResponse(a):e.model.totalNumber=k.totalNumber,d(e.filterDataByLocator(a))},q.error=function(a,b,c){k.formatAjaxError&&k.formatAjaxError(a,b,c),e.enable()},e.disable(),b.ajax(q)}}}},doCallback:function(a,c){var d=this,e=d.model;b.isFunction(c)?c(a,e):b.isFunction(k.callback)&&k.callback(a,e)},destroy:function(){!1!==this.callHook("beforeDestroy")&&(this.model.el.remove(),j.off(),b("#paginationjs-style").remove(),this.callHook("afterDestroy"))},previous:function(a){this.go(this.model.pageNumber-1,a)},next:function(a){this.go(this.model.pageNumber+1,a)},disable:function(){var 
a=this,b=a.isAsync?"async":"sync";!1!==a.callHook("beforeDisable",b)&&(a.disabled=!0,a.model.disabled=!0,a.callHook("afterDisable",b))},enable:function(){var a=this,b=a.isAsync?"async":"sync";!1!==a.callHook("beforeEnable",b)&&(a.disabled=!1,a.model.disabled=!1,a.callHook("afterEnable",b))},refresh:function(a){this.go(this.model.pageNumber,a)},show:function(){var a=this;a.model.el.is(":visible")||a.model.el.show()},hide:function(){var a=this;a.model.el.is(":visible")&&a.model.el.hide()},replaceVariables:function(a,b){var c;for(var d in b){var e=b[d],f=new RegExp("<%=\\s*"+d+"\\s*%>","img");c=(c||a).replace(f,e)}return c},getDataFragment:function(a){var b=k.pageSize,c=k.dataSource,d=this.getTotalNumber(),e=b*(a-1)+1,f=Math.min(a*b,d);return c.slice(e-1,f)},getTotalNumber:function(){return this.model.totalNumber||k.totalNumber||0},getTotalPage:function(){return Math.ceil(this.getTotalNumber()/k.pageSize)},getLocator:function(a){var d;return"string"==typeof a?d=a:b.isFunction(a)?d=a():c('"locator" is incorrect. (String | Function)'),d},filterDataByLocator:function(a){var d,e=this.getLocator(k.locator);if(i.isObject(a)){try{b.each(e.split("."),function(b,c){d=(d||a)[c]})}catch(a){}d?i.isArray(d)||c("dataSource."+e+" must be an Array."):c("dataSource."+e+" is undefined.")}return d||a},parseDataSource:function(a,d){var e=this;i.isObject(a)?d(k.dataSource=e.filterDataByLocator(a)):i.isArray(a)?d(k.dataSource=a):b.isFunction(a)?k.dataSource(function(a){i.isArray(a)||c('The parameter of "done" Function should be an Array.'),e.parseDataSource.call(e,a,d)}):"string"==typeof a?(/^https?|file:/.test(a)&&(k.ajaxDataType="jsonp"),d(a)):c('Unexpected type of "dataSource".')},callHook:function(c){var d,e=j.data("pagination"),f=Array.prototype.slice.apply(arguments);return f.shift(),k[c]&&b.isFunction(k[c])&&!1===k[c].apply(a,f)&&(d=!1),e.hooks&&e.hooks[c]&&b.each(e.hooks[c],function(b,c){!1===c.apply(a,f)&&(d=!1)}),!1!==d},observer:function(){var a=this,d=a.model.el;j.on(h+"go",function(d,e,f){(e=parseInt(b.trim(e)))&&(b.isNumeric(e)||c('"pageNumber" is incorrect. 
(Number)'),a.go(e,f))}),d.delegate(".J-paginationjs-page","click",function(c){var d=b(c.currentTarget),e=b.trim(d.attr("data-num"));if(e&&!d.hasClass(k.disableClassName)&&!d.hasClass(k.activeClassName))return!1!==a.callHook("beforePageOnClick",c,e)&&(a.go(e),a.callHook("afterPageOnClick",c,e),!!k.pageLink&&void 0)}),d.delegate(".J-paginationjs-previous","click",function(c){var d=b(c.currentTarget),e=b.trim(d.attr("data-num"));if(e&&!d.hasClass(k.disableClassName))return!1!==a.callHook("beforePreviousOnClick",c,e)&&(a.go(e),a.callHook("afterPreviousOnClick",c,e),!!k.pageLink&&void 0)}),d.delegate(".J-paginationjs-next","click",function(c){var d=b(c.currentTarget),e=b.trim(d.attr("data-num"));if(e&&!d.hasClass(k.disableClassName))return!1!==a.callHook("beforeNextOnClick",c,e)&&(a.go(e),a.callHook("afterNextOnClick",c,e),!!k.pageLink&&void 0)}),d.delegate(".J-paginationjs-go-button","click",function(c){var e=b(".J-paginationjs-go-pagenumber",d).val();if(!1===a.callHook("beforeGoButtonOnClick",c,e))return!1;j.trigger(h+"go",e),a.callHook("afterGoButtonOnClick",c,e)}),d.delegate(".J-paginationjs-go-pagenumber","keyup",function(c){if(13===c.which){var e=b(c.currentTarget).val();if(!1===a.callHook("beforeGoInputOnEnter",c,e))return!1;j.trigger(h+"go",e),b(".J-paginationjs-go-pagenumber",d).focus(),a.callHook("afterGoInputOnEnter",c,e)}}),j.on(h+"previous",function(b,c){a.previous(c)}),j.on(h+"next",function(b,c){a.next(c)}),j.on(h+"disable",function(){a.disable()}),j.on(h+"enable",function(){a.enable()}),j.on(h+"refresh",function(b,c){a.refresh(c)}),j.on(h+"show",function(){a.show()}),j.on(h+"hide",function(){a.hide()}),j.on(h+"destroy",function(){a.destroy()});var e=Math.max(a.getTotalPage(),1),f=k.pageNumber;a.isDynamicTotalNumber&&(f=1),k.triggerPagingOnInit&&j.trigger(h+"go",Math.min(f,e))}};if(j.data("pagination")&&!0===j.data("pagination").initialized){if(b.isNumeric(f))return j.trigger.call(this,h+"go",f,arguments[1]),this;if("string"==typeof f){var m=Array.prototype.slice.apply(arguments);switch(m[0]=h+m[0],f){case"previous":case"next":case"go":case"disable":case"enable":case"refresh":case"show":case"hide":case"destroy":j.trigger.apply(this,m);break;case"getSelectedPageNum":return j.data("pagination").model?j.data("pagination").model.pageNumber:j.data("pagination").attributes.pageNumber;case"getTotalPage":return Math.ceil(j.data("pagination").model.totalNumber/j.data("pagination").model.pageSize);case"getSelectedPageData":return j.data("pagination").currentPageData;case"isDisabled":return!0===j.data("pagination").model.disabled;default:c("Unknown action: "+f)}return this}e(j)}else i.isObject(f)||c("Illegal options");return d(k),l.initialize(),this},b.fn[g].defaults={totalNumber:0,pageNumber:1,pageSize:10,pageRange:2,showPrevious:!0,showNext:!0,showPageNumbers:!0,showNavigator:!1,showGoInput:!1,showGoButton:!1,pageLink:"",prevText:"&laquo;",nextText:"&raquo;",ellipsisText:"...",goButtonText:"Go",classPrefix:"paginationjs",activeClassName:"active",disableClassName:"disabled",inlineStyle:!0,formatNavigator:"<%= currentPage %> / <%= totalPage %>",formatGoInput:"<%= input %>",formatGoButton:"<%= button %>",position:"bottom",autoHidePrevious:!1,autoHideNext:!1,triggerPagingOnInit:!0,hideWhenLessThanOnePage:!1,showFirstOnEllipsisShow:!0,showLastOnEllipsisShow:!0,callback:function(){}},b.fn.addHook=function(a,d){arguments.length<2&&c("Missing argument."),b.isFunction(d)||c("callback must be a function.");var 
e=b(this),f=e.data("pagination");f||(e.data("pagination",{}),f=e.data("pagination")),!f.hooks&&(f.hooks={}),f.hooks[a]=f.hooks[a]||[],f.hooks[a].push(d)},b[g]=function(a,d){arguments.length<2&&c("Requires two parameters.");var e;if(e="string"!=typeof a&&a instanceof jQuery?a:b(a),e.length)return e.pagination(d),e};var i={};b.each(["Object","Array","String"],function(a,b){i["is"+b]=function(a){return f(a)===b.toLowerCase()}}),"function"==typeof define&&define.amd&&define(function(){return b})}(this,window.jQuery);
\ No newline at end of file
......@@ -4,12 +4,9 @@
* SPDX-License-Identifier: MIT
*/
"use strict";
window.addEventListener('DOMContentLoaded', () => {
$(`<button class="menuButton semiBold h2"> Open Task </button>`).on('click', () => {
let win = window.open(`${window.location.origin }/dashboard/?jid=${window.cvat.job.id}`, '_blank');
$('<button class="menuButton semiBold h2"> Open Task </button>').on('click', () => {
const win = window.open(`${window.location.origin}/dashboard/?id=${window.cvat.job.task_id}`, '_blank');
win.focus();
}).prependTo('#engineMenuButtons');
});
......@@ -7,7 +7,7 @@ from django.urls import path
from . import views
urlpatterns = [
path('get_share_nodes', views.JsTreeView),
path('', views.DashboardView),
path('meta', views.DashboardMeta),
]
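# Illustrative sketch (assumption): how an app-level urls.py like the one above
# is typically mounted from the project URLconf in Django; the "dashboard/"
# prefix matches the "/dashboard/?id=..." link opened by the engine plugin above.
from django.urls import include, path

urlpatterns = [
    path('dashboard/', include('cvat.apps.dashboard.urls')),
]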
(2 collapsed file diffs are not shown here.)
# Copyright (C) 2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
from cvat.settings.base import JS_3RDPARTY
JS_3RDPARTY['engine'] = JS_3RDPARTY.get('engine', []) + ['dextr_segmentation/js/enginePlugin.js']
(70 collapsed file diffs are not shown here.)