diff --git a/CHANGELOG.md b/CHANGELOG.md index 836a26f923f9aa8e5784ca1f7557b56b8ddbc49c..d39abcbd12f4c85c0b820a62d41504f98b16d363 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Support of context images for 2D image tasks () - Filter `is_active` for user list () +- Ability to export/import tasks () + ### Changed diff --git a/cvat-core/src/server-proxy.js b/cvat-core/src/server-proxy.js index cf31969a302dd75b516a86c82b00950f8a3267b1..524ceeafd70164db7fb3dee3357dccfeb800f607 100644 --- a/cvat-core/src/server-proxy.js +++ b/cvat-core/src/server-proxy.js @@ -490,6 +490,59 @@ }); } + async function exportTask(id) { + const { backendAPI } = config; + const url = `${backendAPI}/tasks/${id}`; + + return new Promise((resolve, reject) => { + async function request() { + try { + const response = await Axios.get(`${url}?action=export`, { + proxy: config.proxy, + }); + if (response.status === 202) { + setTimeout(request, 3000); + } else { + resolve(`${url}?action=download`); + } + } catch (errorData) { + reject(generateError(errorData)); + } + } + + setTimeout(request); + }); + } + + async function importTask(file) { + const { backendAPI } = config; + + let taskData = new FormData(); + taskData.append('task_file', file); + + return new Promise((resolve, reject) => { + async function request() { + try { + const response = await Axios.post(`${backendAPI}/tasks?action=import`, taskData, { + proxy: config.proxy, + }); + if (response.status === 202) { + taskData = new FormData(); + taskData.append('rq_id', response.data.rq_id); + setTimeout(request, 3000); + } else { + const importedTask = await getTasks(`?id=${response.data.id}`); + resolve(importedTask[0]); + } + } catch (errorData) { + reject(generateError(errorData)); + } + } + + setTimeout(request); + }); + } + async function createTask(taskSpec, taskDataSpec, onUpdate) { const { backendAPI } = config; @@ -1157,6 +1210,8 @@ createTask, deleteTask, exportDataset, + exportTask, + importTask, }), writable: false, }, diff --git a/cvat-core/src/session.js b/cvat-core/src/session.js index 7c9b9665b684e7ffef38219628cb3c062ecddb71..a1136030fd9ef01ba216f82b590cf2d907a9aa09 100644 --- a/cvat-core/src/session.js +++ b/cvat-core/src/session.js @@ -1664,6 +1664,36 @@ const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.delete); return result; } + + /** + * Method makes a backup of a task + * @method export + * @memberof module:API.cvat.classes.Task + * @readonly + * @instance + * @async + * @throws {module:API.cvat.exceptions.ServerError} + * @throws {module:API.cvat.exceptions.PluginError} + */ + async export() { + const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.export); + return result; + } + + /** + * Method imports a task from a backup + * @method import + * @memberof module:API.cvat.classes.Task + * @readonly + * @instance + * @async + * @throws {module:API.cvat.exceptions.ServerError} + * @throws {module:API.cvat.exceptions.PluginError} + */ + static async import(file) { + const result = await PluginRegistry.apiWrapper.call(this, Task.import, file); + return result; + } } module.exports = { @@ -2073,6 +2103,16 @@ return result; }; + Task.prototype.export.implementation = async function () { + const result = await serverProxy.tasks.exportTask(this.id); + return result; + }; + + Task.import.implementation = async function (file) { + const result = await serverProxy.tasks.importTask(file); + return result; + }; + 
Task.prototype.frames.get.implementation = async function (frame, isPlaying, step) { if (!Number.isInteger(frame) || frame < 0) { throw new ArgumentError(`Frame must be a positive integer. Got: "${frame}"`); diff --git a/cvat-ui/package-lock.json b/cvat-ui/package-lock.json index e7741951972c6500c0d695a01cce32c4c06d765c..137df02c974fd5689e1a79a9ab478d2ac5eb1f57 100644 --- a/cvat-ui/package-lock.json +++ b/cvat-ui/package-lock.json @@ -53735,9 +53735,9 @@ } }, "rc-menu": { - "version": "8.10.6", - "resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-8.10.6.tgz", - "integrity": "sha512-RVkd8XChwSmVOdNULbqLNnABthRZWnhqct1Q74onEXTClsXvsLADMhlIJtw/umglVSECM+14TJdIli9rl2Bzlw==", + "version": "8.10.7", + "resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-8.10.7.tgz", + "integrity": "sha512-m/ypV7OjkkUsMdutzMUxEI8tWyi0Y1TQ5YkSDk7k2uv2aCKkHYEoDKsDAfcPeejo3HMo2z5unWE+jD+dCphraw==", "requires": { "@babel/runtime": "^7.10.1", "classnames": "2.x", @@ -53758,9 +53758,9 @@ } }, "rc-util": { - "version": "5.9.4", - "resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.9.4.tgz", - "integrity": "sha512-pzFmYZsKLJ1p+Uv4NqA4aNBaFh8/hOQxOOxA5G4TiyPboa0o/PjminxUCKvoSwVJVW5YgleSM2XPCTpTV6DCsQ==", + "version": "5.9.8", + "resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.9.8.tgz", + "integrity": "sha512-typLSHYGf5irvGLYQshs0Ra3aze086h0FhzsAkyirMunYZ7b3Te8gKa5PVaanoHaZa9sS6qx98BxgysoRP+6Tw==", "requires": { "@babel/runtime": "^7.12.5", "react-is": "^16.12.0", diff --git a/cvat-ui/package.json b/cvat-ui/package.json index 578538b8b8a0f8f5c565415f1faab73d16488c5e..9a36d5d6e530d63d4638dd2762b790e66f461a2b 100644 --- a/cvat-ui/package.json +++ b/cvat-ui/package.json @@ -73,6 +73,7 @@ "mousetrap": "^1.6.5", "platform": "^1.3.6", "prop-types": "^15.7.2", + "rc-menu": "^8.10.7", "react": "^16.14.0", "react-awesome-query-builder": "^3.0.0", "react-color": "^2.19.3", diff --git a/cvat-ui/src/actions/tasks-actions.ts b/cvat-ui/src/actions/tasks-actions.ts index cdb993cd1cb062ef1df2e434cceb38c3be49d5d6..c0fabf97ecdf538757bf1f550d4dd12146b3c763 100644 --- a/cvat-ui/src/actions/tasks-actions.ts +++ b/cvat-ui/src/actions/tasks-actions.ts @@ -35,6 +35,12 @@ export enum TasksActionTypes { UPDATE_TASK_SUCCESS = 'UPDATE_TASK_SUCCESS', UPDATE_TASK_FAILED = 'UPDATE_TASK_FAILED', HIDE_EMPTY_TASKS = 'HIDE_EMPTY_TASKS', + EXPORT_TASK = 'EXPORT_TASK', + EXPORT_TASK_SUCCESS = 'EXPORT_TASK_SUCCESS', + EXPORT_TASK_FAILED = 'EXPORT_TASK_FAILED', + IMPORT_TASK = 'IMPORT_TASK', + IMPORT_TASK_SUCCESS = 'IMPORT_TASK_SUCCESS', + IMPORT_TASK_FAILED = 'IMPORT_TASK_FAILED', SWITCH_MOVE_TASK_MODAL_VISIBLE = 'SWITCH_MOVE_TASK_MODAL_VISIBLE', } @@ -214,6 +220,49 @@ export function loadAnnotationsAsync( }; } +function importTask(): AnyAction { + const action = { + type: TasksActionTypes.IMPORT_TASK, + payload: {}, + }; + + return action; +} + +function importTaskSuccess(task: any): AnyAction { + const action = { + type: TasksActionTypes.IMPORT_TASK_SUCCESS, + payload: { + task, + }, + }; + + return action; +} + +function importTaskFailed(error: any): AnyAction { + const action = { + type: TasksActionTypes.IMPORT_TASK_FAILED, + payload: { + error, + }, + }; + + return action; +} + +export function importTaskAsync(file: File): ThunkAction<Promise<void>, {}, {}, AnyAction> { + return async (dispatch: ActionCreator<Dispatch>): Promise<void> => { + try { + dispatch(importTask()); + const taskInstance = await cvat.classes.Task.import(file); + dispatch(importTaskSuccess(taskInstance)); + } catch (error) { + dispatch(importTaskFailed(error)); + } + }; +} + 
function exportDataset(task: any, exporter: any): AnyAction { const action = { type: TasksActionTypes.EXPORT_DATASET, @@ -268,6 +317,56 @@ export function exportDatasetAsync(task: any, exporter: any): ThunkAction<Promise<void>, {}, {}, AnyAction> { +function exportTask(taskID: number): AnyAction { + const action = { + type: TasksActionTypes.EXPORT_TASK, + payload: { + taskID, + }, + }; + + return action; +} + +function exportTaskSuccess(taskID: number): AnyAction { + const action = { + type: TasksActionTypes.EXPORT_TASK_SUCCESS, + payload: { + taskID, + }, + }; + + return action; +} + +function exportTaskFailed(taskID: number, error: any): AnyAction { + const action = { + type: TasksActionTypes.EXPORT_TASK_FAILED, + payload: { + taskID, + error, + }, + }; + + return action; +} + +export function exportTaskAsync(taskInstance: any): ThunkAction<Promise<void>, {}, {}, AnyAction> { + return async (dispatch: ActionCreator<Dispatch>): Promise<void> => { + dispatch(exportTask(taskInstance.id)); + + try { + const url = await taskInstance.export(); + const downloadAnchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement; + downloadAnchor.href = url; + downloadAnchor.click(); + dispatch(exportTaskSuccess(taskInstance.id)); + } catch (error) { + dispatch(exportTaskFailed(taskInstance.id, error)); + } + }; +} + function deleteTask(taskID: number): AnyAction { const action = { type: TasksActionTypes.DELETE_TASK, diff --git a/cvat-ui/src/components/actions-menu/actions-menu.tsx b/cvat-ui/src/components/actions-menu/actions-menu.tsx index eed1db757567e916625f619163d650773f23b7ca..aa4d2acfd50616ef8095d997b29eaaec8d72b60a 100644 --- a/cvat-ui/src/components/actions-menu/actions-menu.tsx +++ b/cvat-ui/src/components/actions-menu/actions-menu.tsx @@ -6,6 +6,7 @@ import './styles.scss'; import React from 'react'; import Menu from 'antd/lib/menu'; import Modal from 'antd/lib/modal'; +import { LoadingOutlined } from '@ant-design/icons'; // eslint-disable-next-line import/no-extraneous-dependencies import { MenuInfo } from 'rc-menu/lib/interface'; import DumpSubmenu from './dump-submenu'; @@ -25,6 +26,7 @@ interface Props { inferenceIsActive: boolean; taskDimension: DimensionType; onClickMenu: (params: MenuInfo, file?: File) => void; + exportIsActive: boolean; } export enum Actions { @@ -35,6 +37,7 @@ export enum Actions { RUN_AUTO_ANNOTATION = 'run_auto_annotation', MOVE_TASK_TO_PROJECT = 'move_task_to_project', OPEN_BUG_TRACKER = 'open_bug_tracker', + EXPORT_TASK = 'export_task', } export default function ActionsMenuComponent(props: Props): JSX.Element { @@ -50,6 +53,7 @@ export default function ActionsMenuComponent(props: Props): JSX.Element { exportActivities, loadActivity, taskDimension, + exportIsActive, } = props; let latestParams: MenuInfo | null = null; @@ -128,6 +132,10 @@ export default function ActionsMenuComponent(props: Props): JSX.Element { Automatic annotation + <Menu.Item key={Actions.EXPORT_TASK} disabled={exportIsActive}> + {exportIsActive && <LoadingOutlined id='cvat-export-task-loading' />} + Export Task + </Menu.Item>
Move to project Delete diff --git a/cvat-ui/src/components/actions-menu/styles.scss b/cvat-ui/src/components/actions-menu/styles.scss index ccb003064759197c0979bf1f897766ec2601ae84..1b69092249afb5444c3ec7462dbcd2b843d79765 100644 --- a/cvat-ui/src/components/actions-menu/styles.scss +++ b/cvat-ui/src/components/actions-menu/styles.scss @@ -48,3 +48,7 @@ .cvat-menu-icon { transform: scale(0.5); } + +#cvat-export-task-loading { + margin-left: 10px; +} diff --git a/cvat-ui/src/components/shortcuts-dialog/shortcuts-dialog.tsx b/cvat-ui/src/components/shortcuts-dialog/shortcuts-dialog.tsx index fc03ddacb0ee1cf8a55efbaf1b8c4943f3da69c3..6d12ca5ba284fa8ea90037c866e82c0c236f4031 100644 --- a/cvat-ui/src/components/shortcuts-dialog/shortcuts-dialog.tsx +++ b/cvat-ui/src/components/shortcuts-dialog/shortcuts-dialog.tsx @@ -95,7 +95,12 @@ function ShortcutsDialog(props: StateToProps & DispatchToProps): JSX.Element | n zIndex={1001} /* default antd is 1000 */ className='cvat-shortcuts-modal-window' > - +
); } diff --git a/cvat-ui/src/components/tasks-page/styles.scss b/cvat-ui/src/components/tasks-page/styles.scss index 73ad1e575b4b4ef547252f016f3f6f1160aaaa07..2c6dce8ce447d4297d9d1f9c8296a5f70f26b8b1 100644 --- a/cvat-ui/src/components/tasks-page/styles.scss +++ b/cvat-ui/src/components/tasks-page/styles.scss @@ -11,6 +11,23 @@ height: 100%; width: 100%; + .cvat-tasks-page-top-bar { + > div:nth-child(1) { + > div:nth-child(1) { + width: 100%; + + > div:nth-child(1) { + display: flex; + + > span:nth-child(2) { + width: 200px; + margin-left: 10px; + } + } + } + } + } + > div:nth-child(2) { height: 83%; padding-top: 10px; @@ -19,22 +36,6 @@ > div:nth-child(3) { padding-top: 10px; } - - > div:nth-child(1) { - > div:nth-child(1) { - display: flex; - - > span:nth-child(2) { - width: 200px; - margin-left: 10px; - } - } - - > div:nth-child(2) { - display: flex; - justify-content: flex-end; - } - } } /* empty-tasks icon */ @@ -157,3 +158,11 @@ #cvat-create-task-button { padding: 0 30px; } + +#cvat-import-task-button { + padding: 0 30px; +} + +#cvat-import-task-button-loading { + margin-left: 10px; +} diff --git a/cvat-ui/src/components/tasks-page/tasks-page.tsx b/cvat-ui/src/components/tasks-page/tasks-page.tsx index daafb44d976e483536d37b45ea2a19cd3f868629..6e0df457716e39185a14d3f0ef26799088da7cda 100644 --- a/cvat-ui/src/components/tasks-page/tasks-page.tsx +++ b/cvat-ui/src/components/tasks-page/tasks-page.tsx @@ -1,4 +1,4 @@ -// Copyright (C) 2020 Intel Corporation +// Copyright (C) 2020-2021 Intel Corporation // // SPDX-License-Identifier: MIT @@ -25,6 +25,8 @@ interface TasksPageProps { numberOfHiddenTasks: number; onGetTasks: (gettingQuery: TasksQuery) => void; hideEmptyTasks: (hideEmpty: boolean) => void; + onImportTask: (file: File) => void; + taskImporting: boolean; } function getSearchField(gettingQuery: TasksQuery): string { @@ -81,9 +83,20 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteComponentProps> { @@ -194,7 +209,12 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteComponentProps> { - + {numberOfVisibleTasks ? 
( ) : ( diff --git a/cvat-ui/src/components/tasks-page/top-bar.tsx b/cvat-ui/src/components/tasks-page/top-bar.tsx index 1e4ac48674d65783525d65337676bb70ed102f18..af8aa1ba319a46146812889650e3387b978b49b4 100644 --- a/cvat-ui/src/components/tasks-page/top-bar.tsx +++ b/cvat-ui/src/components/tasks-page/top-bar.tsx @@ -5,50 +5,83 @@ import React from 'react'; import { useHistory } from 'react-router'; import { Row, Col } from 'antd/lib/grid'; -import { PlusOutlined } from '@ant-design/icons'; +import { PlusOutlined, UploadOutlined, LoadingOutlined } from '@ant-design/icons'; import Button from 'antd/lib/button'; import Input from 'antd/lib/input'; import Text from 'antd/lib/typography/Text'; +import Upload from 'antd/lib/upload'; import SearchTooltip from 'components/search-tooltip/search-tooltip'; interface VisibleTopBarProps { onSearch: (value: string) => void; + onFileUpload(file: File): void; searchValue: string; + taskImporting: boolean; } export default function TopBarComponent(props: VisibleTopBarProps): JSX.Element { - const { searchValue, onSearch } = props; + const { + searchValue, onSearch, onFileUpload, taskImporting, + } = props; const history = useHistory(); return ( - <> - - - Tasks - - - - - - - - - + + + + + Tasks + + + + + + + + { + onFileUpload(file); + return false; + }} + > + + + + + + + + + + + ); } diff --git a/cvat-ui/src/containers/actions-menu/actions-menu.tsx b/cvat-ui/src/containers/actions-menu/actions-menu.tsx index f83072bc72cd8e5763bdee2a9655bf19ab2d1a3f..5923928c0a5900b458c162d63e0499bea8244945 100644 --- a/cvat-ui/src/containers/actions-menu/actions-menu.tsx +++ b/cvat-ui/src/containers/actions-menu/actions-menu.tsx @@ -16,6 +16,7 @@ import { loadAnnotationsAsync, exportDatasetAsync, deleteTaskAsync, + exportTaskAsync, switchMoveTaskModalVisible, } from 'actions/tasks-actions'; @@ -29,6 +30,7 @@ interface StateToProps { dumpActivities: string[] | null; exportActivities: string[] | null; inferenceIsActive: boolean; + exportIsActive: boolean; } interface DispatchToProps { @@ -37,6 +39,7 @@ interface DispatchToProps { exportDataset: (taskInstance: any, exporter: any) => void; deleteTask: (taskInstance: any) => void; openRunModelWindow: (taskInstance: any) => void; + exportTask: (taskInstance: any) => void; openMoveTaskToProjectWindow: (taskInstance: any) => void; } @@ -48,7 +51,9 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { const { formats: { annotationFormats }, tasks: { - activities: { dumps, loads, exports: activeExports }, + activities: { + dumps, loads, exports: activeExports, backups, + }, }, } = state; @@ -58,6 +63,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps { loadActivity: tid in loads ? 
loads[tid] : null, annotationFormats, inferenceIsActive: tid in state.models.inferences, + exportIsActive: tid in backups, }; } @@ -78,6 +84,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps { openRunModelWindow: (taskInstance: any): void => { dispatch(modelsActions.showRunModelDialog(taskInstance)); }, + exportTask: (taskInstance: any): void => { + dispatch(exportTaskAsync(taskInstance)); + }, openMoveTaskToProjectWindow: (taskId: number): void => { dispatch(switchMoveTaskModalVisible(true, taskId)); }, @@ -92,12 +101,14 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps): dumpActivities, exportActivities, inferenceIsActive, + exportIsActive, loadAnnotations, dumpAnnotations, exportDataset, deleteTask, openRunModelWindow, + exportTask, openMoveTaskToProjectWindow, } = props; @@ -131,6 +142,8 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps): window.open(`${taskInstance.bugTracker}`, '_blank'); } else if (action === Actions.RUN_AUTO_ANNOTATION) { openRunModelWindow(taskInstance); + } else if (action === Actions.EXPORT_TASK) { + exportTask(taskInstance); } else if (action === Actions.MOVE_TASK_TO_PROJECT) { openMoveTaskToProjectWindow(taskInstance.id); } @@ -150,6 +163,7 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps): inferenceIsActive={inferenceIsActive} onClickMenu={onClickMenu} taskDimension={taskInstance.dimension} + exportIsActive={exportIsActive} /> ); } diff --git a/cvat-ui/src/containers/tasks-page/tasks-page.tsx b/cvat-ui/src/containers/tasks-page/tasks-page.tsx index b18aaf00fae6211c0d16fe090dff9e18b6604746..54a7f6c1044c52e90c362391a2461194f96c580f 100644 --- a/cvat-ui/src/containers/tasks-page/tasks-page.tsx +++ b/cvat-ui/src/containers/tasks-page/tasks-page.tsx @@ -8,7 +8,7 @@ import { Task, TasksQuery, CombinedState } from 'reducers/interfaces'; import TasksPageComponent from 'components/tasks-page/tasks-page'; -import { getTasksAsync, hideEmptyTasks } from 'actions/tasks-actions'; +import { getTasksAsync, hideEmptyTasks, importTaskAsync } from 'actions/tasks-actions'; interface StateToProps { tasksFetching: boolean; @@ -16,11 +16,13 @@ interface StateToProps { numberOfTasks: number; numberOfVisibleTasks: number; numberOfHiddenTasks: number; + taskImporting: boolean; } interface DispatchToProps { onGetTasks: (gettingQuery: TasksQuery) => void; hideEmptyTasks: (hideEmpty: boolean) => void; + onImportTask: (file: File) => void; } function mapStateToProps(state: CombinedState): StateToProps { @@ -34,6 +36,7 @@ function mapStateToProps(state: CombinedState): StateToProps { numberOfHiddenTasks: tasks.hideEmpty ? 
tasks.current.filter((task: Task): boolean => !task.instance.jobs.length).length : 0, + taskImporting: state.tasks.importing, }; } @@ -45,6 +48,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps { hideEmptyTasks: (hideEmpty: boolean): void => { dispatch(hideEmptyTasks(hideEmpty)); }, + onImportTask: (file: File): void => { + dispatch(importTaskAsync(file)); + }, }; } diff --git a/cvat-ui/src/reducers/interfaces.ts b/cvat-ui/src/reducers/interfaces.ts index 9faf068b46d6ce179ccad61e77493a6425fdf47d..d00b5543fa2eb6d03fb79d0319eb70b08e60cf0b 100644 --- a/cvat-ui/src/reducers/interfaces.ts +++ b/cvat-ui/src/reducers/interfaces.ts @@ -73,6 +73,7 @@ export interface Task { } export interface TasksState { + importing: boolean; initialized: boolean; fetching: boolean; updating: boolean; @@ -105,6 +106,9 @@ export interface TasksState { status: string; error: string; }; + backups: { + [tid: number]: boolean; + }; }; } @@ -249,9 +253,11 @@ export interface NotificationsState { updating: null | ErrorState; dumping: null | ErrorState; loading: null | ErrorState; - exporting: null | ErrorState; + exportingAsDataset: null | ErrorState; deleting: null | ErrorState; creating: null | ErrorState; + exporting: null | ErrorState; + importing: null | ErrorState; moving: null | ErrorState; }; formats: { @@ -318,6 +324,7 @@ export interface NotificationsState { messages: { tasks: { loadingDone: string; + importingDone: string; movingDone: string; }; models: { diff --git a/cvat-ui/src/reducers/notifications-reducer.ts b/cvat-ui/src/reducers/notifications-reducer.ts index e6dce4bffcad4a6fc64532423d82e9d4970807a5..e09db0b2cd6ae6cbbdf9a134b10708e98a1facd6 100644 --- a/cvat-ui/src/reducers/notifications-reducer.ts +++ b/cvat-ui/src/reducers/notifications-reducer.ts @@ -42,9 +42,11 @@ const defaultState: NotificationsState = { updating: null, dumping: null, loading: null, - exporting: null, + exportingAsDataset: null, deleting: null, creating: null, + exporting: null, + importing: null, moving: null, }, formats: { @@ -111,6 +113,7 @@ const defaultState: NotificationsState = { messages: { tasks: { loadingDone: '', + importingDone: '', movingDone: '', }, models: { @@ -313,7 +316,7 @@ export default function (state = defaultState, action: AnyAction): Notifications ...state.errors, tasks: { ...state.errors.tasks, - exporting: { + exportingAsDataset: { message: 'Could not export dataset for the ' + `task ${taskID}`, @@ -389,24 +392,6 @@ export default function (state = defaultState, action: AnyAction): Notifications }, }; } - case TasksActionTypes.MOVE_TASK_TO_PROJECT_FAILED: { - const taskID = action.payload.task.id; - return { - ...state, - errors: { - ...state.errors, - tasks: { - ...state.errors.tasks, - moving: { - message: - 'Could not move the' + - `task ${taskID} to a project`, - reason: action.payload.error.toString(), - }, - }, - }, - }; - } case TasksActionTypes.DUMP_ANNOTATIONS_FAILED: { const taskID = action.payload.task.id; return { @@ -460,16 +445,45 @@ export default function (state = defaultState, action: AnyAction): Notifications }, }; } - case TasksActionTypes.MOVE_TASK_TO_PROJECT_SUCCESS: { - const { id: taskId, projectId } = action.payload.task; - + case TasksActionTypes.EXPORT_TASK_FAILED: { + return { + ...state, + errors: { + ...state.errors, + tasks: { + ...state.errors.tasks, + exporting: { + message: 'Could not export the task', + reason: action.payload.error.toString(), + }, + }, + }, + }; + } + case TasksActionTypes.IMPORT_TASK_FAILED: { + return { + ...state, + errors: { + 
...state.errors, + tasks: { + ...state.errors.tasks, + importing: { + message: 'Could not import the task', + reason: action.payload.error.toString(), + }, + }, + }, + }; + } + case TasksActionTypes.IMPORT_TASK_SUCCESS: { + const taskID = action.payload.task.id; return { ...state, messages: { ...state.messages, tasks: { ...state.messages.tasks, - movingDone: `The task #${taskId} has been successfully moved to the project #${projectId}`, + importingDone: `Task has been imported successfully <a href="/tasks/${taskID}">Open task</a>`, }, }, }; diff --git a/cvat-ui/src/reducers/tasks-reducer.ts b/cvat-ui/src/reducers/tasks-reducer.ts index bebb9a628d390b45faadeb3ba97acbce2c467d52..78236132a9e31074c37e26653643e3f5d1ec8f40 100644 --- a/cvat-ui/src/reducers/tasks-reducer.ts +++ b/cvat-ui/src/reducers/tasks-reducer.ts @@ -3,6 +3,7 @@ // SPDX-License-Identifier: MIT import { AnyAction } from 'redux'; +import { omit } from 'lodash'; import { BoundariesActionTypes } from 'actions/boundaries-actions'; import { TasksActionTypes } from 'actions/tasks-actions'; import { AuthActionTypes } from 'actions/auth-actions'; @@ -40,7 +41,9 @@ const defaultState: TasksState = { status: '', error: '', }, + backups: {}, }, + importing: false, }; export default (state: TasksState = defaultState, action: AnyAction): TasksState => { @@ -242,6 +245,49 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState }, }; } + case TasksActionTypes.EXPORT_TASK: { + const { taskID } = action.payload; + const { backups } = state.activities; + + return { + ...state, + activities: { + ...state.activities, + backups: { + ...backups, + [taskID]: true, + }, + }, + }; + } + case TasksActionTypes.EXPORT_TASK_FAILED: + case TasksActionTypes.EXPORT_TASK_SUCCESS: { + const { taskID } = action.payload; + const { backups } = state.activities; + + return { + ...state, + activities: { + ...state.activities, + backups: omit(backups, [taskID]), + }, + }; + } + case TasksActionTypes.IMPORT_TASK: { + return { + ...state, + importing: true, + }; + } + case TasksActionTypes.IMPORT_TASK_FAILED: + case TasksActionTypes.IMPORT_TASK_SUCCESS: { + return { + ...state, + importing: false, + }; + } case TasksActionTypes.CREATE_TASK: { return { ...state, diff --git a/cvat/apps/dataset_manager/views.py b/cvat/apps/dataset_manager/views.py index b622eaa65af22355a55df9937143eae9cc51f14a..36fcea63fd3a7cfdc4c74371fa84213c93ceee8d 100644 --- a/cvat/apps/dataset_manager/views.py +++ b/cvat/apps/dataset_manager/views.py @@ -8,18 +8,18 @@ import tempfile from datetime import timedelta import django_rq +from datumaro.cli.util import make_file_name +from datumaro.util import to_snake_case from django.utils import timezone import cvat.apps.dataset_manager.task as task +from cvat.apps.engine.backup import TaskExporter from cvat.apps.engine.log import slogger from cvat.apps.engine.models import Task -from datumaro.cli.util import make_file_name -from datumaro.util import to_snake_case from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS from .util import current_function_name - _MODULE_NAME = __package__ + '.' 
+ osp.splitext(osp.basename(__file__))[0] def log_exception(logger=None, exc_info=True): if logger is None: @@ -97,6 +97,40 @@ def clear_export_cache(task_id, file_path, file_ctime): log_exception(slogger.task[task_id]) raise +def backup_task(task_id, output_path): + try: + db_task = Task.objects.get(pk=task_id) + + cache_dir = get_export_cache_dir(db_task) + output_path = osp.join(cache_dir, output_path) + + task_time = timezone.localtime(db_task.updated_date).timestamp() + if not (osp.exists(output_path) and \ + task_time <= osp.getmtime(output_path)): + os.makedirs(cache_dir, exist_ok=True) + with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir: + temp_file = osp.join(temp_dir, 'dump') + task_exporter = TaskExporter(task_id) + task_exporter.export_to(temp_file) + os.replace(temp_file, output_path) + + archive_ctime = osp.getctime(output_path) + scheduler = django_rq.get_scheduler() + cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL, + func=clear_export_cache, + task_id=task_id, + file_path=output_path, file_ctime=archive_ctime) + slogger.task[task_id].info( + "The task '{}' is backed up at '{}' " + "and available for downloading for the next {}. " + "Export cache cleaning job is enqueued, id '{}'".format( + db_task.name, output_path, CACHE_TTL, + cleaning_job.id)) + + return output_path + except Exception: + log_exception(slogger.task[task_id]) + raise def get_export_formats(): return list(EXPORT_FORMATS.values()) @@ -108,4 +142,4 @@ def get_all_formats(): return { 'importers': get_import_formats(), 'exporters': get_export_formats(), - } \ No newline at end of file + } diff --git a/cvat/apps/documentation/backup_hld.md b/cvat/apps/documentation/backup_hld.md new file mode 100644 index 0000000000000000000000000000000000000000..ee6309cd10afc9dd77854d6d2cbe058b3d395c73 --- /dev/null +++ b/cvat/apps/documentation/backup_hld.md @@ -0,0 +1,54 @@ +## Task and Project Import/Export functionality + +This document describes the high-level design of the import/export functionality for tasks and projects. + +API endpoints: + +- Import task + + - endpoint: `/api/v1/tasks?action=import` + - method: `POST` + - Content-Type: `multipart/form-data` + - returns: JSON + +- Export task + + - endpoint: `/api/v1/tasks/{id}?action=export` + - method: `GET` + - returns: zip archive + + The zip archive has the following structure: + + ``` + . + ├── data + │   ├── {user uploaded data} + │   ├── manifest.jsonl + ├── task.json + └── annotations.json + ``` + +- Import project + + - endpoint: `/api/v1/projects?action=import` + - method: `POST` + - Content-Type: `multipart/form-data` + - returns: JSON + +- Export project + + - endpoint: `/api/v1/projects/{id}?action=export` + - method: `GET` + - returns: zip archive + + The zip archive has the following structure: + + ``` + . + ├── tasks + │   ├── task_1 + │   ├── task_2 + │   ├── ... 
+ │ └── task_N + └── project.json + ``` diff --git a/cvat/apps/engine/backup.py b/cvat/apps/engine/backup.py new file mode 100644 index 0000000000000000000000000000000000000000..da42cab6b305348e00dd65b90615a4119c7c4796 --- /dev/null +++ b/cvat/apps/engine/backup.py @@ -0,0 +1,550 @@ +# Copyright (C) 2021 Intel Corporation +# +# SPDX-License-Identifier: MIT + +import io +import os +from enum import Enum +import shutil +from zipfile import ZipFile + +from django.conf import settings +from django.db import transaction +from rest_framework.parsers import JSONParser +from rest_framework.renderers import JSONRenderer + +import cvat.apps.dataset_manager as dm +from cvat.apps.engine import models +from cvat.apps.engine.log import slogger +from cvat.apps.engine.serializers import (AttributeSerializer, DataSerializer, + LabeledDataSerializer, SegmentSerializer, SimpleJobSerializer, TaskSerializer, + ReviewSerializer, IssueSerializer, CommentSerializer) +from cvat.apps.engine.utils import av_scan_paths +from cvat.apps.engine.models import StorageChoice, StorageMethodChoice, DataChoice +from cvat.apps.engine.task import _create_thread + + +class Version(Enum): + V1 = '1.0' + +class _TaskBackupBase(): + MANIFEST_FILENAME = 'task.json' + ANNOTATIONS_FILENAME = 'annotations.json' + DATA_DIRNAME = 'data' + TASK_DIRNAME = 'task' + + def _prepare_meta(self, allowed_keys, meta): + keys_to_drop = set(meta.keys()) - allowed_keys + if keys_to_drop: + logger = slogger.task[self._db_task.id] if hasattr(self, '_db_task') else slogger.glob + + logger.warning('the following keys are dropped {}'.format(keys_to_drop)) + for key in keys_to_drop: + del meta[key] + + return meta + + def _prepare_task_meta(self, task): + allowed_fields = { + 'name', + 'bug_tracker', + 'status', + 'subset', + 'labels', + } + + return self._prepare_meta(allowed_fields, task) + + def _prepare_data_meta(self, data): + allowed_fields = { + 'chunk_size', + 'image_quality', + 'start_frame', + 'stop_frame', + 'frame_filter', + 'chunk_type', + 'storage_method', + 'storage', + } + + self._prepare_meta(allowed_fields, data) + if 'frame_filter' in data and not data['frame_filter']: + data.pop('frame_filter') + + return data + + def _prepare_job_meta(self, job): + allowed_fields = { + 'status', + } + return self._prepare_meta(allowed_fields, job) + + def _prepare_attribute_meta(self, attribute): + allowed_fields = { + 'name', + 'mutable', + 'input_type', + 'default_value', + 'values', + } + return self._prepare_meta(allowed_fields, attribute) + + def _prepare_label_meta(self, labels): + allowed_fields = { + 'name', + 'color', + 'attributes', + } + return self._prepare_meta(allowed_fields, labels) + + def _prepare_annotations(self, annotations, label_mapping): + allowed_fields = { + 'label', + 'label_id', + 'type', + 'occluded', + 'outside', + 'z_order', + 'points', + 'frame', + 'group', + 'source', + 'attributes', + 'shapes', + } + + def _update_attribute(attribute, label): + if 'name' in attribute: + source, dest = attribute.pop('name'), 'spec_id' + else: + source, dest = attribute.pop('spec_id'), 'name' + attribute[dest] = label_mapping[label]['attributes'][source] + + def _update_label(shape): + if 'label_id' in shape: + source, dest = shape.pop('label_id'), 'label' + elif 'label' in shape: + source, dest = shape.pop('label'), 'label_id' + shape[dest] = label_mapping[source]['value'] + + return source + + for tag in annotations['tags']: + label = _update_label(tag) + for attr in tag['attributes']: + _update_attribute(attr, label) + 
self._prepare_meta(allowed_fields, tag) + + for shape in annotations['shapes']: + label = _update_label(shape) + for attr in shape['attributes']: + _update_attribute(attr, label) + self._prepare_meta(allowed_fields, shape) + + for track in annotations['tracks']: + label = _update_label(track) + for shape in track['shapes']: + for attr in shape['attributes']: + _update_attribute(attr, label) + self._prepare_meta(allowed_fields, shape) + + for attr in track['attributes']: + _update_attribute(attr, label) + self._prepare_meta(allowed_fields, track) + + return annotations + + def _prepare_review_meta(self, review): + allowed_fields = { + 'estimated_quality', + 'status', + 'issues', + } + return self._prepare_meta(allowed_fields, review) + + def _prepare_issue_meta(self, issue): + allowed_fields = { + 'frame', + 'position', + 'created_date', + 'resolved_date', + 'comments', + } + return self._prepare_meta(allowed_fields, issue) + + def _prepare_comment_meta(self, comment): + allowed_fields = { + 'message', + 'created_date', + 'updated_date', + } + return self._prepare_meta(allowed_fields, comment) + + def _get_db_jobs(self): + if self._db_task: + db_segments = list(self._db_task.segment_set.all().prefetch_related('job_set')) + db_segments.sort(key=lambda i: i.job_set.first().id) + db_jobs = (s.job_set.first() for s in db_segments) + return db_jobs + return () + +class TaskExporter(_TaskBackupBase): + def __init__(self, pk, version=Version.V1): + self._db_task = models.Task.objects.prefetch_related('data__images').select_related('data__video').get(pk=pk) + self._db_data = self._db_task.data + self._version = version + + db_labels = (self._db_task.project if self._db_task.project_id else self._db_task).label_set.all().prefetch_related( + 'attributespec_set') + + self._label_mapping = {} + for db_label in db_labels: + self._label_mapping[db_label.id] = { + 'value': db_label.name, + 'attributes': {}, + } + for db_attribute in db_label.attributespec_set.all(): + self._label_mapping[db_label.id]['attributes'][db_attribute.id] = db_attribute.name + + def _write_files(self, source_dir, zip_object, files, target_dir): + for filename in files: + arcname = os.path.normpath( + os.path.join( + target_dir, + os.path.relpath(filename, source_dir), + ) + ) + zip_object.write(filename=filename, arcname=arcname) + + def _write_directory(self, source_dir, zip_object, target_dir, recursive=True, exclude_files=None): + for root, dirs, files in os.walk(source_dir, topdown=True): + if not recursive: + dirs.clear() + + if files: + self._write_files( + source_dir=source_dir, + zip_object=zip_object, + files=(os.path.join(root, f) for f in files if not exclude_files or f not in exclude_files), + target_dir=target_dir, + ) + + def _write_data(self, zip_object): + if self._db_data.storage == StorageChoice.LOCAL: + self._write_directory( + source_dir=self._db_data.get_upload_dirname(), + zip_object=zip_object, + target_dir=self.DATA_DIRNAME, + ) + elif self._db_data.storage == StorageChoice.SHARE: + data_dir = settings.SHARE_ROOT + if hasattr(self._db_data, 'video'): + media_files = (os.path.join(data_dir, self._db_data.video.path), ) + else: + media_files = (os.path.join(data_dir, im.path) for im in self._db_data.images.all().order_by('frame')) + + self._write_files( + source_dir=data_dir, + zip_object=zip_object, + files=media_files, + target_dir=self.DATA_DIRNAME + ) + + upload_dir = 
self._db_data.get_upload_dirname() + self._write_files( + source_dir=upload_dir, + zip_object=zip_object, + files=(os.path.join(upload_dir, f) for f in ('manifest.jsonl',)), + target_dir=self.DATA_DIRNAME + ) + else: + raise NotImplementedError() + + def _write_task(self, zip_object): + task_dir = self._db_task.get_task_dirname() + self._write_directory( + source_dir=task_dir, + zip_object=zip_object, + target_dir=self.TASK_DIRNAME, + recursive=False, + ) + + def _write_manifest(self, zip_object): + def serialize_task(): + task_serializer = TaskSerializer(self._db_task) + task_serializer.fields.pop('url') + task_serializer.fields.pop('owner') + task_serializer.fields.pop('assignee') + task_serializer.fields.pop('segments') + + task = self._prepare_task_meta(task_serializer.data) + task['labels'] = [self._prepare_label_meta(l) for l in task['labels']] + for label in task['labels']: + label['attributes'] = [self._prepare_attribute_meta(a) for a in label['attributes']] + + return task + + def serialize_comment(db_comment): + comment_serializer = CommentSerializer(db_comment) + comment_serializer.fields.pop('author') + + return self._prepare_comment_meta(comment_serializer.data) + + def serialize_issue(db_issue): + issue_serializer = IssueSerializer(db_issue) + issue_serializer.fields.pop('owner') + issue_serializer.fields.pop('resolver') + + issue = issue_serializer.data + issue['comments'] = (serialize_comment(c) for c in db_issue.comment_set.order_by('id')) + + return self._prepare_issue_meta(issue) + + def serialize_review(db_review): + review_serializer = ReviewSerializer(db_review) + review_serializer.fields.pop('reviewer') + review_serializer.fields.pop('assignee') + + review = review_serializer.data + review['issues'] = (serialize_issue(i) for i in db_review.issue_set.order_by('id')) + + return self._prepare_review_meta(review) + + def serialize_segment(db_segment): + db_job = db_segment.job_set.first() + job_serializer = SimpleJobSerializer(db_job) + job_serializer.fields.pop('url') + job_serializer.fields.pop('assignee') + job_serializer.fields.pop('reviewer') + job_data = self._prepare_job_meta(job_serializer.data) + + segment_serializer = SegmentSerializer(db_segment) + segment_serializer.fields.pop('jobs') + segment = segment_serializer.data + segment.update(job_data) + + db_reviews = db_job.review_set.order_by('id') + segment['reviews'] = (serialize_review(r) for r in db_reviews) + + return segment + + def serialize_jobs(): + db_segments = list(self._db_task.segment_set.all()) + db_segments.sort(key=lambda i: i.job_set.first().id) + return (serialize_segment(s) for s in db_segments) + + def serialize_data(): + data_serializer = DataSerializer(self._db_data) + data = data_serializer.data + data['chunk_type'] = data.pop('compressed_chunk_type') + return self._prepare_data_meta(data) + + task = serialize_task() + task['version'] = self._version.value + task['data'] = serialize_data() + task['jobs'] = serialize_jobs() + + zip_object.writestr(self.MANIFEST_FILENAME, data=JSONRenderer().render(task)) + + def _write_annotations(self, zip_object): + def serialize_annotations(): + job_annotations = [] + db_jobs = self._get_db_jobs() + db_job_ids = (j.id for j in db_jobs) + for db_job_id in db_job_ids: + annotations = dm.task.get_job_data(db_job_id) + annotations_serializer = LabeledDataSerializer(data=annotations) + annotations_serializer.is_valid(raise_exception=True) + job_annotations.append(self._prepare_annotations(annotations_serializer.data, self._label_mapping)) + + return 
job_annotations + + annotations = serialize_annotations() + zip_object.writestr(self.ANNOTATIONS_FILENAME, data=JSONRenderer().render(annotations)) + + def export_to(self, filename): + if self._db_task.data.storage_method == StorageMethodChoice.FILE_SYSTEM and \ + self._db_task.data.storage == StorageChoice.SHARE: + raise Exception('The task cannot be exported because it does not contain any raw data') + with ZipFile(filename, 'w') as output_file: + self._write_data(output_file) + self._write_task(output_file) + self._write_manifest(output_file) + self._write_annotations(output_file) + +class TaskImporter(_TaskBackupBase): + def __init__(self, filename, user_id): + self._filename = filename + self._user_id = user_id + self._manifest, self._annotations = self._read_meta() + self._version = self._read_version() + self._labels_mapping = {} + self._db_task = None + + def _read_meta(self): + with ZipFile(self._filename, 'r') as input_file: + manifest = JSONParser().parse(io.BytesIO(input_file.read(self.MANIFEST_FILENAME))) + annotations = JSONParser().parse(io.BytesIO(input_file.read(self.ANNOTATIONS_FILENAME))) + + return manifest, annotations + + def _read_version(self): + version = self._manifest.pop('version') + try: + return Version(version) + except ValueError: + raise ValueError('{} version is not supported'.format(version)) + + @staticmethod + def _prepare_dirs(filepath): + target_dir = os.path.dirname(filepath) + if not os.path.exists(target_dir): + os.makedirs(target_dir) + + def _create_labels(self, db_task, labels): + label_mapping = {} + + for label in labels: + label_name = label['name'] + attributes = label.pop('attributes', []) + db_label = models.Label.objects.create(task=db_task, **label) + label_mapping[label_name] = { + 'value': db_label.id, + 'attributes': {}, + } + + for attribute in attributes: + attribute_name = attribute['name'] + attribute_serializer = AttributeSerializer(data=attribute) + attribute_serializer.is_valid(raise_exception=True) + db_attribute = attribute_serializer.save(label=db_label) + label_mapping[label_name]['attributes'][attribute_name] = db_attribute.id + + return label_mapping + + def _create_annotations(self, db_job, annotations): + self._prepare_annotations(annotations, self._labels_mapping) + + serializer = LabeledDataSerializer(data=annotations) + serializer.is_valid(raise_exception=True) + dm.task.put_job_data(db_job.id, serializer.data) + + @staticmethod + def _calculate_segment_size(jobs): + segment_size = jobs[0]['stop_frame'] - jobs[0]['start_frame'] + 1 + overlap = 0 if len(jobs) == 1 else jobs[0]['stop_frame'] - jobs[1]['start_frame'] + 1 + + return segment_size, overlap + + def _import_task(self): + + def _create_comment(comment, db_issue): + comment['issue'] = db_issue.id + comment_serializer = CommentSerializer(data=comment) + comment_serializer.is_valid(raise_exception=True) + db_comment = comment_serializer.save() + return db_comment + + def _create_issue(issue, db_review, db_job): + issue['review'] = db_review.id + issue['job'] = db_job.id + comments = issue.pop('comments') + + issue_serializer = IssueSerializer(data=issue) + issue_serializer.is_valid(raise_exception=True) + db_issue = issue_serializer.save() + + for comment in comments: + _create_comment(comment, db_issue) + + return db_issue + + def _create_review(review, db_job): + review['job'] = db_job.id + issues = review.pop('issues') + + review_serializer = ReviewSerializer(data=review) + review_serializer.is_valid(raise_exception=True) + db_review = 
review_serializer.save() + + for issue in issues: + _create_issue(issue, db_review, db_job) + + return db_review + + data = self._manifest.pop('data') + labels = self._manifest.pop('labels') + jobs = self._manifest.pop('jobs') + + self._prepare_task_meta(self._manifest) + self._manifest['segment_size'], self._manifest['overlap'] = self._calculate_segment_size(jobs) + self._manifest["owner_id"] = self._user_id + + self._db_task = models.Task.objects.create(**self._manifest) + task_path = self._db_task.get_task_dirname() + if os.path.isdir(task_path): + shutil.rmtree(task_path) + + os.makedirs(self._db_task.get_task_logs_dirname()) + os.makedirs(self._db_task.get_task_artifacts_dirname()) + + self._labels_mapping = self._create_labels(self._db_task, labels) + + self._prepare_data_meta(data) + data_serializer = DataSerializer(data=data) + data_serializer.is_valid(raise_exception=True) + db_data = data_serializer.save() + self._db_task.data = db_data + self._db_task.save() + + data_path = self._db_task.data.get_upload_dirname() + uploaded_files = [] + with ZipFile(self._filename, 'r') as input_file: + for f in input_file.namelist(): + if f.startswith(self.DATA_DIRNAME + os.path.sep): + target_file = os.path.join(data_path, os.path.relpath(f, self.DATA_DIRNAME)) + self._prepare_dirs(target_file) + with open(target_file, "wb") as out: + out.write(input_file.read(f)) + uploaded_files.append(os.path.relpath(f, self.DATA_DIRNAME)) + elif f.startswith(self.TASK_DIRNAME + os.path.sep): + target_file = os.path.join(task_path, os.path.relpath(f, self.TASK_DIRNAME)) + self._prepare_dirs(target_file) + with open(target_file, "wb") as out: + out.write(input_file.read(f)) + + data['use_zip_chunks'] = data.pop('chunk_type') == DataChoice.IMAGESET + data = data_serializer.data + data['client_files'] = uploaded_files + _create_thread(self._db_task.pk, data.copy(), True) + db_data.start_frame = data['start_frame'] + db_data.stop_frame = data['stop_frame'] + db_data.frame_filter = data['frame_filter'] + db_data.storage = StorageChoice.LOCAL + db_data.save(update_fields=['start_frame', 'stop_frame', 'frame_filter', 'storage']) + + for db_job, job in zip(self._get_db_jobs(), jobs): + db_job.status = job['status'] + db_job.save() + + for review in job['reviews']: + _create_review(review, db_job) + + def _import_annotations(self): + db_jobs = self._get_db_jobs() + for db_job, annotations in zip(db_jobs, self._annotations): + self._create_annotations(db_job, annotations) + + def import_task(self): + self._import_task() + self._import_annotations() + return self._db_task + +@transaction.atomic +def import_task(filename, user): + av_scan_paths(filename) + task_importer = TaskImporter(filename, user) + db_task = task_importer.import_task() + return db_task.id diff --git a/cvat/apps/engine/media_extractors.py b/cvat/apps/engine/media_extractors.py index ffa58deb2dec35396c95193cf3bb9500be708b7c..d3184e0968d7e3a5f20917e531827d6f4ff18ddb 100644 --- a/cvat/apps/engine/media_extractors.py +++ b/cvat/apps/engine/media_extractors.py @@ -48,11 +48,12 @@ def files_to_ignore(directory): return False class IMediaReader(ABC): - def __init__(self, source_path, step, start, stop): + def __init__(self, source_path, step, start, stop, dimension): self._source_path = sorted(source_path) self._step = step self._start = start self._stop = stop + self._dimension = dimension @abstractmethod def __iter__(self): @@ -89,7 +90,7 @@ class IMediaReader(ABC): return range(self._start, self._stop, self._step) class ImageListReader(IMediaReader): - 
def __init__(self, source_path, step=1, start=0, stop=None): + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): if not source_path: raise Exception('No image found') @@ -105,6 +106,7 @@ class ImageListReader(IMediaReader): step=step, start=start, stop=stop, + dimension=dimension ) def __iter__(self): @@ -113,7 +115,14 @@ class ImageListReader(IMediaReader): def filter(self, callback): source_path = list(filter(callback, self._source_path)) - ImageListReader.__init__(self, source_path, step=self._step, start=self._start, stop=self._stop) + ImageListReader.__init__( + self, + source_path, + step=self._step, + start=self._start, + stop=self._stop, + dimension=self._dimension + ) def get_path(self, i): return self._source_path[i] @@ -125,19 +134,36 @@ class ImageListReader(IMediaReader): return (pos - self._start + 1) / (self._stop - self._start) def get_preview(self): - fp = open(self._source_path[0], "rb") + if self._dimension == DimensionType.DIM_3D: + fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb") + else: + fp = open(self._source_path[0], "rb") return self._get_preview(fp) def get_image_size(self, i): + if self._dimension == DimensionType.DIM_3D: + with open(self.get_path(i), 'rb') as f: + properties = ValidateDimension.get_pcd_properties(f) + return int(properties["WIDTH"]), int(properties["HEIGHT"]) img = Image.open(self._source_path[i]) return img.width, img.height + def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): + # FIXME + ImageListReader.__init__(self, + source_path=source_files, + step=step, + start=start, + stop=stop + ) + self._dimension = dimension + @property def absolute_source_paths(self): return [self.get_path(idx) for idx, _ in enumerate(self._source_path)] class DirectoryReader(ImageListReader): - def __init__(self, source_path, step=1, start=0, stop=None): + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): image_paths = [] for source in source_path: for root, _, files in os.walk(source): @@ -149,10 +175,11 @@ class DirectoryReader(ImageListReader): step=step, start=start, stop=stop, + dimension=dimension, ) class ArchiveReader(DirectoryReader): - def __init__(self, source_path, step=1, start=0, stop=None): + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): self._archive_source = source_path[0] extract_dir = source_path[1] if len(source_path) > 1 else os.path.dirname(source_path[0]) Archive(self._archive_source).extractall(extract_dir) @@ -163,10 +190,11 @@ class ArchiveReader(DirectoryReader): step=step, start=start, stop=stop, + dimension=dimension ) class PdfReader(ImageListReader): - def __init__(self, source_path, step=1, start=0, stop=None): + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): if not source_path: raise Exception('No PDF found') @@ -194,21 +222,22 @@ class PdfReader(ImageListReader): step=step, start=start, stop=stop, + dimension=dimension, ) class ZipReader(ImageListReader): - def __init__(self, source_path, step=1, start=0, stop=None): - self._dimension = DimensionType.DIM_2D - self._zip_source = zipfile.ZipFile(source_path[0], mode='a') + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): + self._zip_source = zipfile.ZipFile(source_path[0], mode='r') self.extract_dir = source_path[1] if len(source_path) > 1 else None file_list = [f for f in 
self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image'] - super().__init__(file_list, step=step, start=start, stop=stop) + super().__init__(file_list, step=step, start=start, stop=stop, dimension=dimension) def __del__(self): self._zip_source.close() def get_preview(self): if self._dimension == DimensionType.DIM_3D: + # TODO fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb") return self._get_preview(fp) io_image = io.BytesIO(self._zip_source.read(self._source_path[0])) @@ -216,32 +245,20 @@ class ZipReader(ImageListReader): def get_image_size(self, i): if self._dimension == DimensionType.DIM_3D: - with self._zip_source.open(self._source_path[i], "r") as file: - properties = ValidateDimension.get_pcd_properties(file) + with open(self.get_path(i), 'rb') as f: + properties = ValidateDimension.get_pcd_properties(f) return int(properties["WIDTH"]), int(properties["HEIGHT"]) img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i]))) return img.width, img.height def get_image(self, i): + if self._dimension == DimensionType.DIM_3D: + return self.get_path(i) return io.BytesIO(self._zip_source.read(self._source_path[i])) - def add_files(self, source_path): - root_path = os.path.split(self._zip_source.filename)[0] - for path in source_path: - self._zip_source.write(path, path.replace(root_path, "")) - def get_zip_filename(self): return self._zip_source.filename - def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): - self._dimension = dimension - super().__init__( - source_path=source_files, - step=step, - start=start, - stop=stop - ) - def get_path(self, i): if self._zip_source.filename: return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \ @@ -249,18 +266,28 @@ class ZipReader(ImageListReader): else: # necessary for mime_type definition return self._source_path[i] + def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): + super().reconcile( + source_files=source_files, + step=step, + start=start, + stop=stop, + dimension=dimension, + ) + def extract(self): self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename)) if not self.extract_dir: os.remove(self._zip_source.filename) class VideoReader(IMediaReader): - def __init__(self, source_path, step=1, start=0, stop=None): + def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D): super().__init__( source_path=source_path, step=step, start=start, stop=stop + 1 if stop is not None else stop, + dimension=dimension, ) def _has_frame(self, i): @@ -743,15 +770,15 @@ class ValidateDimension: pcd_files = {} for file in files: - file_name, file_extension = file.rsplit('.', maxsplit=1) + file_name, file_extension = os.path.splitext(file) file_path = os.path.abspath(os.path.join(root, file)) - if file_extension == "bin": + if file_extension == ".bin": path = self.bin_operation(file_path, actual_path) pcd_files[file_name] = path self.related_files[path] = [] - elif file_extension == "pcd": + elif file_extension == ".pcd": path = ValidateDimension.pcd_operation(file_path, actual_path) if path == file_path: self.image_files[file_name] = file_path @@ -759,7 +786,8 @@ class ValidateDimension: pcd_files[file_name] = path self.related_files[path] = [] else: - self.image_files[file_name] = file_path + if _is_image(file_path): + self.image_files[file_name] = file_path return pcd_files def 
validate(self): diff --git a/cvat/apps/engine/serializers.py b/cvat/apps/engine/serializers.py index edb96df4b66c9bfa5cda39ab930a4aa47465b8d3..aaabe18a3ae54337cece382b438de7ff4fbc7bff 100644 --- a/cvat/apps/engine/serializers.py +++ b/cvat/apps/engine/serializers.py @@ -1,4 +1,4 @@ -# Copyright (C) 2019 Intel Corporation +# Copyright (C) 2019-2021 Intel Corporation # # SPDX-License-Identifier: MIT @@ -278,7 +278,7 @@ class DataSerializer(serializers.ModelSerializer): model = models.Data fields = ('chunk_size', 'size', 'image_quality', 'start_frame', 'stop_frame', 'frame_filter', 'compressed_chunk_type', 'original_chunk_type', 'client_files', 'server_files', 'remote_files', 'use_zip_chunks', - 'use_cache', 'copy_data') + 'use_cache', 'copy_data', 'storage_method', 'storage') # pylint: disable=no-self-use def validate_frame_filter(self, value): @@ -707,6 +707,9 @@ class LogEventSerializer(serializers.Serializer): class AnnotationFileSerializer(serializers.Serializer): annotation_file = serializers.FileField() +class TaskFileSerializer(serializers.Serializer): + task_file = serializers.FileField() + class ReviewSerializer(serializers.ModelSerializer): assignee = BasicUserSerializer(allow_null=True, required=False) assignee_id = serializers.IntegerField(write_only=True, allow_null=True, required=False) @@ -767,3 +770,10 @@ class CombinedReviewSerializer(ReviewSerializer): models.Comment.objects.create(**comment) return db_review + +class RelatedFileSerializer(serializers.ModelSerializer): + + class Meta: + model = models.RelatedFile + fields = '__all__' + read_only_fields = ('path',) \ No newline at end of file diff --git a/cvat/apps/engine/task.py b/cvat/apps/engine/task.py index e2700d8fb6503cdbedeb31f9806de249a8979efc..4aa123eb653addb3b99017ba510f55cf39cb59d8 100644 --- a/cvat/apps/engine/task.py +++ b/cvat/apps/engine/task.py @@ -9,27 +9,25 @@ import sys import rq import re import shutil +from distutils.dir_util import copy_tree from traceback import print_exception from urllib import parse as urlparse from urllib import request as urlrequest import requests +import django_rq + +from django.conf import settings +from django.db import transaction -from cvat.apps.engine.media_extractors import get_mime, MEDIA_TYPES, Mpeg4ChunkWriter, ZipChunkWriter, Mpeg4CompressedChunkWriter, ZipCompressedChunkWriter, ValidateDimension -from cvat.apps.engine.models import DataChoice, StorageMethodChoice, StorageChoice, RelatedFile +from cvat.apps.engine import models +from cvat.apps.engine.log import slogger +from cvat.apps.engine.media_extractors import (MEDIA_TYPES, Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter, + ValidateDimension, ZipChunkWriter, ZipCompressedChunkWriter, get_mime) from cvat.apps.engine.utils import av_scan_paths -from cvat.apps.engine.models import DimensionType from utils.dataset_manifest import ImageManifestManager, VideoManifestManager from utils.dataset_manifest.core import VideoManifestValidator from utils.dataset_manifest.utils import detect_related_images -import django_rq -from django.conf import settings -from django.db import transaction -from distutils.dir_util import copy_tree - -from . 
import models -from .log import slogger - ############################# Low Level server API def create(tid, data): @@ -41,12 +39,13 @@ def create(tid, data): @transaction.atomic def rq_handler(job, exc_type, exc_value, traceback): split = job.id.split('/') - tid = int(split[split.index('tasks') + 1]) + tid = split[split.index('tasks') + 1] try: + tid = int(tid) db_task = models.Task.objects.select_for_update().get(pk=tid) with open(db_task.get_log_path(), "wt") as log_file: print_exception(exc_type, exc_value, traceback, file=log_file) - except models.Task.DoesNotExist: + except (models.Task.DoesNotExist, ValueError): pass # skip exceptions in the code return False @@ -76,8 +75,9 @@ def _save_task_to_db(db_task): segment_size = db_task.segment_size segment_step = segment_size - if segment_size == 0: + if segment_size == 0 or segment_size > db_task.data.size: segment_size = db_task.data.size + db_task.segment_size = segment_size # Segment step must be more than segment_size + overlap in single-segment tasks # Otherwise a task contains an extra segment @@ -209,15 +209,15 @@ def _download_data(urls, upload_dir): return list(local_files.keys()) +def _get_manifest_frame_indexer(start_frame=0, frame_step=1): + return lambda frame_id: start_frame + frame_id * frame_step + @transaction.atomic -def _create_thread(tid, data): +def _create_thread(tid, data, isImport=False): slogger.glob.info("create task #{}".format(tid)) db_task = models.Task.objects.select_for_update().get(pk=tid) db_data = db_task.data - if db_task.data.size != 0: - raise NotImplementedError("Adding more data is not implemented") - upload_dir = db_data.get_upload_dirname() if data['remote_files']: @@ -227,11 +227,11 @@ def _create_thread(tid, data): media = _count_files(data, manifest_file) media, task_mode = _validate_data(media, manifest_file) if manifest_file: - assert settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE, \ + assert settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE, \ "File with meta information can be uploaded if 'Use cache' option is also selected" if data['server_files']: - if db_data.storage == StorageChoice.LOCAL: + if db_data.storage == models.StorageChoice.LOCAL: _copy_data_from_share(data['server_files'], upload_dir) else: upload_dir = settings.SHARE_ROOT @@ -244,16 +244,23 @@ def _create_thread(tid, data): db_images = [] extractor = None + manifest_index = _get_manifest_frame_indexer() for media_type, media_files in media.items(): if media_files: if extractor is not None: raise Exception('Combined data types are not supported') source_paths=[os.path.join(upload_dir, f) for f in media_files] - if media_type in {'archive', 'zip'} and db_data.storage == StorageChoice.SHARE: + if media_type in {'archive', 'zip'} and db_data.storage == models.StorageChoice.SHARE: source_paths.append(db_data.get_upload_dirname()) upload_dir = db_data.get_upload_dirname() - db_data.storage = StorageChoice.LOCAL + db_data.storage = models.StorageChoice.LOCAL + if isImport and media_type == 'image' and db_data.storage == models.StorageChoice.SHARE: + manifest_index = _get_manifest_frame_indexer(db_data.start_frame, db_data.get_frame_step()) + db_data.start_frame = 0 + data['stop_frame'] = None + db_data.frame_filter = '' + extractor = MEDIA_TYPES[media_type]['extractor']( source_path=source_paths, step=db_data.get_frame_step(), @@ -261,22 +268,27 @@ def _create_thread(tid, data): stop=data['stop_frame'], ) + validate_dimension = ValidateDimension() - if extractor.__class__ 
== MEDIA_TYPES['zip']['extractor']: + if isinstance(extractor, MEDIA_TYPES['zip']['extractor']): extractor.extract() - validate_dimension.set_path(os.path.split(extractor.get_zip_filename())[0]) + + if db_data.storage == models.StorageChoice.LOCAL or \ + (db_data.storage == models.StorageChoice.SHARE and \ + isinstance(extractor, MEDIA_TYPES['zip']['extractor'])): + validate_dimension.set_path(upload_dir) validate_dimension.validate() - if validate_dimension.dimension == DimensionType.DIM_3D: - db_task.dimension = DimensionType.DIM_3D - extractor.reconcile( - source_files=list(validate_dimension.related_files.keys()), - step=db_data.get_frame_step(), - start=db_data.start_frame, - stop=data['stop_frame'], - dimension=DimensionType.DIM_3D, - ) - extractor.add_files(validate_dimension.converted_files) + if validate_dimension.dimension == models.DimensionType.DIM_3D: + db_task.dimension = models.DimensionType.DIM_3D + + extractor.reconcile( + source_files=[os.path.join(upload_dir, f) for f in validate_dimension.related_files.keys()], + step=db_data.get_frame_step(), + start=db_data.start_frame, + stop=data['stop_frame'], + dimension=models.DimensionType.DIM_3D, + ) related_images = {} if isinstance(extractor, MEDIA_TYPES['image']['extractor']): @@ -301,8 +313,8 @@ def _create_thread(tid, data): job.save_meta() update_progress.call_counter = (update_progress.call_counter + 1) % len(progress_animation) - compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter - if db_data.original_chunk_type == DataChoice.VIDEO: + compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == models.DataChoice.VIDEO else ZipCompressedChunkWriter + if db_data.original_chunk_type == models.DataChoice.VIDEO: original_chunk_writer_class = Mpeg4ChunkWriter # Let's use QP=17 (that is 67 for 0-100 range) for the original chunks, which should be visually lossless or nearly so. # A lower value will significantly increase the chunk size with a slight increase of quality. 
@@ -312,7 +324,7 @@ def _create_thread(tid, data): original_quality = 100 kwargs = {} - if validate_dimension.dimension == DimensionType.DIM_3D: + if validate_dimension.dimension == models.DimensionType.DIM_3D: kwargs["dimension"] = validate_dimension.dimension compressed_chunk_writer = compressed_chunk_writer_class(db_data.image_quality, **kwargs) original_chunk_writer = original_chunk_writer_class(original_quality) @@ -326,7 +338,6 @@ def _create_thread(tid, data): else: db_data.chunk_size = 36 - video_path = "" video_size = (0, 0) @@ -334,7 +345,7 @@ def _create_thread(tid, data): job.meta['status'] = msg job.save_meta() - if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE: + if settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE: for media_type, media_files in media.items(): if not media_files: @@ -392,7 +403,7 @@ def _create_thread(tid, data): if data['stop_frame'] else all_frames, all_frames), db_data.get_frame_step())) video_path = os.path.join(upload_dir, media_files[0]) except Exception as ex: - db_data.storage_method = StorageMethodChoice.FILE_SYSTEM + db_data.storage_method = models.StorageMethodChoice.FILE_SYSTEM if os.path.exists(db_data.get_manifest_path()): os.remove(db_data.get_manifest_path()) if os.path.exists(db_data.get_index_path()): @@ -404,7 +415,7 @@ def _create_thread(tid, data): db_data.size = len(extractor) manifest = ImageManifestManager(db_data.get_manifest_path()) if not manifest_file: - if db_task.dimension == DimensionType.DIM_2D: + if db_task.dimension == models.DimensionType.DIM_2D: meta_info = manifest.prepare_meta( sources=extractor.absolute_source_paths, meta={ k: {'related_images': related_images[k] } for k in related_images }, @@ -428,8 +439,8 @@ def _create_thread(tid, data): img_sizes = [] for _, frame_id in chunk_paths: - properties = manifest[frame_id] - if db_task.dimension == DimensionType.DIM_2D: + properties = manifest[manifest_index(frame_id)] + if db_task.dimension == models.DimensionType.DIM_2D: resolution = (properties['width'], properties['height']) else: resolution = extractor.get_image_size(frame_id) @@ -442,7 +453,7 @@ def _create_thread(tid, data): for (path, frame), (w, h) in zip(chunk_paths, img_sizes) ]) - if db_data.storage_method == StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE: + if db_data.storage_method == models.StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE: counter = itertools.count() generator = itertools.groupby(extractor, lambda x: next(counter) // db_data.chunk_size) for chunk_idx, chunk_data in generator: @@ -477,11 +488,11 @@ def _create_thread(tid, data): created_images = models.Image.objects.filter(data_id=db_data.id) db_related_files = [ - RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path)) + models.RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path)) for image in created_images for related_file_path in related_images.get(image.path, []) ] - RelatedFile.objects.bulk_create(db_related_files) + models.RelatedFile.objects.bulk_create(db_related_files) db_images = [] else: models.Video.objects.create( diff --git a/cvat/apps/engine/tests/test_rest_api.py b/cvat/apps/engine/tests/test_rest_api.py index c99536da13f6bc2fe70460b222051d29d3899a4b..a5c279609c058084fa9b5eb6dc076140399139a6 100644 --- a/cvat/apps/engine/tests/test_rest_api.py +++ b/cvat/apps/engine/tests/test_rest_api.py @@ -1,4 +1,4 @@ -# Copyright (C) 2020 Intel Corporation +# Copyright 
(C) 2020-2021 Intel Corporation # # SPDX-License-Identifier: MIT @@ -2071,6 +2071,348 @@ class TaskCreateAPITestCase(APITestCase): } self._check_api_v1_tasks(None, data) + + +class TaskImportExportAPITestCase(APITestCase): + + def setUp(self): + self.client = APIClient() + self.tasks = [] + + @classmethod + def setUpTestData(cls): + create_db_users(cls) + + cls.media_data = [] + + image_count = 10 + imagename_pattern = "test_{}.jpg" + for i in range(image_count): + filename = imagename_pattern.format(i) + path = os.path.join(settings.SHARE_ROOT, filename) + _, data = generate_image_file(filename) + with open(path, "wb") as image: + image.write(data.read()) + + cls.media_data.append( + { + **{"image_quality": 75, + "copy_data": True, + "start_frame": 2, + "stop_frame": 9, + "frame_filter": "step=2", + }, + **{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(image_count)}, + } + ) + + filename = "test_video_1.mp4" + path = os.path.join(settings.SHARE_ROOT, filename) + _, data = generate_video_file(filename, width=1280, height=720) + with open(path, "wb") as video: + video.write(data.read()) + cls.media_data.append( + { + "image_quality": 75, + "copy_data": True, + "start_frame": 2, + "stop_frame": 24, + "frame_filter": "step=2", + "server_files[0]": filename, + } + ) + + filename = os.path.join("test_archive_1.zip") + path = os.path.join(settings.SHARE_ROOT, filename) + _, data = generate_zip_archive_file(filename, count=5) + with open(path, "wb") as zip_archive: + zip_archive.write(data.read()) + cls.media_data.append( + { + "image_quality": 75, + "server_files[0]": filename, + } + ) + + filename = "test_pointcloud_pcd.zip" + source_path = os.path.join(os.path.dirname(__file__), 'assets', filename) + path = os.path.join(settings.SHARE_ROOT, filename) + shutil.copyfile(source_path, path) + cls.media_data.append( + { + "image_quality": 75, + "server_files[0]": filename, + } + ) + + filename = "test_velodyne_points.zip" + source_path = os.path.join(os.path.dirname(__file__), 'assets', filename) + path = os.path.join(settings.SHARE_ROOT, filename) + shutil.copyfile(source_path, path) + cls.media_data.append( + { + "image_quality": 75, + "server_files[0]": filename, + } + ) + + filename = os.path.join("videos", "test_video_1.mp4") + path = os.path.join(settings.SHARE_ROOT, filename) + os.makedirs(os.path.dirname(path)) + _, data = generate_video_file(filename, width=1280, height=720) + with open(path, "wb") as video: + video.write(data.read()) + + generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'), + sources=[path]) + + cls.media_data.append( + { + "image_quality": 70, + "copy_data": True, + "server_files[0]": filename, + "server_files[1]": os.path.join("videos", "manifest.jsonl"), + "use_cache": True, + } + ) + + generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'), + sources=[os.path.join(settings.SHARE_ROOT, imagename_pattern.format(i)) for i in range(1, 8)]) + cls.media_data.append( + { + **{"image_quality": 70, + "copy_data": True, + "use_cache": True, + "frame_filter": "step=2", + "server_files[0]": "manifest.jsonl", + }, + **{ + **{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(1, 8)}, + } + } + ) + + cls.media_data.extend([ + # image list local + { + "client_files[0]": generate_image_file("test_1.jpg")[1], + "client_files[1]": generate_image_file("test_2.jpg")[1], + "client_files[2]": 
generate_image_file("test_3.jpg")[1], + "image_quality": 75, + }, + # video local + { + "client_files[0]": generate_video_file("test_video.mp4")[1], + "image_quality": 75, + }, + # zip archive local + { + "client_files[0]": generate_zip_archive_file("test_archive_1.zip", 10)[1], + "image_quality": 50, + }, + # pdf local + { + "client_files[0]": generate_pdf_file("test_pdf_1.pdf", 7)[1], + "image_quality": 54, + }, + ]) + + def tearDown(self): + for task in self.tasks: + shutil.rmtree(os.path.join(settings.TASKS_ROOT, str(task["id"]))) + shutil.rmtree(os.path.join(settings.MEDIA_DATA_ROOT, str(task["data_id"]))) + + @classmethod + def tearDownClass(cls): + super().tearDownClass() + path = os.path.join(settings.SHARE_ROOT, "test_1.jpg") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "test_2.jpg") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "test_3.jpg") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "test_video_1.mp4") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "videos", "test_video_1.mp4") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl") + os.remove(path) + os.rmdir(os.path.dirname(path)) + + path = os.path.join(settings.SHARE_ROOT, "test_pointcloud_pcd.zip") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "test_velodyne_points.zip") + os.remove(path) + + path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl") + os.remove(path) + + def _create_tasks(self): + self.tasks = [] + + def _create_task(task_data, media_data): + response = self.client.post('/api/v1/tasks', data=task_data, format="json") + assert response.status_code == status.HTTP_201_CREATED + tid = response.data["id"] + + for media in media_data.values(): + if isinstance(media, io.BytesIO): + media.seek(0) + response = self.client.post("/api/v1/tasks/{}/data".format(tid), data=media_data) + assert response.status_code == status.HTTP_202_ACCEPTED + response = self.client.get("/api/v1/tasks/{}".format(tid)) + data_id = response.data["data"] + self.tasks.append({ + "id": tid, + "data_id": data_id, + }) + + task_data = [ + { + "name": "my task #1", + "owner_id": self.owner.id, + "assignee_id": self.assignee.id, + "overlap": 0, + "segment_size": 100, + "labels": [{ + "name": "car", + "color": "#ff00ff", + "attributes": [{ + "name": "bool_attribute", + "mutable": True, + "input_type": AttributeType.CHECKBOX, + "default_value": "true" + }], + }, { + "name": "person", + }, + ] + }, + { + "name": "my task #2", + "owner_id": self.owner.id, + "assignee_id": self.assignee.id, + "overlap": 1, + "segment_size": 3, + "labels": [{ + "name": "car", + "color": "#ff00ff", + "attributes": [{ + "name": "bool_attribute", + "mutable": True, + "input_type": AttributeType.CHECKBOX, + "default_value": "true" + }], + }, { + "name": "person", + }, + ] + }, + ] + + with ForceLogin(self.owner, self.client): + for data in task_data: + for media in self.media_data: + _create_task(data, media) + + def _run_api_v1_tasks_id_export(self, tid, user, query_params=""): + with ForceLogin(user, self.client): + response = self.client.get('/api/v1/tasks/{}?{}'.format(tid, query_params), format="json") + + return response + + def _run_api_v1_tasks_id_import(self, user, data): + with ForceLogin(user, self.client): + response = self.client.post('/api/v1/tasks?action=import', data=data, format="multipart") + + return response + + def _run_api_v1_tasks_id(self, tid, user): + with ForceLogin(user, self.client): + response = 
self.client.get('/api/v1/tasks/{}'.format(tid), format="json") + + return response.data + + def _run_api_v1_tasks_id_export_import(self, user): + if user: + if user is self.user or user is self.annotator: + HTTP_200_OK = status.HTTP_403_FORBIDDEN + HTTP_202_ACCEPTED = status.HTTP_403_FORBIDDEN + HTTP_201_CREATED = status.HTTP_403_FORBIDDEN + else: + HTTP_200_OK = status.HTTP_200_OK + HTTP_202_ACCEPTED = status.HTTP_202_ACCEPTED + HTTP_201_CREATED = status.HTTP_201_CREATED + else: + HTTP_200_OK = status.HTTP_401_UNAUTHORIZED + HTTP_202_ACCEPTED = status.HTTP_401_UNAUTHORIZED + HTTP_201_CREATED = status.HTTP_401_UNAUTHORIZED + + self._create_tasks() + for task in self.tasks: + tid = task["id"] + response = self._run_api_v1_tasks_id_export(tid, user, "action=export") + self.assertEqual(response.status_code, HTTP_202_ACCEPTED) + + response = self._run_api_v1_tasks_id_export(tid, user, "action=export") + self.assertEqual(response.status_code, HTTP_201_CREATED) + + response = self._run_api_v1_tasks_id_export(tid, user, "action=download") + self.assertEqual(response.status_code, HTTP_200_OK) + + if user and user is not self.observer and user is not self.user and user is not self.annotator: + self.assertTrue(response.streaming) + content = io.BytesIO(b"".join(response.streaming_content)) + content.seek(0) + + uploaded_data = { + "task_file": content, + } + response = self._run_api_v1_tasks_id_import(user, uploaded_data) + self.assertEqual(response.status_code, HTTP_202_ACCEPTED) + if user is not self.observer and user is not self.user and user is not self.annotator: + rq_id = response.data["rq_id"] + response = self._run_api_v1_tasks_id_import(user, {"rq_id": rq_id}) + self.assertEqual(response.status_code, HTTP_201_CREATED) + original_task = self._run_api_v1_tasks_id(tid, user) + imported_task = self._run_api_v1_tasks_id(response.data["id"], user) + compare_objects( + self=self, + obj1=original_task, + obj2=imported_task, + ignore_keys=( + "id", + "url", + "owner", + "project_id", + "assignee", + "created_date", + "updated_date", + "data", + ), + ) + + def test_api_v1_tasks_id_export_admin(self): + self._run_api_v1_tasks_id_export_import(self.admin) + + def test_api_v1_tasks_id_export_user(self): + self._run_api_v1_tasks_id_export_import(self.user) + + def test_api_v1_tasks_id_export_annotator(self): + self._run_api_v1_tasks_id_export_import(self.annotator) + + def test_api_v1_tasks_id_export_observer(self): + self._run_api_v1_tasks_id_export_import(self.observer) + + def test_api_v1_tasks_id_export_no_auth(self): + self._run_api_v1_tasks_id_export_import(None) + def generate_image_file(filename): f = BytesIO() gen = random.SystemRandom() @@ -2326,6 +2668,7 @@ class TaskDataAPITestCase(APITestCase): path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl") os.remove(path) + os.rmdir(os.path.dirname(path)) path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl") os.remove(path) @@ -2995,7 +3338,7 @@ def compare_objects(self, obj1, obj2, ignore_keys, fp_tolerance=.001): continue v2 = obj2[k] if k == 'attributes': - key = lambda a: a['spec_id'] + key = lambda a: a['spec_id'] if 'spec_id' in a else a['id'] v1.sort(key=key) v2.sort(key=key) compare_objects(self, v1, v2, ignore_keys) diff --git a/cvat/apps/engine/views.py b/cvat/apps/engine/views.py index b1815d7d1e9d3742bb0a0f2f0f2edd9c4a4903eb..2b23febeb5779d0e2a056a824004cd887120ad0f 100644 --- a/cvat/apps/engine/views.py +++ b/cvat/apps/engine/views.py @@ -1,4 +1,4 @@ -# Copyright (C) 2018-2020 Intel Corporation +# Copyright (C) 
2018-2021 Intel Corporation # # SPDX-License-Identifier: MIT @@ -7,6 +7,7 @@ import os import os.path as osp import shutil import traceback +import uuid from datetime import datetime from distutils.util import strtobool from tempfile import mkstemp @@ -50,9 +51,11 @@ from cvat.apps.engine.serializers import ( FileInfoSerializer, JobSerializer, LabeledDataSerializer, LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer, RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer, - CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer + CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer, + TaskFileSerializer, ) from cvat.apps.engine.utils import av_scan_paths +from cvat.apps.engine.backup import import_task from . import models, task from .log import clogger, slogger @@ -360,20 +363,134 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet): return [perm() for perm in permissions] - def perform_create(self, serializer): - def validate_task_limit(owner): - admin_perm = auth.AdminRolePermission() - is_admin = admin_perm.has_permission(self.request, self) - if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \ - Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']: - raise serializers.ValidationError('The user has the maximum number of tasks') + def _validate_task_limit(self, owner): + admin_perm = auth.AdminRolePermission() + is_admin = admin_perm.has_permission(self.request, self) + if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \ + Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']: + raise serializers.ValidationError('The user has the maximum number of tasks') + + def create(self, request): + action = self.request.query_params.get('action', None) + if action is None: + return super().create(request) + elif action == 'import': + self._validate_task_limit(owner=self.request.user) + if 'rq_id' in request.data: + rq_id = request.data['rq_id'] + else: + rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4()) + + queue = django_rq.get_queue("default") + rq_job = queue.fetch_job(rq_id) + + if not rq_job: + serializer = TaskFileSerializer(data=request.data) + serializer.is_valid(raise_exception=True) + task_file = serializer.validated_data['task_file'] + fd, filename = mkstemp(prefix='cvat_') + with open(filename, 'wb+') as f: + for chunk in task_file.chunks(): + f.write(chunk) + rq_job = queue.enqueue_call( + func=import_task, + args=(filename, request.user.id), + job_id=rq_id, + meta={ + 'tmp_file': filename, + 'tmp_file_descriptor': fd, + }, + ) + else: + if rq_job.is_finished: + task_id = rq_job.return_value + os.close(rq_job.meta['tmp_file_descriptor']) + os.remove(rq_job.meta['tmp_file']) + rq_job.delete() + return Response({'id': task_id}, status=status.HTTP_201_CREATED) + elif rq_job.is_failed: + os.close(rq_job.meta['tmp_file_descriptor']) + os.remove(rq_job.meta['tmp_file']) + exc_info = str(rq_job.exc_info) + rq_job.delete() + + # RQ adds a prefix with exception class name + import_error_prefix = '{}.{}'.format( + CvatImportError.__module__, CvatImportError.__name__) + if exc_info.startswith(import_error_prefix): + exc_info = exc_info.replace(import_error_prefix + ': ', '') + return Response(data=exc_info, + status=status.HTTP_400_BAD_REQUEST) + else: + return Response(data=exc_info, + 
status=status.HTTP_500_INTERNAL_SERVER_ERROR) + + return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED) + else: + raise serializers.ValidationError( + "Unexpected action specified for the request") + + def retrieve(self, request, pk=None): + db_task = self.get_object() # force to call check_object_permissions + action = self.request.query_params.get('action', None) + if action is None: + return super().retrieve(request, pk) + elif action in ('export', 'download'): + queue = django_rq.get_queue("default") + rq_id = "/api/v1/tasks/{}/export".format(pk) + + rq_job = queue.fetch_job(rq_id) + if rq_job: + last_task_update_time = timezone.localtime(db_task.updated_date) + request_time = rq_job.meta.get('request_time', None) + if request_time is None or request_time < last_task_update_time: + rq_job.cancel() + rq_job.delete() + else: + if rq_job.is_finished: + file_path = rq_job.return_value + if action == "download" and osp.exists(file_path): + rq_job.delete() + + timestamp = datetime.strftime(last_task_update_time, + "%Y_%m_%d_%H_%M_%S") + filename = "task_{}_backup_{}{}".format( + db_task.name, timestamp, + osp.splitext(file_path)[1]) + return sendfile(request, file_path, attachment=True, + attachment_filename=filename.lower()) + else: + if osp.exists(file_path): + return Response(status=status.HTTP_201_CREATED) + elif rq_job.is_failed: + exc_info = str(rq_job.exc_info) + rq_job.delete() + return Response(exc_info, + status=status.HTTP_500_INTERNAL_SERVER_ERROR) + else: + return Response(status=status.HTTP_202_ACCEPTED) + + ttl = dm.views.CACHE_TTL.total_seconds() + queue.enqueue_call( + func=dm.views.backup_task, + args=(pk, 'task_dump.zip'), + job_id=rq_id, + meta={ 'request_time': timezone.localtime() }, + result_ttl=ttl, failure_ttl=ttl) + return Response(status=status.HTTP_202_ACCEPTED) + + else: + raise serializers.ValidationError( + "Unexpected action specified for the request") + + def perform_create(self, serializer): owner = self.request.data.get('owner', None) if owner: - validate_task_limit(owner) + self._validate_task_limit(owner) serializer.save() else: - validate_task_limit(self.request.user) + self._validate_task_limit(self.request.user) serializer.save(owner=self.request.user) def perform_destroy(self, instance): @@ -414,6 +531,9 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet): def data(self, request, pk): if request.method == 'POST': db_task = self.get_object() # call check_object_permissions as well + if db_task.data: + return Response(data='Adding more data is not supported', + status=status.HTTP_400_BAD_REQUEST) serializer = DataSerializer(data=request.data) serializer.is_valid(raise_exception=True) db_data = serializer.save() diff --git a/tests/cypress/integration/actions_objects2/case_9_cuboid_shape_track_label.js b/tests/cypress/integration/actions_objects2/case_9_cuboid_shape_track_label.js index c2de3c65f387c573dfec5e159ae0db45fe4de031..921a11163f9fd0a87641bda251d9484d7d1c7e15 100644 --- a/tests/cypress/integration/actions_objects2/case_9_cuboid_shape_track_label.js +++ b/tests/cypress/integration/actions_objects2/case_9_cuboid_shape_track_label.js @@ -86,7 +86,7 @@ context('Actions on Cuboid', () => { it('Draw a Cuboid shape in two ways (From rectangle, by 4 points)', () => { cy.createCuboid(createCuboidShape2Points); cy.get('.cvat-canvas-container').trigger('mousemove', 300, 400); - cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated') + cy.get('#cvat_canvas_shape_1').should('have.class', 
'cvat_canvas_shape_activated'); // Increase code coverage for cvat-canvas/src/typescript/svg.patch.ts. Block start // Checking for changes in the size and orientation of the shape is based on @@ -95,7 +95,7 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 360, 340); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 360, 340, {button: 0}) + .trigger('mousedown', 360, 340, { button: 0 }) .trigger('mousemove', 360, 240) .trigger('mouseup', 360, 240); @@ -103,7 +103,7 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 360, 340); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 360, 340, {which: 1}) + .trigger('mousedown', 360, 340, { which: 1 }) .trigger('mousemove', 430, 340) .trigger('mouseup', 430, 340); @@ -111,7 +111,7 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 250, 250); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 250, 250, {button: 0}) + .trigger('mousedown', 250, 250, { button: 0 }) .trigger('mousemove', 200, 250) .trigger('mouseup', 200, 250); @@ -119,7 +119,7 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 350, 250); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 350, 250, {button: 0}) + .trigger('mousedown', 350, 250, { button: 0 }) .trigger('mousemove', 300, 250) .trigger('mouseup', 300, 250); @@ -127,13 +127,13 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 200, 350); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 200, 350, {which: 1}) + .trigger('mousedown', 200, 350, { which: 1 }) .trigger('mousemove', 150, 350) .trigger('mouseup', 150, 350); cy.get('.cvat-canvas-container') // Orientation to right. 
drCenter.hide() .trigger('mouseenter', 300, 200) - .trigger('mousedown', 300, 200, {which: 1}) + .trigger('mousedown', 300, 200, { which: 1 }) .trigger('mousemove', 150, 200) .trigger('mouseup', 150, 200); @@ -141,7 +141,7 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 85, 270); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 85, 270, {which: 1}) + .trigger('mousedown', 85, 270, { which: 1 }) .trigger('mousemove', 120, 270) .trigger('mouseup', 120, 270); @@ -149,19 +149,19 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 120, 410); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 120, 410, {button: 0}) + .trigger('mousedown', 120, 410, { button: 0 }) .trigger('mousemove', 120, 350) .trigger('mouseup', 120, 350); cy.get('.cvat-canvas-container') // this.face .trigger('mouseenter', 230, 300) - .trigger('mousedown', 230, 300, {which: 1}) + .trigger('mousedown', 230, 300, { which: 1 }) .trigger('mousemove', 200, 300) .trigger('mouseup', 200, 300); cy.get('.cvat-canvas-container') // this.right .trigger('mouseenter', 250, 240) - .trigger('mousedown', 250, 240, {which: 1}) + .trigger('mousedown', 250, 240, { which: 1 }) .trigger('mousemove', 280, 200) .trigger('mouseup', 280, 200); @@ -169,8 +169,8 @@ context('Actions on Cuboid', () => { .trigger('mouseenter', 90, 215); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 90, 215, {button: 0, shiftKey: true}) - .trigger('mousemove', 90, 270, {shiftKey: true}) + .trigger('mousedown', 90, 215, { button: 0, shiftKey: true }) + .trigger('mousemove', 90, 270, { shiftKey: true }) .trigger('mouseup', 90, 270); cy.get('.cvat-appearance-cuboid-projections-checkbox').click(); // if (v === true) @@ -182,58 +182,57 @@ context('Actions on Cuboid', () => { cy.get('.cvat-canvas-container') // Moving the shape for further testing convenience .trigger('mouseenter', 150, 305) - .trigger('mousedown', 230, 300, {which: 1}) + .trigger('mousedown', 230, 300, { which: 1 }) .trigger('mousemove', 400, 200) .trigger('mouseup', 400, 200); cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) ecle{} .trigger('mouseenter', 260, 250); cy.get('.cvat_canvas_selected_point').should('exist'); - cy.get('.cvat-canvas-container') - .dblclick(260, 250, {shiftKey: true}) + cy.get('.cvat-canvas-container').dblclick(260, 250, { shiftKey: true }); cy.get('.cvat-canvas-container') // Change orientation to left .trigger('mouseenter', 300, 130) - .trigger('mousedown', 300, 130, {which: 1}) + .trigger('mousedown', 300, 130, { which: 1 }) .trigger('mousemove', 500, 100) .trigger('mouseup', 500, 100); cy.get('.cvat-canvas-container') // frCenter - .trigger('mouseenter', 465, 180) + .trigger('mouseenter', 465, 180); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 465, 180, {which: 1}) + .trigger('mousedown', 465, 180, { which: 1 }) .trigger('mousemove', 500, 180) .trigger('mouseup', 500, 180); cy.get('.cvat-canvas-container') // ftCenter - .trigger('mouseenter', 395, 125) + .trigger('mouseenter', 395, 125); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 395, 125, {which: 1}) + .trigger('mousedown', 395, 125, { which: 1 }) .trigger('mousemove', 395, 150) .trigger('mouseup', 395, 150); cy.get('.cvat-canvas-container') // 
fbCenter - .trigger('mouseenter', 400, 265) + .trigger('mouseenter', 400, 265); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 400, 265, {which: 1}) + .trigger('mousedown', 400, 265, { which: 1 }) .trigger('mousemove', 400, 250) .trigger('mouseup', 400, 250); cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) - .trigger('mouseenter', 600, 180) + .trigger('mouseenter', 600, 180); cy.get('.cvat_canvas_selected_point').should('exist'); cy.get('.cvat-canvas-container') - .trigger('mousedown', 600, 180, {button: 0, shiftKey: true}) - .trigger('mousemove', 600, 150, {shiftKey: true}) + .trigger('mousedown', 600, 180, { button: 0, shiftKey: true }) + .trigger('mousemove', 600, 150, { shiftKey: true }) .trigger('mouseup', 600, 150) - .dblclick(600, 150, {shiftKey: true}); + .dblclick(600, 150, { shiftKey: true }); cy.get('.cvat-canvas-container') // this.left .trigger('mouseenter', 400, 130) - .trigger('mousedown', 400, 130, {which: 1}) + .trigger('mousedown', 400, 130, { which: 1 }) .trigger('mousemove', 400, 100) .trigger('mouseup', 400, 100) .trigger('mouseout', 400, 100); diff --git a/tests/cypress/integration/actions_tasks/case_71_shortcuts_window.js b/tests/cypress/integration/actions_tasks/case_71_shortcuts_window.js index b96c6332a63a39e522a6d8f4291cc42e24544cef..cd0493801cb18a16966006c2e53373e08592f657 100644 --- a/tests/cypress/integration/actions_tasks/case_71_shortcuts_window.js +++ b/tests/cypress/integration/actions_tasks/case_71_shortcuts_window.js @@ -18,27 +18,37 @@ context('Shortcuts window.', () => { describe(`Testing case "${caseId}"`, () => { it('Press "F1" from a task. Shortcuts window be visible. Closing the modal window by button "OK".', () => { cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); - cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => { - cy.get('.cvat-shortcuts-modal-window-table').within(() => { - cy.get('tr').should('exist').then(($shortcutsTableTrCount) => { - shortcutsTableTrCount = $shortcutsTableTrCount.length; + cy.get('.cvat-shortcuts-modal-window') + .should('exist') + .and('be.visible') + .within(() => { + cy.get('.cvat-shortcuts-modal-window-table').within(() => { + cy.get('tr') + .should('exist') + .then(($shortcutsTableTrCount) => { + shortcutsTableTrCount = $shortcutsTableTrCount.length; + }); }); + cy.contains('button', 'OK').click(); }); - cy.contains('button', 'OK').click(); - }); cy.get('.cvat-shortcuts-modal-window').should('not.be.visible'); }); it('Open a job. Press "F1". Shortcuts window be visible. 
Closing the modal window by F1.', () => { cy.openJob(); cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); - cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => { - cy.get('.cvat-shortcuts-modal-window-table').within(() => { - cy.get('tr').should('exist').then(($shortcutsTableTrCount) => { - expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount); + cy.get('.cvat-shortcuts-modal-window') + .should('exist') + .and('be.visible') + .within(() => { + cy.get('.cvat-shortcuts-modal-window-table').within(() => { + cy.get('tr') + .should('exist') + .then(($shortcutsTableTrCount) => { + expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount); + }); }); }); - }); cy.get('body').trigger('keydown', { keyCode: keyCodeF1 }); cy.get('.cvat-shortcuts-modal-window').should('not.be.visible'); }); diff --git a/tests/cypress/integration/actions_tasks/case_75_overlap_size.js b/tests/cypress/integration/actions_tasks/case_75_overlap_size.js index 44cfc8f606710fecf3a153b6c4109906ad174942..8b384447bffd9a9a14f707832e7ddd1afa96fd89 100644 --- a/tests/cypress/integration/actions_tasks/case_75_overlap_size.js +++ b/tests/cypress/integration/actions_tasks/case_75_overlap_size.js @@ -33,7 +33,15 @@ context('Overlap size.', () => { cy.login(); cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount); cy.createZipArchive(directoryToArchive, archivePath); - cy.createAnnotationTask(taskName, labelName, attrName, textDefaultValue, archiveName, false, advancedConfigurationParams); + cy.createAnnotationTask( + taskName, + labelName, + attrName, + textDefaultValue, + archiveName, + false, + advancedConfigurationParams, + ); cy.openTask(taskName); }); @@ -45,31 +53,43 @@ context('Overlap size.', () => { describe(`Testing case "${caseId}"`, () => { it('The task parameters is correct.', () => { cy.get('.cvat-task-parameters').within(() => { - cy.get('table').find('tr').last().find('td').then(($taskParameters) => { - expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize); - expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize); - }); + cy.get('table') + .find('tr') + .last() + .find('td') + .then(($taskParameters) => { + expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize); + expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize); + }); }); }); it('The range of frame values corresponds to the parameters.', () => { cy.getJobNum(0).then(($job) => { - cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => { - expect(Number($frameRange.text().split('-')[1])).equal(advancedConfigurationParams.segmentSize - 1); // expected 4 to equal 4 - }); + cy.contains('a', `Job #${$job}`) + .parents('tr') + .find('.cvat-job-item-frames') + .then(($frameRange) => { + expect(Number($frameRange.text().split('-')[1])).equal( + advancedConfigurationParams.segmentSize - 1, + ); // expected 4 to equal 4 + }); }); cy.getJobNum(1).then(($job) => { - cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => { - expect(Number($frameRange.text().split('-')[0])).equal(advancedConfigurationParams.segmentSize - 2); // expected 3 to equal 3 - }); + cy.contains('a', `Job #${$job}`) + .parents('tr') + .find('.cvat-job-item-frames') + .then(($frameRange) => { + expect(Number($frameRange.text().split('-')[0])).equal( + 
advancedConfigurationParams.segmentSize - 2, + ); // expected 3 to equal 3 + }); }); }); it('The range of frame values in a job corresponds to the parameters.', () => { cy.openJob(0); - cy.get('.cvat-player-frame-selector') - .find('input[role="spinbutton"]') - .should('have.value', '0'); + cy.get('.cvat-player-frame-selector').find('input[role="spinbutton"]').should('have.value', '0'); cy.get('.cvat-player-last-button').click(); cy.get('.cvat-player-frame-selector') .find('input[role="spinbutton"]') diff --git a/tests/cypress/integration/actions_tasks2/case_31_label_constructor_color_name_label.js b/tests/cypress/integration/actions_tasks2/case_31_label_constructor_color_name_label.js index 73345f04e774fdd5bd4598122e4beb158ca87a56..78314ac223f3573eb4d9739d639841e46c1a965c 100644 --- a/tests/cypress/integration/actions_tasks2/case_31_label_constructor_color_name_label.js +++ b/tests/cypress/integration/actions_tasks2/case_31_label_constructor_color_name_label.js @@ -170,7 +170,8 @@ context('Label constructor. Color label. Label name editing', () => { }); cy.get('.cvat-change-task-label-color-badge') .children() - .should('have.attr', 'style').and('contain', 'rgb(179, 179, 179)'); + .should('have.attr', 'style') + .and('contain', 'rgb(179, 179, 179)'); cy.get('.cvat-label-constructor-updater').contains('button', 'Done').click(); cy.contains('.cvat-constructor-viewer-item', `Case ${caseId}`) .should('have.attr', 'style') diff --git a/tests/cypress/integration/actions_tasks3/case_74_drag_canvas.js b/tests/cypress/integration/actions_tasks3/case_74_drag_canvas.js index bfa01c8c2ddbb894a4d14d113c3d41e3eae6585d..a02e2d56e498ba88bc1881f2fb199263b0e3cc05 100644 --- a/tests/cypress/integration/actions_tasks3/case_74_drag_canvas.js +++ b/tests/cypress/integration/actions_tasks3/case_74_drag_canvas.js @@ -17,27 +17,33 @@ context('Drag canvas.', () => { describe(`Testing case "${caseId}"`, () => { it('Drag canvas', () => { - cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { - topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', '')); - leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', '')); - }); + cy.get('#cvat_canvas_background') + .invoke('attr', 'style') + .then(($style) => { + topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', '')); + leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', '')); + }); cy.get('.cvat-move-control').click(); // Without this action, the function is not covered - cy.get('.cvat-canvas-container').trigger('mousedown', {button: 0}).trigger('mousemove', 500, 500); + cy.get('.cvat-canvas-container').trigger('mousedown', { button: 0 }).trigger('mousemove', 500, 500); }); it('Top and left style parameters are changed.', () => { - cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { - expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95 - expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95 - }); + cy.get('#cvat_canvas_background') + .invoke('attr', 'style') + .then(($style) => { + expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95 + expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95 + }); }); it('Double click on canvas. 
Parameters returned to their original value', () => { cy.get('.cvat-canvas-container').dblclick(); - cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => { - expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20 - expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73 - }); + cy.get('#cvat_canvas_background') + .invoke('attr', 'style') + .then(($style) => { + expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20 + expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73 + }); }); }); }); diff --git a/tests/cypress/integration/actions_users/case_73_reset_password_notification.js b/tests/cypress/integration/actions_users/case_73_reset_password_notification.js index e44c7352e87fc9f946cb3d1adbe8c747d6ace39e..91006f32b0bcdea9deb8af84973f87481155d641 100644 --- a/tests/cypress/integration/actions_users/case_73_reset_password_notification.js +++ b/tests/cypress/integration/actions_users/case_73_reset_password_notification.js @@ -6,7 +6,7 @@ context('Reset password notification.', () => { const caseId = '73'; - const dummyEmail = 'admin@local.local' + const dummyEmail = 'admin@local.local'; before(() => { cy.visit('auth/login'); diff --git a/tests/cypress/support/commands.js b/tests/cypress/support/commands.js index d91cfa96b5023198ab70b12a4f0b00229ef8f7af..d2d0b926231ad1526bf58408b2494f37e828733c 100644 --- a/tests/cypress/support/commands.js +++ b/tests/cypress/support/commands.js @@ -424,7 +424,10 @@ Cypress.Commands.add('updateAttributes', (multiAttrParams) => { } if (multiAttrParams.mutable) { cy.get('.cvat-attribute-mutable-checkbox') - .find('[type="checkbox"]').should('not.be.checked').check().should('be.checked'); + .find('[type="checkbox"]') + .should('not.be.checked') + .check() + .should('be.checked'); } }); }); diff --git a/tests/cypress/support/commands_filters_feature.js b/tests/cypress/support/commands_filters_feature.js index 4258a762e176b2ed58dc627c2d1e2c54d5588a61..2b45d86f2b03611a16660c923d57f0a594582dfb 100644 --- a/tests/cypress/support/commands_filters_feature.js +++ b/tests/cypress/support/commands_filters_feature.js @@ -66,7 +66,7 @@ Cypress.Commands.add('setGroupCondition', (groupIndex, condition) => { Cypress.Commands.add( 'setFilter', - ({groupIndex, ruleIndex, field, operator, valueSource, value, label, labelAttr, submit}) => { + ({ groupIndex, ruleIndex, field, operator, valueSource, value, label, labelAttr, submit }) => { cy.сheckFiltersModalOpened(); cy.collectGroupID().then((groupIdIndex) => { cy.collectRuleID().then((ruleIdIndex) => { diff --git a/utils/cli/cli.py b/utils/cli/cli.py index ae08ad1a327926e5737bdd2486f588b96197d0c2..52f60ee1e4bca717342c1da6c938f43b4784ea5a 100755 --- a/utils/cli/cli.py +++ b/utils/cli/cli.py @@ -19,12 +19,16 @@ def config_log(level): def main(): - actions = {'create': CLI.tasks_create, - 'delete': CLI.tasks_delete, - 'ls': CLI.tasks_list, - 'frames': CLI.tasks_frame, - 'dump': CLI.tasks_dump, - 'upload': CLI.tasks_upload} + actions = { + 'create': CLI.tasks_create, + 'delete': CLI.tasks_delete, + 'ls': CLI.tasks_list, + 'frames': CLI.tasks_frame, + 'dump': CLI.tasks_dump, + 'upload': CLI.tasks_upload, + 'export': CLI.tasks_export, + 'import': CLI.tasks_import, + } args = parser.parse_args() config_log(args.loglevel) with requests.Session() as session: diff --git 
a/utils/cli/core/core.py b/utils/cli/core/core.py index 5bcf1c22252797ef1b370de057ca6ff44095210a..3de53809266cf45500021d8ba99ec779a359ecfc 100644 --- a/utils/cli/core/core.py +++ b/utils/cli/core/core.py @@ -213,6 +213,53 @@ class CLI(): "with annotation file {} finished".format(filename) log.info(logger_string) + def tasks_export(self, task_id, filename, export_verification_period=3, **kwargs): + """ Export and download a whole task """ + url = self.api.tasks_id(task_id) + export_url = url + '?action=export' + + while True: + response = self.session.get(export_url) + response.raise_for_status() + log.info('STATUS {}'.format(response.status_code)) + if response.status_code == 201: + break + sleep(export_verification_period) + + response = self.session.get(url + '?action=download') + response.raise_for_status() + + with open(filename, 'wb') as fp: + fp.write(response.content) + logger_string = "Task {} has been exported successfully ".format(task_id) +\ + "to {}".format(os.path.abspath(filename)) + log.info(logger_string) + + def tasks_import(self, filename, import_verification_period=3, **kwargs): + """ Import a task """ + url = self.api.tasks + '?action=import' + with open(filename, 'rb') as input_file: + response = self.session.post( + url, + files={'task_file': input_file} + ) + response.raise_for_status() + response_json = response.json() + rq_id = response_json['rq_id'] + while True: + sleep(import_verification_period) + response = self.session.post( + url, + data={'rq_id': rq_id} + ) + response.raise_for_status() + if response.status_code == 201: + break + + task_id = response.json()['id'] + logger_string = "Task has been imported successfully. Task ID: {}".format(task_id) + log.info(logger_string) + def login(self, credentials): url = self.api.login auth = {'username': credentials[0], 'password': credentials[1]} diff --git a/utils/cli/core/definition.py b/utils/cli/core/definition.py index db44551e51385b2be406de66cda4ac3d72cef3dd..bab3cacb067a88b8666085c8ac7bc82ee35f494b 100644 --- a/utils/cli/core/definition.py +++ b/utils/cli/core/definition.py @@ -310,3 +310,36 @@ upload_parser.add_argument( default='CVAT 1.1', help='annotation format (default: %(default)s)' ) + +####################################################################### +# Export task +####################################################################### + +export_task_parser = task_subparser.add_parser( + 'export', + description='Export a CVAT task.' +) +export_task_parser.add_argument( + 'task_id', + type=int, + help='task ID' +) +export_task_parser.add_argument( + 'filename', + type=str, + help='output file' +) + +####################################################################### +# Import task +####################################################################### + +import_task_parser = task_subparser.add_parser( + 'import', + description='Import a CVAT task.' +) +import_task_parser.add_argument( + 'filename', + type=str, + help='upload file' +)
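
A minimal usage sketch for the two new CLI subcommands added above (illustrative only: the task ID, file name, and invocation path are placeholders, and any global connection options that cli.py already defines are omitted):

    # Back up task 42 into a portable archive. The command polls the server
    # every 3 seconds (export_verification_period) until it answers 201,
    # then downloads the archive via ?action=download.
    python utils/cli/cli.py export 42 task_42_backup.zip

    # Recreate a task from such an archive. The first POST returns 202 with an
    # rq_id, which is re-posted (every import_verification_period seconds) until
    # the server answers 201 with the new task ID.
    python utils/cli/cli.py import task_42_backup.zip

Both subcommands rely on the 202-while-pending / 201-when-done contract implemented in TaskViewSet.create and TaskViewSet.retrieve above.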