未验证 提交 72fdef43 编写于 作者: A Andrey Zhavoronkov 提交者: GitHub

Az/import export tasks (#3056)

* initial version of task export/import feature

* fixed tests

* CLI

* fix comments

* updated license headers

* fix eslint issues

* fix comments

* fixed comments

* reverted changes in *.md files

* fixed comments

* fix pylint issues

* fix import for share case

* improved unit tests

* updated changelog

* fixed Maria's comments

* fixed comments

* Fixed position of create new task button

* Fixed span position

* fixed comments
Co-authored-by: NNikita Manovich <nikita.manovich@intel.com>
Co-authored-by: NBoris Sekachev <boris.sekachev@intel.com>
上级 6665fe1d
......@@ -11,6 +11,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Support of context images for 2D image tasks (<https://github.com/openvinotoolkit/cvat/pull/3122>)
- Filter `is_active` for user list (<https://github.com/openvinotoolkit/cvat/pull/3235>)
- Ability to export/import tasks (<https://github.com/openvinotoolkit/cvat/pull/3056>)
### Changed
......
......@@ -490,6 +490,59 @@
});
}
async function exportTask(id) {
    const { backendAPI } = config;
    const url = `${backendAPI}/tasks/${id}`;

    return new Promise((resolve, reject) => {
        // Poll the export endpoint until the backup archive is ready,
        // then resolve with the download URL.
        const poll = async () => {
            try {
                const response = await Axios.get(`${url}?action=export`, {
                    proxy: config.proxy,
                });
                // 202 Accepted means the server is still preparing the archive.
                if (response.status === 202) {
                    setTimeout(poll, 3000);
                    return;
                }
                resolve(`${url}?action=download`);
            } catch (errorData) {
                reject(generateError(errorData));
            }
        };

        setTimeout(poll);
    });
}
async function importTask(file) {
    const { backendAPI } = config;

    // First request carries the archive; follow-up polls carry only the rq job id.
    let form = new FormData();
    form.append('task_file', file);

    return new Promise((resolve, reject) => {
        const poll = async () => {
            try {
                const response = await Axios.post(`${backendAPI}/tasks?action=import`, form, {
                    proxy: config.proxy,
                });
                if (response.status === 202) {
                    // Import still in progress: switch the payload to the job id and retry.
                    form = new FormData();
                    form.append('rq_id', response.data.rq_id);
                    setTimeout(poll, 3000);
                } else {
                    // Import finished: fetch the newly created task and resolve with it.
                    const [importedTask] = await getTasks(`?id=${response.data.id}`);
                    resolve(importedTask);
                }
            } catch (errorData) {
                reject(generateError(errorData));
            }
        };

        setTimeout(poll);
    });
}
async function createTask(taskSpec, taskDataSpec, onUpdate) {
const { backendAPI } = config;
......@@ -1157,6 +1210,8 @@
createTask,
deleteTask,
exportDataset,
exportTask,
importTask,
}),
writable: false,
},
......
......@@ -1664,6 +1664,36 @@
const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.delete);
return result;
}
/**
* Method makes a backup of a task
* @method export
* @memberof module:API.cvat.classes.Task
* @readonly
* @instance
* @async
* @throws {module:API.cvat.exceptions.ServerError}
* @throws {module:API.cvat.exceptions.PluginError}
*/
async export() {
const result = await PluginRegistry.apiWrapper.call(this, Task.prototype.export);
return result;
}
/**
* Method imports a task from a backup
* @method import
* @memberof module:API.cvat.classes.Task
* @readonly
* @instance
* @async
* @throws {module:API.cvat.exceptions.ServerError}
* @throws {module:API.cvat.exceptions.PluginError}
*/
static async import(file) {
const result = await PluginRegistry.apiWrapper.call(this, Task.import, file);
return result;
}
}
module.exports = {
......@@ -2073,6 +2103,16 @@
return result;
};
Task.prototype.export.implementation = async function () {
    // Delegate to the server proxy, which polls until the backup URL is ready.
    return serverProxy.tasks.exportTask(this.id);
};
Task.import.implementation = async function (file) {
    // Delegate to the server proxy, which uploads the archive and polls the rq job.
    return serverProxy.tasks.importTask(file);
};
Task.prototype.frames.get.implementation = async function (frame, isPlaying, step) {
if (!Number.isInteger(frame) || frame < 0) {
throw new ArgumentError(`Frame must be a positive integer. Got: "${frame}"`);
......
......@@ -53735,9 +53735,9 @@
}
},
"rc-menu": {
"version": "8.10.6",
"resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-8.10.6.tgz",
"integrity": "sha512-RVkd8XChwSmVOdNULbqLNnABthRZWnhqct1Q74onEXTClsXvsLADMhlIJtw/umglVSECM+14TJdIli9rl2Bzlw==",
"version": "8.10.7",
"resolved": "https://registry.npmjs.org/rc-menu/-/rc-menu-8.10.7.tgz",
"integrity": "sha512-m/ypV7OjkkUsMdutzMUxEI8tWyi0Y1TQ5YkSDk7k2uv2aCKkHYEoDKsDAfcPeejo3HMo2z5unWE+jD+dCphraw==",
"requires": {
"@babel/runtime": "^7.10.1",
"classnames": "2.x",
......@@ -53758,9 +53758,9 @@
}
},
"rc-util": {
"version": "5.9.4",
"resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.9.4.tgz",
"integrity": "sha512-pzFmYZsKLJ1p+Uv4NqA4aNBaFh8/hOQxOOxA5G4TiyPboa0o/PjminxUCKvoSwVJVW5YgleSM2XPCTpTV6DCsQ==",
"version": "5.9.8",
"resolved": "https://registry.npmjs.org/rc-util/-/rc-util-5.9.8.tgz",
"integrity": "sha512-typLSHYGf5irvGLYQshs0Ra3aze086h0FhzsAkyirMunYZ7b3Te8gKa5PVaanoHaZa9sS6qx98BxgysoRP+6Tw==",
"requires": {
"@babel/runtime": "^7.12.5",
"react-is": "^16.12.0",
......@@ -73,6 +73,7 @@
"mousetrap": "^1.6.5",
"platform": "^1.3.6",
"prop-types": "^15.7.2",
"rc-menu": "^8.10.7",
"react": "^16.14.0",
"react-awesome-query-builder": "^3.0.0",
"react-color": "^2.19.3",
......
......@@ -35,6 +35,12 @@ export enum TasksActionTypes {
UPDATE_TASK_SUCCESS = 'UPDATE_TASK_SUCCESS',
UPDATE_TASK_FAILED = 'UPDATE_TASK_FAILED',
HIDE_EMPTY_TASKS = 'HIDE_EMPTY_TASKS',
EXPORT_TASK = 'EXPORT_TASK',
EXPORT_TASK_SUCCESS = 'EXPORT_TASK_SUCCESS',
EXPORT_TASK_FAILED = 'EXPORT_TASK_FAILED',
IMPORT_TASK = 'IMPORT_TASK',
IMPORT_TASK_SUCCESS = 'IMPORT_TASK_SUCCESS',
IMPORT_TASK_FAILED = 'IMPORT_TASK_FAILED',
SWITCH_MOVE_TASK_MODAL_VISIBLE = 'SWITCH_MOVE_TASK_MODAL_VISIBLE',
}
......@@ -214,6 +220,49 @@ export function loadAnnotationsAsync(
};
}
function importTask(): AnyAction {
    // Signals that a task backup upload/import has started.
    return {
        type: TasksActionTypes.IMPORT_TASK,
        payload: {},
    };
}
function importTaskSuccess(task: any): AnyAction {
    // Carries the freshly created task instance to the store.
    return {
        type: TasksActionTypes.IMPORT_TASK_SUCCESS,
        payload: { task },
    };
}
function importTaskFailed(error: any): AnyAction {
    // Carries the import failure reason for the notifications reducer.
    return {
        type: TasksActionTypes.IMPORT_TASK_FAILED,
        payload: { error },
    };
}
export function importTaskAsync(file: File): ThunkAction<Promise<void>, {}, {}, AnyAction> {
    // Thunk: uploads a task backup archive and reflects progress in the store.
    return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
        try {
            dispatch(importTask());
            const importedInstance = await cvat.classes.Task.import(file);
            dispatch(importTaskSuccess(importedInstance));
        } catch (error) {
            dispatch(importTaskFailed(error));
        }
    };
}
function exportDataset(task: any, exporter: any): AnyAction {
const action = {
type: TasksActionTypes.EXPORT_DATASET,
......@@ -268,6 +317,56 @@ export function exportDatasetAsync(task: any, exporter: any): ThunkAction<Promis
};
}
function exportTask(taskID: number): AnyAction {
    // Marks the given task as having an active backup export.
    return {
        type: TasksActionTypes.EXPORT_TASK,
        payload: { taskID },
    };
}
function exportTaskSuccess(taskID: number): AnyAction {
    // Clears the active-export flag for the given task.
    return {
        type: TasksActionTypes.EXPORT_TASK_SUCCESS,
        payload: { taskID },
    };
}
function exportTaskFailed(taskID: number, error: Error): AnyAction {
    // Clears the active-export flag and carries the failure reason.
    return {
        type: TasksActionTypes.EXPORT_TASK_FAILED,
        payload: { taskID, error },
    };
}
export function exportTaskAsync(taskInstance: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
    // Thunk: asks the core for a backup download URL, then triggers the
    // browser download through the hidden anchor element.
    return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
        dispatch(exportTask(taskInstance.id));

        try {
            const downloadURL = await taskInstance.export();
            const anchor = window.document.getElementById('downloadAnchor') as HTMLAnchorElement;
            anchor.href = downloadURL;
            anchor.click();
            dispatch(exportTaskSuccess(taskInstance.id));
        } catch (error) {
            dispatch(exportTaskFailed(taskInstance.id, error));
        }
    };
}
function deleteTask(taskID: number): AnyAction {
const action = {
type: TasksActionTypes.DELETE_TASK,
......
......@@ -6,6 +6,7 @@ import './styles.scss';
import React from 'react';
import Menu from 'antd/lib/menu';
import Modal from 'antd/lib/modal';
import { LoadingOutlined } from '@ant-design/icons';
// eslint-disable-next-line import/no-extraneous-dependencies
import { MenuInfo } from 'rc-menu/lib/interface';
import DumpSubmenu from './dump-submenu';
......@@ -25,6 +26,7 @@ interface Props {
inferenceIsActive: boolean;
taskDimension: DimensionType;
onClickMenu: (params: MenuInfo, file?: File) => void;
exportIsActive: boolean;
}
export enum Actions {
......@@ -35,6 +37,7 @@ export enum Actions {
RUN_AUTO_ANNOTATION = 'run_auto_annotation',
MOVE_TASK_TO_PROJECT = 'move_task_to_project',
OPEN_BUG_TRACKER = 'open_bug_tracker',
EXPORT_TASK = 'export_task',
}
export default function ActionsMenuComponent(props: Props): JSX.Element {
......@@ -50,6 +53,7 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
exportActivities,
loadActivity,
taskDimension,
exportIsActive,
} = props;
let latestParams: MenuInfo | null = null;
......@@ -128,6 +132,10 @@ export default function ActionsMenuComponent(props: Props): JSX.Element {
<Menu.Item disabled={inferenceIsActive} key={Actions.RUN_AUTO_ANNOTATION}>
Automatic annotation
</Menu.Item>
<Menu.Item key={Actions.EXPORT_TASK} disabled={exportIsActive}>
{exportIsActive && <LoadingOutlined id='cvat-export-task-loading' />}
Export Task
</Menu.Item>
<hr />
<Menu.Item key={Actions.MOVE_TASK_TO_PROJECT}>Move to project</Menu.Item>
<Menu.Item key={Actions.DELETE_TASK}>Delete</Menu.Item>
......
......@@ -48,3 +48,7 @@
.cvat-menu-icon {
transform: scale(0.5);
}
// Spinner shown in the task actions menu while a backup export is running.
#cvat-export-task-loading {
    // Bug fix: non-zero lengths require a unit in CSS; the original
    // `margin-left: 10;` was invalid and silently dropped by browsers.
    margin-left: 10px;
}
......@@ -95,7 +95,12 @@ function ShortcutsDialog(props: StateToProps & DispatchToProps): JSX.Element | n
zIndex={1001} /* default antd is 1000 */
className='cvat-shortcuts-modal-window'
>
<Table dataSource={dataSource} columns={columns} size='small' className='cvat-shortcuts-modal-window-table' />
<Table
dataSource={dataSource}
columns={columns}
size='small'
className='cvat-shortcuts-modal-window-table'
/>
</Modal>
);
}
......
......@@ -11,6 +11,23 @@
height: 100%;
width: 100%;
.cvat-tasks-page-top-bar {
> div:nth-child(1) {
> div:nth-child(1) {
width: 100%;
> div:nth-child(1) {
display: flex;
> span:nth-child(2) {
width: 200px;
margin-left: 10px;
}
}
}
}
}
> div:nth-child(2) {
height: 83%;
padding-top: 10px;
......@@ -19,22 +36,6 @@
> div:nth-child(3) {
padding-top: 10px;
}
> div:nth-child(1) {
> div:nth-child(1) {
display: flex;
> span:nth-child(2) {
width: 200px;
margin-left: 10px;
}
}
> div:nth-child(2) {
display: flex;
justify-content: flex-end;
}
}
}
/* empty-tasks icon */
......@@ -157,3 +158,11 @@
#cvat-create-task-button {
padding: 0 30px;
}
// "Import Task" button on the tasks page top bar.
#cvat-import-task-button {
    padding: 0 30px;
}
// Spinner shown inside the "Import Task" button while an import is running.
#cvat-import-task-button-loading {
    // Bug fix: non-zero lengths require a unit in CSS; the original
    // `margin-left: 10;` was invalid and silently dropped by browsers.
    margin-left: 10px;
}
// Copyright (C) 2020 Intel Corporation
// Copyright (C) 2020-2021 Intel Corporation
//
// SPDX-License-Identifier: MIT
......@@ -25,6 +25,8 @@ interface TasksPageProps {
numberOfHiddenTasks: number;
onGetTasks: (gettingQuery: TasksQuery) => void;
hideEmptyTasks: (hideEmpty: boolean) => void;
onImportTask: (file: File) => void;
taskImporting: boolean;
}
function getSearchField(gettingQuery: TasksQuery): string {
......@@ -81,9 +83,20 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
}
public componentDidUpdate(prevProps: TasksPageProps & RouteComponentProps): void {
const { location, gettingQuery, tasksFetching, numberOfHiddenTasks, onGetTasks, hideEmptyTasks } = this.props;
if (prevProps.location.search !== location.search) {
const {
location,
gettingQuery,
tasksFetching,
numberOfHiddenTasks,
onGetTasks,
hideEmptyTasks,
taskImporting,
} = this.props;
if (
prevProps.location.search !== location.search ||
(prevProps.taskImporting === true && taskImporting === false)
) {
// get new tasks if any query changes
const query = updateQuery(gettingQuery, location.search);
message.destroy();
......@@ -186,7 +199,9 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
}
public render(): JSX.Element {
const { tasksFetching, gettingQuery, numberOfVisibleTasks } = this.props;
const {
tasksFetching, gettingQuery, numberOfVisibleTasks, onImportTask, taskImporting,
} = this.props;
if (tasksFetching) {
return <Spin size='large' className='cvat-spinner' />;
......@@ -194,7 +209,12 @@ class TasksPageComponent extends React.PureComponent<TasksPageProps & RouteCompo
return (
<div className='cvat-tasks-page'>
<TopBar onSearch={this.handleSearch} searchValue={getSearchField(gettingQuery)} />
<TopBar
onSearch={this.handleSearch}
searchValue={getSearchField(gettingQuery)}
onFileUpload={onImportTask}
taskImporting={taskImporting}
/>
{numberOfVisibleTasks ? (
<TaskListContainer onSwitchPage={this.handlePagination} />
) : (
......
......@@ -5,50 +5,83 @@
import React from 'react';
import { useHistory } from 'react-router';
import { Row, Col } from 'antd/lib/grid';
import { PlusOutlined } from '@ant-design/icons';
import { PlusOutlined, UploadOutlined, LoadingOutlined } from '@ant-design/icons';
import Button from 'antd/lib/button';
import Input from 'antd/lib/input';
import Text from 'antd/lib/typography/Text';
import Upload from 'antd/lib/upload';
import SearchTooltip from 'components/search-tooltip/search-tooltip';
interface VisibleTopBarProps {
onSearch: (value: string) => void;
onFileUpload(file: File): void;
searchValue: string;
taskImporting: boolean;
}
export default function TopBarComponent(props: VisibleTopBarProps): JSX.Element {
const { searchValue, onSearch } = props;
const {
searchValue, onSearch, onFileUpload, taskImporting,
} = props;
const history = useHistory();
return (
<>
<Row justify='center' align='middle'>
<Col md={11} lg={9} xl={8} xxl={7}>
<Text className='cvat-title'>Tasks</Text>
<SearchTooltip instance='task'>
<Input.Search
className='cvat-task-page-search-task'
defaultValue={searchValue}
onSearch={onSearch}
size='large'
placeholder='Search'
/>
</SearchTooltip>
</Col>
<Col md={{ span: 11 }} lg={{ span: 9 }} xl={{ span: 8 }} xxl={{ span: 7 }}>
<Button
size='large'
id='cvat-create-task-button'
type='primary'
onClick={(): void => history.push('/tasks/create')}
icon={<PlusOutlined />}
>
Create new task
</Button>
</Col>
</Row>
</>
<Row className='cvat-tasks-page-top-bar' justify='center' align='middle'>
<Col md={22} lg={18} xl={16} xxl={14}>
<Row justify='space-between' align='bottom'>
<Col>
<Text className='cvat-title'>Tasks</Text>
<SearchTooltip instance='task'>
<Input.Search
className='cvat-task-page-search-task'
defaultValue={searchValue}
onSearch={onSearch}
size='large'
placeholder='Search'
/>
</SearchTooltip>
</Col>
<Col>
<Row gutter={8}>
<Col>
<Upload
accept='.zip'
multiple={false}
showUploadList={false}
beforeUpload={(file: File): boolean => {
onFileUpload(file);
return false;
}}
>
<Button
size='large'
id='cvat-import-task-button'
type='primary'
disabled={taskImporting}
icon={<UploadOutlined />}
>
Import Task
{taskImporting && <LoadingOutlined id='cvat-import-task-button-loading' />}
</Button>
</Upload>
</Col>
<Col>
<Button
size='large'
id='cvat-create-task-button'
type='primary'
onClick={(): void => history.push('/tasks/create')}
icon={<PlusOutlined />}
>
Create new task
</Button>
</Col>
</Row>
</Col>
</Row>
</Col>
</Row>
);
}
......@@ -16,6 +16,7 @@ import {
loadAnnotationsAsync,
exportDatasetAsync,
deleteTaskAsync,
exportTaskAsync,
switchMoveTaskModalVisible,
} from 'actions/tasks-actions';
......@@ -29,6 +30,7 @@ interface StateToProps {
dumpActivities: string[] | null;
exportActivities: string[] | null;
inferenceIsActive: boolean;
exportIsActive: boolean;
}
interface DispatchToProps {
......@@ -37,6 +39,7 @@ interface DispatchToProps {
exportDataset: (taskInstance: any, exporter: any) => void;
deleteTask: (taskInstance: any) => void;
openRunModelWindow: (taskInstance: any) => void;
exportTask: (taskInstance: any) => void;
openMoveTaskToProjectWindow: (taskInstance: any) => void;
}
......@@ -48,7 +51,9 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
const {
formats: { annotationFormats },
tasks: {
activities: { dumps, loads, exports: activeExports },
activities: {
dumps, loads, exports: activeExports, backups,
},
},
} = state;
......@@ -58,6 +63,7 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
loadActivity: tid in loads ? loads[tid] : null,
annotationFormats,
inferenceIsActive: tid in state.models.inferences,
exportIsActive: tid in backups,
};
}
......@@ -78,6 +84,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
openRunModelWindow: (taskInstance: any): void => {
dispatch(modelsActions.showRunModelDialog(taskInstance));
},
exportTask: (taskInstance: any): void => {
dispatch(exportTaskAsync(taskInstance));
},
openMoveTaskToProjectWindow: (taskId: number): void => {
dispatch(switchMoveTaskModalVisible(true, taskId));
},
......@@ -92,12 +101,14 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
dumpActivities,
exportActivities,
inferenceIsActive,
exportIsActive,
loadAnnotations,
dumpAnnotations,
exportDataset,
deleteTask,
openRunModelWindow,
exportTask,
openMoveTaskToProjectWindow,
} = props;
......@@ -131,6 +142,8 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
window.open(`${taskInstance.bugTracker}`, '_blank');
} else if (action === Actions.RUN_AUTO_ANNOTATION) {
openRunModelWindow(taskInstance);
} else if (action === Actions.EXPORT_TASK) {
exportTask(taskInstance);
} else if (action === Actions.MOVE_TASK_TO_PROJECT) {
openMoveTaskToProjectWindow(taskInstance.id);
}
......@@ -150,6 +163,7 @@ function ActionsMenuContainer(props: OwnProps & StateToProps & DispatchToProps):
inferenceIsActive={inferenceIsActive}
onClickMenu={onClickMenu}
taskDimension={taskInstance.dimension}
exportIsActive={exportIsActive}
/>
);
}
......
......@@ -8,7 +8,7 @@ import { Task, TasksQuery, CombinedState } from 'reducers/interfaces';
import TasksPageComponent from 'components/tasks-page/tasks-page';
import { getTasksAsync, hideEmptyTasks } from 'actions/tasks-actions';
import { getTasksAsync, hideEmptyTasks, importTaskAsync } from 'actions/tasks-actions';
interface StateToProps {
tasksFetching: boolean;
......@@ -16,11 +16,13 @@ interface StateToProps {
numberOfTasks: number;
numberOfVisibleTasks: number;
numberOfHiddenTasks: number;
taskImporting: boolean;
}
interface DispatchToProps {
onGetTasks: (gettingQuery: TasksQuery) => void;
hideEmptyTasks: (hideEmpty: boolean) => void;
onImportTask: (file: File) => void;
}
function mapStateToProps(state: CombinedState): StateToProps {
......@@ -34,6 +36,7 @@ function mapStateToProps(state: CombinedState): StateToProps {
numberOfHiddenTasks: tasks.hideEmpty ?
tasks.current.filter((task: Task): boolean => !task.instance.jobs.length).length :
0,
taskImporting: state.tasks.importing,
};
}
......@@ -45,6 +48,9 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
hideEmptyTasks: (hideEmpty: boolean): void => {
dispatch(hideEmptyTasks(hideEmpty));
},
onImportTask: (file: File): void => {
dispatch(importTaskAsync(file));
},
};
}
......
......@@ -73,6 +73,7 @@ export interface Task {
}
export interface TasksState {
importing: boolean;
initialized: boolean;
fetching: boolean;
updating: boolean;
......@@ -105,6 +106,9 @@ export interface TasksState {
status: string;
error: string;
};
backups: {
[tid: number]: boolean;
};
};
}
......@@ -249,9 +253,11 @@ export interface NotificationsState {
updating: null | ErrorState;
dumping: null | ErrorState;
loading: null | ErrorState;
exporting: null | ErrorState;
exportingAsDataset: null | ErrorState;
deleting: null | ErrorState;
creating: null | ErrorState;
exporting: null | ErrorState;
importing: null | ErrorState;
moving: null | ErrorState;
};
formats: {
......@@ -318,6 +324,7 @@ export interface NotificationsState {
messages: {
tasks: {
loadingDone: string;
importingDone: string;
movingDone: string;
};
models: {
......
......@@ -42,9 +42,11 @@ const defaultState: NotificationsState = {
updating: null,
dumping: null,
loading: null,
exporting: null,
exportingAsDataset: null,
deleting: null,
creating: null,
exporting: null,
importing: null,
moving: null,
},
formats: {
......@@ -111,6 +113,7 @@ const defaultState: NotificationsState = {
messages: {
tasks: {
loadingDone: '',
importingDone: '',
movingDone: '',
},
models: {
......@@ -313,7 +316,7 @@ export default function (state = defaultState, action: AnyAction): Notifications
...state.errors,
tasks: {
...state.errors.tasks,
exporting: {
exportingAsDataset: {
message:
'Could not export dataset for the ' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a>`,
......@@ -389,24 +392,6 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.MOVE_TASK_TO_PROJECT_FAILED: {
const taskID = action.payload.task.id;
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
moving: {
message:
'Could not move the' +
`<a href="/tasks/${taskID}" target="_blank">task ${taskID}</a> to a project`,
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.DUMP_ANNOTATIONS_FAILED: {
const taskID = action.payload.task.id;
return {
......@@ -460,16 +445,45 @@ export default function (state = defaultState, action: AnyAction): Notifications
},
};
}
case TasksActionTypes.MOVE_TASK_TO_PROJECT_SUCCESS: {
const { id: taskId, projectId } = action.payload.task;
case TasksActionTypes.EXPORT_TASK_FAILED: {
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
exporting: {
message: 'Could not export the task',
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.IMPORT_TASK_FAILED: {
return {
...state,
errors: {
...state.errors,
tasks: {
...state.errors.tasks,
importing: {
message: 'Could not import the task',
reason: action.payload.error.toString(),
},
},
},
};
}
case TasksActionTypes.IMPORT_TASK_SUCCESS: {
const taskID = action.payload.task.id;
return {
...state,
messages: {
...state.messages,
tasks: {
...state.messages.tasks,
movingDone: `The task #${taskId} has been successfully moved to the project #${projectId}`,
importingDone: `Task has been imported succesfully <a href="/tasks/${taskID}">Open task</a>`,
},
},
};
......
......@@ -3,6 +3,7 @@
// SPDX-License-Identifier: MIT
import { AnyAction } from 'redux';
import { omit } from 'lodash';
import { BoundariesActionTypes } from 'actions/boundaries-actions';
import { TasksActionTypes } from 'actions/tasks-actions';
import { AuthActionTypes } from 'actions/auth-actions';
......@@ -40,7 +41,9 @@ const defaultState: TasksState = {
status: '',
error: '',
},
backups: {},
},
importing: false,
};
export default (state: TasksState = defaultState, action: AnyAction): TasksState => {
......@@ -242,6 +245,49 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
},
};
}
case TasksActionTypes.EXPORT_TASK: {
const { taskID } = action.payload;
const { backups } = state.activities;
return {
...state,
activities: {
...state.activities,
backups: {
...backups,
...Object.fromEntries([[taskID, true]]),
},
},
};
}
case TasksActionTypes.EXPORT_TASK_FAILED:
case TasksActionTypes.EXPORT_TASK_SUCCESS: {
const { taskID } = action.payload;
const { backups } = state.activities;
delete backups[taskID];
return {
...state,
activities: {
...state.activities,
backups: omit(backups, [taskID]),
},
};
}
case TasksActionTypes.IMPORT_TASK: {
return {
...state,
importing: true,
};
}
case TasksActionTypes.IMPORT_TASK_FAILED:
case TasksActionTypes.IMPORT_TASK_SUCCESS: {
return {
...state,
importing: false,
};
}
case TasksActionTypes.CREATE_TASK: {
return {
...state,
......
......@@ -8,18 +8,18 @@ import tempfile
from datetime import timedelta
import django_rq
from datumaro.cli.util import make_file_name
from datumaro.util import to_snake_case
from django.utils import timezone
import cvat.apps.dataset_manager.task as task
from cvat.apps.engine.backup import TaskExporter
from cvat.apps.engine.log import slogger
from cvat.apps.engine.models import Task
from datumaro.cli.util import make_file_name
from datumaro.util import to_snake_case
from .formats.registry import EXPORT_FORMATS, IMPORT_FORMATS
from .util import current_function_name
_MODULE_NAME = __package__ + '.' + osp.splitext(osp.basename(__file__))[0]
def log_exception(logger=None, exc_info=True):
if logger is None:
......@@ -97,6 +97,40 @@ def clear_export_cache(task_id, file_path, file_ctime):
log_exception(slogger.task[task_id])
raise
def backup_task(task_id, output_path):
    """Create (or reuse) a cached backup archive for a task.

    Exports the task via TaskExporter into the task's export cache
    directory and schedules an rq job that removes the archive after
    CACHE_TTL. If a cached archive already exists and is at least as new
    as the task's last update, it is reused and no re-export happens.

    Args:
        task_id: primary key of the engine Task to back up.
        output_path: archive file name; joined onto the task's export
            cache directory to produce the returned absolute path.

    Returns:
        Absolute path of the backup archive inside the export cache.

    Raises:
        Re-raises any exception after logging it to the task's logger.
    """
    try:
        db_task = Task.objects.get(pk=task_id)

        cache_dir = get_export_cache_dir(db_task)
        output_path = osp.join(cache_dir, output_path)

        # Rebuild only if there is no cached archive, or the task was
        # modified after the cached archive was written.
        task_time = timezone.localtime(db_task.updated_date).timestamp()
        if not (osp.exists(output_path) and \
            task_time <= osp.getmtime(output_path)):
            os.makedirs(cache_dir, exist_ok=True)
            # Dump into a temp dir first, then move into place with
            # os.replace so readers never observe a partial archive
            # (atomic on the same filesystem, which temp_dir ensures
            # by living inside cache_dir).
            with tempfile.TemporaryDirectory(dir=cache_dir) as temp_dir:
                temp_file = osp.join(temp_dir, 'dump')
                task_exporter = TaskExporter(task_id)
                task_exporter.export_to(temp_file)
                os.replace(temp_file, output_path)

            archive_ctime = osp.getctime(output_path)
            # Enqueue removal of the cached archive once the TTL expires;
            # file_ctime lets the cleaner skip archives rebuilt since.
            scheduler = django_rq.get_scheduler()
            cleaning_job = scheduler.enqueue_in(time_delta=CACHE_TTL,
                func=clear_export_cache,
                task_id=task_id,
                file_path=output_path, file_ctime=archive_ctime)
            slogger.task[task_id].info(
                "The task '{}' is backuped at '{}' "
                "and available for downloading for the next {}. "
                "Export cache cleaning job is enqueued, id '{}'".format(
                    db_task.name, output_path, CACHE_TTL,
                    cleaning_job.id))

        return output_path
    except Exception:
        log_exception(slogger.task[task_id])
        raise
def get_export_formats():
    """Return every registered dataset export format as a list."""
    return [fmt for fmt in EXPORT_FORMATS.values()]
......@@ -108,4 +142,4 @@ def get_all_formats():
return {
'importers': get_import_formats(),
'exporters': get_export_formats(),
}
\ No newline at end of file
}
## Task and Project Import/Export functionality
This document describes the high-level design of the task and project import/export functionality.
API endpoints:
- Import task
- endpoint: `/api/v1/tasks?action=import`
- method: `POST`
- Content-Type: `multipart/form-data`
- returns: json
- Export task
- endpoint: `/api/v1/tasks/{id}?action=export`
- method: `GET`
- returns: zip archive
The zip archive has the following structure:
```
.
├── data
│   ├── {user uploaded data}
│   ├── manifest.jsonl
├── task.json
└── annotations.json
```
- Import project
- endpoint: `/api/v1/projects?action=import`
- method: `POST`
- Content-Type: `multipart/form-data`
- returns: json
- Export project
- endpoint: `/api/v1/projects/{id}?action=export`
- method: `GET`
- returns: zip archive
The zip archive has the following structure:
```
.
├── tasks
│   ├── task_1
│   ├── task_2
│   ├── ...
│ └── task_N
└── project.json
```
此差异已折叠。
......@@ -48,11 +48,12 @@ def files_to_ignore(directory):
return False
class IMediaReader(ABC):
def __init__(self, source_path, step, start, stop):
def __init__(self, source_path, step, start, stop, dimension):
self._source_path = sorted(source_path)
self._step = step
self._start = start
self._stop = stop
self._dimension = dimension
@abstractmethod
def __iter__(self):
......@@ -89,7 +90,7 @@ class IMediaReader(ABC):
return range(self._start, self._stop, self._step)
class ImageListReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No image found')
......@@ -105,6 +106,7 @@ class ImageListReader(IMediaReader):
step=step,
start=start,
stop=stop,
dimension=dimension
)
def __iter__(self):
......@@ -113,7 +115,14 @@ class ImageListReader(IMediaReader):
def filter(self, callback):
source_path = list(filter(callback, self._source_path))
ImageListReader.__init__(self, source_path, step=self._step, start=self._start, stop=self._stop)
ImageListReader.__init__(
self,
source_path,
step=self._step,
start=self._start,
stop=self._stop,
dimension=self._dimension
)
    def get_path(self, i):
        # Source path of the i-th entry in the (sorted) file list.
        return self._source_path[i]
......@@ -125,19 +134,36 @@ class ImageListReader(IMediaReader):
return (pos - self._start + 1) / (self._stop - self._start)
def get_preview(self):
fp = open(self._source_path[0], "rb")
if self._dimension == DimensionType.DIM_3D:
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
else:
fp = open(self._source_path[0], "rb")
return self._get_preview(fp)
    def get_image_size(self, i):
        """Return (width, height) of the i-th entry.

        For 3D data the size is read from the PCD header fields
        (WIDTH/HEIGHT) instead of decoding an image.
        """
        if self._dimension == DimensionType.DIM_3D:
            with open(self.get_path(i), 'rb') as f:
                properties = ValidateDimension.get_pcd_properties(f)
                return int(properties["WIDTH"]), int(properties["HEIGHT"])
        img = Image.open(self._source_path[i])
        return img.width, img.height
    def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
        # FIXME
        # Re-initialize the reader over a new file list; __init__ does not
        # take dimension here, so it is applied separately afterwards.
        ImageListReader.__init__(self,
            source_path=source_files,
            step=step,
            start=start,
            stop=stop
        )
        self._dimension = dimension
@property
def absolute_source_paths(self):
return [self.get_path(idx) for idx, _ in enumerate(self._source_path)]
class DirectoryReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
image_paths = []
for source in source_path:
for root, _, files in os.walk(source):
......@@ -149,10 +175,11 @@ class DirectoryReader(ImageListReader):
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ArchiveReader(DirectoryReader):
def __init__(self, source_path, step=1, start=0, stop=None):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._archive_source = source_path[0]
extract_dir = source_path[1] if len(source_path) > 1 else os.path.dirname(source_path[0])
Archive(self._archive_source).extractall(extract_dir)
......@@ -163,10 +190,11 @@ class ArchiveReader(DirectoryReader):
step=step,
start=start,
stop=stop,
dimension=dimension
)
class PdfReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
if not source_path:
raise Exception('No PDF found')
......@@ -194,21 +222,22 @@ class PdfReader(ImageListReader):
step=step,
start=start,
stop=stop,
dimension=dimension,
)
class ZipReader(ImageListReader):
def __init__(self, source_path, step=1, start=0, stop=None):
self._dimension = DimensionType.DIM_2D
self._zip_source = zipfile.ZipFile(source_path[0], mode='a')
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._zip_source = zipfile.ZipFile(source_path[0], mode='r')
self.extract_dir = source_path[1] if len(source_path) > 1 else None
file_list = [f for f in self._zip_source.namelist() if files_to_ignore(f) and get_mime(f) == 'image']
super().__init__(file_list, step=step, start=start, stop=stop)
super().__init__(file_list, step=step, start=start, stop=stop, dimension=dimension)
def __del__(self):
self._zip_source.close()
def get_preview(self):
if self._dimension == DimensionType.DIM_3D:
# TODO
fp = open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'), "rb")
return self._get_preview(fp)
io_image = io.BytesIO(self._zip_source.read(self._source_path[0]))
......@@ -216,32 +245,20 @@ class ZipReader(ImageListReader):
def get_image_size(self, i):
if self._dimension == DimensionType.DIM_3D:
with self._zip_source.open(self._source_path[i], "r") as file:
properties = ValidateDimension.get_pcd_properties(file)
with open(self.get_path(i), 'rb') as f:
properties = ValidateDimension.get_pcd_properties(f)
return int(properties["WIDTH"]), int(properties["HEIGHT"])
img = Image.open(io.BytesIO(self._zip_source.read(self._source_path[i])))
return img.width, img.height
    def get_image(self, i):
        # 3D entries are returned as file paths; 2D images are returned as
        # in-memory byte streams read straight from the zip archive.
        if self._dimension == DimensionType.DIM_3D:
            return self.get_path(i)
        return io.BytesIO(self._zip_source.read(self._source_path[i]))
def add_files(self, source_path):
root_path = os.path.split(self._zip_source.filename)[0]
for path in source_path:
self._zip_source.write(path, path.replace(root_path, ""))
    def get_zip_filename(self):
        # Path of the underlying zip archive on disk.
        return self._zip_source.filename
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
self._dimension = dimension
super().__init__(
source_path=source_files,
step=step,
start=start,
stop=stop
)
def get_path(self, i):
if self._zip_source.filename:
return os.path.join(os.path.dirname(self._zip_source.filename), self._source_path[i]) \
......@@ -249,18 +266,28 @@ class ZipReader(ImageListReader):
else: # necessary for mime_type definition
return self._source_path[i]
def reconcile(self, source_files, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().reconcile(
source_files=source_files,
step=step,
start=start,
stop=stop,
dimension=dimension,
)
def extract(self):
self._zip_source.extractall(self.extract_dir if self.extract_dir else os.path.dirname(self._zip_source.filename))
if not self.extract_dir:
os.remove(self._zip_source.filename)
class VideoReader(IMediaReader):
def __init__(self, source_path, step=1, start=0, stop=None):
def __init__(self, source_path, step=1, start=0, stop=None, dimension=DimensionType.DIM_2D):
super().__init__(
source_path=source_path,
step=step,
start=start,
stop=stop + 1 if stop is not None else stop,
dimension=dimension,
)
def _has_frame(self, i):
......@@ -743,15 +770,15 @@ class ValidateDimension:
pcd_files = {}
for file in files:
file_name, file_extension = file.rsplit('.', maxsplit=1)
file_name, file_extension = os.path.splitext(file)
file_path = os.path.abspath(os.path.join(root, file))
if file_extension == "bin":
if file_extension == ".bin":
path = self.bin_operation(file_path, actual_path)
pcd_files[file_name] = path
self.related_files[path] = []
elif file_extension == "pcd":
elif file_extension == ".pcd":
path = ValidateDimension.pcd_operation(file_path, actual_path)
if path == file_path:
self.image_files[file_name] = file_path
......@@ -759,7 +786,8 @@ class ValidateDimension:
pcd_files[file_name] = path
self.related_files[path] = []
else:
self.image_files[file_name] = file_path
if _is_image(file_path):
self.image_files[file_name] = file_path
return pcd_files
def validate(self):
......
# Copyright (C) 2019 Intel Corporation
# Copyright (C) 2019-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
......@@ -278,7 +278,7 @@ class DataSerializer(serializers.ModelSerializer):
model = models.Data
fields = ('chunk_size', 'size', 'image_quality', 'start_frame', 'stop_frame', 'frame_filter',
'compressed_chunk_type', 'original_chunk_type', 'client_files', 'server_files', 'remote_files', 'use_zip_chunks',
'use_cache', 'copy_data')
'use_cache', 'copy_data', 'storage_method', 'storage')
# pylint: disable=no-self-use
def validate_frame_filter(self, value):
......@@ -707,6 +707,9 @@ class LogEventSerializer(serializers.Serializer):
class AnnotationFileSerializer(serializers.Serializer):
annotation_file = serializers.FileField()
class TaskFileSerializer(serializers.Serializer):
    """Validates the payload of a task-backup import request.

    Used by the tasks view when the client POSTs ?action=import with a
    previously exported task archive in the 'task_file' form field.
    """
    # Uploaded archive containing an exported task backup.
    task_file = serializers.FileField()
class ReviewSerializer(serializers.ModelSerializer):
assignee = BasicUserSerializer(allow_null=True, required=False)
assignee_id = serializers.IntegerField(write_only=True, allow_null=True, required=False)
......@@ -767,3 +770,10 @@ class CombinedReviewSerializer(ReviewSerializer):
models.Comment.objects.create(**comment)
return db_review
class RelatedFileSerializer(serializers.ModelSerializer):
    """Serializer for RelatedFile records (e.g. context images attached
    to primary task images)."""
    class Meta:
        model = models.RelatedFile
        fields = '__all__'
        # 'path' is assigned by the server while task data is being
        # created and must not be modified through the API.
        read_only_fields = ('path',)
\ No newline at end of file
......@@ -9,27 +9,25 @@ import sys
import rq
import re
import shutil
from distutils.dir_util import copy_tree
from traceback import print_exception
from urllib import parse as urlparse
from urllib import request as urlrequest
import requests
import django_rq
from django.conf import settings
from django.db import transaction
from cvat.apps.engine.media_extractors import get_mime, MEDIA_TYPES, Mpeg4ChunkWriter, ZipChunkWriter, Mpeg4CompressedChunkWriter, ZipCompressedChunkWriter, ValidateDimension
from cvat.apps.engine.models import DataChoice, StorageMethodChoice, StorageChoice, RelatedFile
from cvat.apps.engine import models
from cvat.apps.engine.log import slogger
from cvat.apps.engine.media_extractors import (MEDIA_TYPES, Mpeg4ChunkWriter, Mpeg4CompressedChunkWriter,
ValidateDimension, ZipChunkWriter, ZipCompressedChunkWriter, get_mime)
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.models import DimensionType
from utils.dataset_manifest import ImageManifestManager, VideoManifestManager
from utils.dataset_manifest.core import VideoManifestValidator
from utils.dataset_manifest.utils import detect_related_images
import django_rq
from django.conf import settings
from django.db import transaction
from distutils.dir_util import copy_tree
from . import models
from .log import slogger
############################# Low Level server API
def create(tid, data):
......@@ -41,12 +39,13 @@ def create(tid, data):
@transaction.atomic
def rq_handler(job, exc_type, exc_value, traceback):
split = job.id.split('/')
tid = int(split[split.index('tasks') + 1])
tid = split[split.index('tasks') + 1]
try:
tid = int(tid)
db_task = models.Task.objects.select_for_update().get(pk=tid)
with open(db_task.get_log_path(), "wt") as log_file:
print_exception(exc_type, exc_value, traceback, file=log_file)
except models.Task.DoesNotExist:
except (models.Task.DoesNotExist, ValueError):
pass # skip exceptions in the code
return False
......@@ -76,8 +75,9 @@ def _save_task_to_db(db_task):
segment_size = db_task.segment_size
segment_step = segment_size
if segment_size == 0:
if segment_size == 0 or segment_size > db_task.data.size:
segment_size = db_task.data.size
db_task.segment_size = segment_size
# Segment step must be more than segment_size + overlap in single-segment tasks
# Otherwise a task contains an extra segment
......@@ -209,15 +209,15 @@ def _download_data(urls, upload_dir):
return list(local_files.keys())
def _get_manifest_frame_indexer(start_frame=0, frame_step=1):
return lambda frame_id: start_frame + frame_id * frame_step
@transaction.atomic
def _create_thread(tid, data):
def _create_thread(tid, data, isImport=False):
slogger.glob.info("create task #{}".format(tid))
db_task = models.Task.objects.select_for_update().get(pk=tid)
db_data = db_task.data
if db_task.data.size != 0:
raise NotImplementedError("Adding more data is not implemented")
upload_dir = db_data.get_upload_dirname()
if data['remote_files']:
......@@ -227,11 +227,11 @@ def _create_thread(tid, data):
media = _count_files(data, manifest_file)
media, task_mode = _validate_data(media, manifest_file)
if manifest_file:
assert settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE, \
assert settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE, \
"File with meta information can be uploaded if 'Use cache' option is also selected"
if data['server_files']:
if db_data.storage == StorageChoice.LOCAL:
if db_data.storage == models.StorageChoice.LOCAL:
_copy_data_from_share(data['server_files'], upload_dir)
else:
upload_dir = settings.SHARE_ROOT
......@@ -244,16 +244,23 @@ def _create_thread(tid, data):
db_images = []
extractor = None
manifest_index = _get_manifest_frame_indexer()
for media_type, media_files in media.items():
if media_files:
if extractor is not None:
raise Exception('Combined data types are not supported')
source_paths=[os.path.join(upload_dir, f) for f in media_files]
if media_type in {'archive', 'zip'} and db_data.storage == StorageChoice.SHARE:
if media_type in {'archive', 'zip'} and db_data.storage == models.StorageChoice.SHARE:
source_paths.append(db_data.get_upload_dirname())
upload_dir = db_data.get_upload_dirname()
db_data.storage = StorageChoice.LOCAL
db_data.storage = models.StorageChoice.LOCAL
if isImport and media_type == 'image' and db_data.storage == models.StorageChoice.SHARE:
manifest_index = _get_manifest_frame_indexer(db_data.start_frame, db_data.get_frame_step())
db_data.start_frame = 0
data['stop_frame'] = None
db_data.frame_filter = ''
extractor = MEDIA_TYPES[media_type]['extractor'](
source_path=source_paths,
step=db_data.get_frame_step(),
......@@ -261,22 +268,27 @@ def _create_thread(tid, data):
stop=data['stop_frame'],
)
validate_dimension = ValidateDimension()
if extractor.__class__ == MEDIA_TYPES['zip']['extractor']:
if isinstance(extractor, MEDIA_TYPES['zip']['extractor']):
extractor.extract()
validate_dimension.set_path(os.path.split(extractor.get_zip_filename())[0])
if db_data.storage == models.StorageChoice.LOCAL or \
(db_data.storage == models.StorageChoice.SHARE and \
isinstance(extractor, MEDIA_TYPES['zip']['extractor'])):
validate_dimension.set_path(upload_dir)
validate_dimension.validate()
if validate_dimension.dimension == DimensionType.DIM_3D:
db_task.dimension = DimensionType.DIM_3D
extractor.reconcile(
source_files=list(validate_dimension.related_files.keys()),
step=db_data.get_frame_step(),
start=db_data.start_frame,
stop=data['stop_frame'],
dimension=DimensionType.DIM_3D,
)
extractor.add_files(validate_dimension.converted_files)
if validate_dimension.dimension == models.DimensionType.DIM_3D:
db_task.dimension = models.DimensionType.DIM_3D
extractor.reconcile(
source_files=[os.path.join(upload_dir, f) for f in validate_dimension.related_files.keys()],
step=db_data.get_frame_step(),
start=db_data.start_frame,
stop=data['stop_frame'],
dimension=models.DimensionType.DIM_3D,
)
related_images = {}
if isinstance(extractor, MEDIA_TYPES['image']['extractor']):
......@@ -301,8 +313,8 @@ def _create_thread(tid, data):
job.save_meta()
update_progress.call_counter = (update_progress.call_counter + 1) % len(progress_animation)
compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter
if db_data.original_chunk_type == DataChoice.VIDEO:
compressed_chunk_writer_class = Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == models.DataChoice.VIDEO else ZipCompressedChunkWriter
if db_data.original_chunk_type == models.DataChoice.VIDEO:
original_chunk_writer_class = Mpeg4ChunkWriter
# Let's use QP=17 (that is 67 for 0-100 range) for the original chunks, which should be visually lossless or nearly so.
# A lower value will significantly increase the chunk size with a slight increase of quality.
......@@ -312,7 +324,7 @@ def _create_thread(tid, data):
original_quality = 100
kwargs = {}
if validate_dimension.dimension == DimensionType.DIM_3D:
if validate_dimension.dimension == models.DimensionType.DIM_3D:
kwargs["dimension"] = validate_dimension.dimension
compressed_chunk_writer = compressed_chunk_writer_class(db_data.image_quality, **kwargs)
original_chunk_writer = original_chunk_writer_class(original_quality)
......@@ -326,7 +338,6 @@ def _create_thread(tid, data):
else:
db_data.chunk_size = 36
video_path = ""
video_size = (0, 0)
......@@ -334,7 +345,7 @@ def _create_thread(tid, data):
job.meta['status'] = msg
job.save_meta()
if settings.USE_CACHE and db_data.storage_method == StorageMethodChoice.CACHE:
if settings.USE_CACHE and db_data.storage_method == models.StorageMethodChoice.CACHE:
for media_type, media_files in media.items():
if not media_files:
......@@ -392,7 +403,7 @@ def _create_thread(tid, data):
if data['stop_frame'] else all_frames, all_frames), db_data.get_frame_step()))
video_path = os.path.join(upload_dir, media_files[0])
except Exception as ex:
db_data.storage_method = StorageMethodChoice.FILE_SYSTEM
db_data.storage_method = models.StorageMethodChoice.FILE_SYSTEM
if os.path.exists(db_data.get_manifest_path()):
os.remove(db_data.get_manifest_path())
if os.path.exists(db_data.get_index_path()):
......@@ -404,7 +415,7 @@ def _create_thread(tid, data):
db_data.size = len(extractor)
manifest = ImageManifestManager(db_data.get_manifest_path())
if not manifest_file:
if db_task.dimension == DimensionType.DIM_2D:
if db_task.dimension == models.DimensionType.DIM_2D:
meta_info = manifest.prepare_meta(
sources=extractor.absolute_source_paths,
meta={ k: {'related_images': related_images[k] } for k in related_images },
......@@ -428,8 +439,8 @@ def _create_thread(tid, data):
img_sizes = []
for _, frame_id in chunk_paths:
properties = manifest[frame_id]
if db_task.dimension == DimensionType.DIM_2D:
properties = manifest[manifest_index(frame_id)]
if db_task.dimension == models.DimensionType.DIM_2D:
resolution = (properties['width'], properties['height'])
else:
resolution = extractor.get_image_size(frame_id)
......@@ -442,7 +453,7 @@ def _create_thread(tid, data):
for (path, frame), (w, h) in zip(chunk_paths, img_sizes)
])
if db_data.storage_method == StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE:
if db_data.storage_method == models.StorageMethodChoice.FILE_SYSTEM or not settings.USE_CACHE:
counter = itertools.count()
generator = itertools.groupby(extractor, lambda x: next(counter) // db_data.chunk_size)
for chunk_idx, chunk_data in generator:
......@@ -477,11 +488,11 @@ def _create_thread(tid, data):
created_images = models.Image.objects.filter(data_id=db_data.id)
db_related_files = [
RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path))
models.RelatedFile(data=image.data, primary_image=image, path=os.path.join(upload_dir, related_file_path))
for image in created_images
for related_file_path in related_images.get(image.path, [])
]
RelatedFile.objects.bulk_create(db_related_files)
models.RelatedFile.objects.bulk_create(db_related_files)
db_images = []
else:
models.Video.objects.create(
......
# Copyright (C) 2020 Intel Corporation
# Copyright (C) 2020-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
......@@ -2071,6 +2071,348 @@ class TaskCreateAPITestCase(APITestCase):
}
self._check_api_v1_tasks(None, data)
class TaskImportExportAPITestCase(APITestCase):
    """End-to-end tests for task backup export/import:
    GET /api/v1/tasks/<id>?action=export|download and
    POST /api/v1/tasks?action=import, across media types (image list,
    video, zip, pcd/velodyne 3D archives, manifests, local client files)
    and user roles (admin/owner succeed; plain user/annotator get 403;
    anonymous gets 401).
    """
    def setUp(self):
        self.client = APIClient()
        # Tasks created during a test; cleaned up in tearDown().
        self.tasks = []

    @classmethod
    def setUpTestData(cls):
        create_db_users(cls)

        # Each entry of media_data is one "data" payload used to create
        # a task; the suite creates a task per (task_data, media_data) pair.
        cls.media_data = []

        # 1) Plain image list on the share, with a frame range/step filter.
        image_count = 10
        imagename_pattern = "test_{}.jpg"
        for i in range(image_count):
            filename = imagename_pattern.format(i)
            path = os.path.join(settings.SHARE_ROOT, filename)
            _, data = generate_image_file(filename)
            with open(path, "wb") as image:
                image.write(data.read())

        cls.media_data.append(
            {
                **{"image_quality": 75,
                    "copy_data": True,
                    "start_frame": 2,
                    "stop_frame": 9,
                    "frame_filter": "step=2",
                },
                **{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(image_count)},
            }
        )

        # 2) Video on the share with a frame filter.
        filename = "test_video_1.mp4"
        path = os.path.join(settings.SHARE_ROOT, filename)
        _, data = generate_video_file(filename, width=1280, height=720)
        with open(path, "wb") as video:
            video.write(data.read())
        cls.media_data.append(
            {
                "image_quality": 75,
                "copy_data": True,
                "start_frame": 2,
                "stop_frame": 24,
                "frame_filter": "step=2",
                "server_files[0]": filename,
            }
        )

        # 3) Zip archive of images on the share.
        filename = os.path.join("test_archive_1.zip")
        path = os.path.join(settings.SHARE_ROOT, filename)
        _, data = generate_zip_archive_file(filename, count=5)
        with open(path, "wb") as zip_archive:
            zip_archive.write(data.read())
        cls.media_data.append(
            {
                "image_quality": 75,
                "server_files[0]": filename,
            }
        )

        # 4) 3D point-cloud (pcd) archive copied from test assets.
        filename = "test_pointcloud_pcd.zip"
        source_path = os.path.join(os.path.dirname(__file__), 'assets', filename)
        path = os.path.join(settings.SHARE_ROOT, filename)
        shutil.copyfile(source_path, path)
        cls.media_data.append(
            {
                "image_quality": 75,
                "server_files[0]": filename,
            }
        )

        # 5) 3D velodyne-points archive copied from test assets.
        filename = "test_velodyne_points.zip"
        source_path = os.path.join(os.path.dirname(__file__), 'assets', filename)
        path = os.path.join(settings.SHARE_ROOT, filename)
        shutil.copyfile(source_path, path)
        cls.media_data.append(
            {
                "image_quality": 75,
                "server_files[0]": filename,
            }
        )

        # 6) Video plus a prepared manifest file, cached chunks.
        filename = os.path.join("videos", "test_video_1.mp4")
        path = os.path.join(settings.SHARE_ROOT, filename)
        os.makedirs(os.path.dirname(path))
        _, data = generate_video_file(filename, width=1280, height=720)
        with open(path, "wb") as video:
            video.write(data.read())

        generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'),
            sources=[path])
        cls.media_data.append(
            {
                "image_quality": 70,
                "copy_data": True,
                "server_files[0]": filename,
                "server_files[1]": os.path.join("videos", "manifest.jsonl"),
                "use_cache": True,
            }
        )

        # 7) Image list driven by a manifest (images 1..7), cached chunks.
        generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'),
            sources=[os.path.join(settings.SHARE_ROOT, imagename_pattern.format(i)) for i in range(1, 8)])
        cls.media_data.append(
            {
                **{"image_quality": 70,
                    "copy_data": True,
                    "use_cache": True,
                    "frame_filter": "step=2",
                    "server_files[0]": "manifest.jsonl",
                },
                **{
                    **{"server_files[{}]".format(i): imagename_pattern.format(i) for i in range(1, 8)},
                }
            }
        )

        # 8-11) Client-side uploads (in-memory BytesIO, no share files).
        cls.media_data.extend([
            # image list local
            {
                "client_files[0]": generate_image_file("test_1.jpg")[1],
                "client_files[1]": generate_image_file("test_2.jpg")[1],
                "client_files[2]": generate_image_file("test_3.jpg")[1],
                "image_quality": 75,
            },
            # video local
            {
                "client_files[0]": generate_video_file("test_video.mp4")[1],
                "image_quality": 75,
            },
            # zip archive local
            {
                "client_files[0]": generate_zip_archive_file("test_archive_1.zip", 10)[1],
                "image_quality": 50,
            },
            # pdf local
            {
                "client_files[0]": generate_pdf_file("test_pdf_1.pdf", 7)[1],
                "image_quality": 54,
            },
        ])

    def tearDown(self):
        # Remove per-task directories created during the test run.
        for task in self.tasks:
            shutil.rmtree(os.path.join(settings.TASKS_ROOT, str(task["id"])))
            shutil.rmtree(os.path.join(settings.MEDIA_DATA_ROOT, str(task["data_id"])))

    @classmethod
    def tearDownClass(cls):
        # NOTE(review): setUpTestData writes test_0.jpg..test_9.jpg to the
        # share, but only test_1..test_3 are removed here — the remaining
        # pattern images appear to leak; verify against the fixture layout.
        super().tearDownClass()
        path = os.path.join(settings.SHARE_ROOT, "test_1.jpg")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "test_2.jpg")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "test_3.jpg")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "test_video_1.mp4")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "videos", "test_video_1.mp4")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl")
        os.remove(path)
        os.rmdir(os.path.dirname(path))
        path = os.path.join(settings.SHARE_ROOT, "test_pointcloud_pcd.zip")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "test_velodyne_points.zip")
        os.remove(path)
        path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl")
        os.remove(path)

    def _create_tasks(self):
        # Create one task per (task definition, media payload) combination
        # and remember ids for cleanup / later export.
        self.tasks = []

        def _create_task(task_data, media_data):
            response = self.client.post('/api/v1/tasks', data=task_data, format="json")
            assert response.status_code == status.HTTP_201_CREATED
            tid = response.data["id"]

            # Rewind in-memory client files so each task gets full content.
            for media in media_data.values():
                if isinstance(media, io.BytesIO):
                    media.seek(0)
            response = self.client.post("/api/v1/tasks/{}/data".format(tid), data=media_data)
            assert response.status_code == status.HTTP_202_ACCEPTED
            response = self.client.get("/api/v1/tasks/{}".format(tid))
            data_id = response.data["data"]
            self.tasks.append({
                "id": tid,
                "data_id": data_id,
            })

        task_data = [
            {
                "name": "my task #1",
                "owner_id": self.owner.id,
                "assignee_id": self.assignee.id,
                "overlap": 0,
                "segment_size": 100,
                "labels": [{
                    "name": "car",
                    "color": "#ff00ff",
                    "attributes": [{
                        "name": "bool_attribute",
                        "mutable": True,
                        "input_type": AttributeType.CHECKBOX,
                        "default_value": "true"
                    }],
                }, {
                    "name": "person",
                },
                ]
            },
            {
                "name": "my task #2",
                "owner_id": self.owner.id,
                "assignee_id": self.assignee.id,
                "overlap": 1,
                "segment_size": 3,
                "labels": [{
                    "name": "car",
                    "color": "#ff00ff",
                    "attributes": [{
                        "name": "bool_attribute",
                        "mutable": True,
                        "input_type": AttributeType.CHECKBOX,
                        "default_value": "true"
                    }],
                }, {
                    "name": "person",
                },
                ]
            },
        ]

        with ForceLogin(self.owner, self.client):
            for data in task_data:
                for media in self.media_data:
                    _create_task(data, media)

    def _run_api_v1_tasks_id_export(self, tid, user, query_params=""):
        # GET the task endpoint with e.g. action=export / action=download.
        with ForceLogin(user, self.client):
            response = self.client.get('/api/v1/tasks/{}?{}'.format(tid, query_params), format="json")

        return response

    def _run_api_v1_tasks_id_import(self, user, data):
        # POST a backup archive (or an rq_id poll) to the import endpoint.
        with ForceLogin(user, self.client):
            response = self.client.post('/api/v1/tasks?action=import', data=data, format="multipart")

        return response

    def _run_api_v1_tasks_id(self, tid, user):
        # Fetch a task's serialized representation for comparison.
        with ForceLogin(user, self.client):
            response = self.client.get('/api/v1/tasks/{}'.format(tid), format="json")

        return response.data

    def _run_api_v1_tasks_id_export_import(self, user):
        # Expected status codes depend on the caller's role:
        # no auth -> 401, plain user/annotator -> 403, otherwise success codes.
        if user:
            if user is self.user or user is self.annotator:
                HTTP_200_OK = status.HTTP_403_FORBIDDEN
                HTTP_202_ACCEPTED = status.HTTP_403_FORBIDDEN
                HTTP_201_CREATED = status.HTTP_403_FORBIDDEN
            else:
                HTTP_200_OK = status.HTTP_200_OK
                HTTP_202_ACCEPTED = status.HTTP_202_ACCEPTED
                HTTP_201_CREATED = status.HTTP_201_CREATED
        else:
            HTTP_200_OK = status.HTTP_401_UNAUTHORIZED
            HTTP_202_ACCEPTED = status.HTTP_401_UNAUTHORIZED
            HTTP_201_CREATED = status.HTTP_401_UNAUTHORIZED

        self._create_tasks()
        for task in self.tasks:
            tid = task["id"]
            # First export call enqueues the backup job (202), the second
            # finds it finished (201), then download streams the archive.
            response = self._run_api_v1_tasks_id_export(tid, user, "action=export")
            self.assertEqual(response.status_code, HTTP_202_ACCEPTED)

            response = self._run_api_v1_tasks_id_export(tid, user, "action=export")
            self.assertEqual(response.status_code, HTTP_201_CREATED)

            response = self._run_api_v1_tasks_id_export(tid, user, "action=download")
            self.assertEqual(response.status_code, HTTP_200_OK)

            # Only roles that can actually create tasks proceed to import.
            if user and user is not self.observer and user is not self.user and user is not self.annotator:
                self.assertTrue(response.streaming)
                content = io.BytesIO(b"".join(response.streaming_content))
                content.seek(0)

                uploaded_data = {
                    "task_file": content,
                }
                response = self._run_api_v1_tasks_id_import(user, uploaded_data)
                self.assertEqual(response.status_code, HTTP_202_ACCEPTED)
                # NOTE(review): this inner condition repeats the outer one
                # and so is always true here; presumably a leftover.
                if user is not self.observer and user is not self.user and user is not self.annotator:
                    # Poll the import job by rq_id until it yields the new id,
                    # then compare the imported task with the original.
                    rq_id = response.data["rq_id"]
                    response = self._run_api_v1_tasks_id_import(user, {"rq_id": rq_id})
                    self.assertEqual(response.status_code, HTTP_201_CREATED)
                    original_task = self._run_api_v1_tasks_id(tid, user)
                    imported_task = self._run_api_v1_tasks_id(response.data["id"], user)
                    compare_objects(
                        self=self,
                        obj1=original_task,
                        obj2=imported_task,
                        ignore_keys=(
                            "id",
                            "url",
                            "owner",
                            "project_id",
                            "assignee",
                            "created_date",
                            "updated_date",
                            "data",
                        ),
                    )

    def test_api_v1_tasks_id_export_admin(self):
        self._run_api_v1_tasks_id_export_import(self.admin)

    def test_api_v1_tasks_id_export_user(self):
        self._run_api_v1_tasks_id_export_import(self.user)

    def test_api_v1_tasks_id_export_annotator(self):
        self._run_api_v1_tasks_id_export_import(self.annotator)

    def test_api_v1_tasks_id_export_observer(self):
        self._run_api_v1_tasks_id_export_import(self.observer)

    def test_api_v1_tasks_id_export_no_auth(self):
        self._run_api_v1_tasks_id_export_import(None)
def generate_image_file(filename):
f = BytesIO()
gen = random.SystemRandom()
......@@ -2326,6 +2668,7 @@ class TaskDataAPITestCase(APITestCase):
path = os.path.join(settings.SHARE_ROOT, "videos", "manifest.jsonl")
os.remove(path)
os.rmdir(os.path.dirname(path))
path = os.path.join(settings.SHARE_ROOT, "manifest.jsonl")
os.remove(path)
......@@ -2995,7 +3338,7 @@ def compare_objects(self, obj1, obj2, ignore_keys, fp_tolerance=.001):
continue
v2 = obj2[k]
if k == 'attributes':
key = lambda a: a['spec_id']
key = lambda a: a['spec_id'] if 'spec_id' in a else a['id']
v1.sort(key=key)
v2.sort(key=key)
compare_objects(self, v1, v2, ignore_keys)
......
# Copyright (C) 2018-2020 Intel Corporation
# Copyright (C) 2018-2021 Intel Corporation
#
# SPDX-License-Identifier: MIT
......@@ -7,6 +7,7 @@ import os
import os.path as osp
import shutil
import traceback
import uuid
from datetime import datetime
from distutils.util import strtobool
from tempfile import mkstemp
......@@ -50,9 +51,11 @@ from cvat.apps.engine.serializers import (
FileInfoSerializer, JobSerializer, LabeledDataSerializer,
LogEventSerializer, ProjectSerializer, ProjectSearchSerializer, ProjectWithoutTaskSerializer,
RqStatusSerializer, TaskSerializer, UserSerializer, PluginsSerializer, ReviewSerializer,
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer
CombinedReviewSerializer, IssueSerializer, CombinedIssueSerializer, CommentSerializer,
TaskFileSerializer,
)
from cvat.apps.engine.utils import av_scan_paths
from cvat.apps.engine.backup import import_task
from . import models, task
from .log import clogger, slogger
......@@ -360,20 +363,134 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
return [perm() for perm in permissions]
def perform_create(self, serializer):
def validate_task_limit(owner):
admin_perm = auth.AdminRolePermission()
is_admin = admin_perm.has_permission(self.request, self)
if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
raise serializers.ValidationError('The user has the maximum number of tasks')
def _validate_task_limit(self, owner):
    """Raise ValidationError when 'owner' already has the maximum
    allowed number of tasks.

    Admins are exempt; the limit comes from
    settings.RESTRICTIONS['task_limit'] and is skipped when it is None.
    """
    admin_perm = auth.AdminRolePermission()
    is_admin = admin_perm.has_permission(self.request, self)
    if not is_admin and settings.RESTRICTIONS['task_limit'] is not None and \
        Task.objects.filter(owner=owner).count() >= settings.RESTRICTIONS['task_limit']:
        raise serializers.ValidationError('The user has the maximum number of tasks')
def create(self, request):
    """Create a task, or — with ?action=import — import one from a backup.

    Import is asynchronous: the first request uploads 'task_file' to a
    temp file and enqueues an RQ job, returning 202 with its 'rq_id'.
    Subsequent polls with 'rq_id' return 202 while running, 201 with the
    new task id on success, or 400/500 with the error text on failure.
    """
    action = self.request.query_params.get('action', None)
    if action is None:
        # Regular task creation.
        return super().create(request)
    elif action == 'import':
        # Importing counts against the caller's task quota.
        self._validate_task_limit(owner=self.request.user)
        if 'rq_id' in request.data:
            rq_id = request.data['rq_id']
        else:
            # Unique job id per upload; includes the user for traceability.
            rq_id = "{}@/api/v1/tasks/{}/import".format(request.user, uuid.uuid4())

        queue = django_rq.get_queue("default")
        rq_job = queue.fetch_job(rq_id)

        if not rq_job:
            # NOTE(review): a poll whose job has expired also lands here and
            # fails task_file validation — confirm this is the intended
            # client-facing behavior.
            serializer = TaskFileSerializer(data=request.data)
            serializer.is_valid(raise_exception=True)
            task_file = serializer.validated_data['task_file']
            # Spool the upload to a temp file that the worker will consume;
            # fd/path are stashed in job meta for cleanup on completion.
            fd, filename = mkstemp(prefix='cvat_')
            with open(filename, 'wb+') as f:
                for chunk in task_file.chunks():
                    f.write(chunk)
            rq_job = queue.enqueue_call(
                func=import_task,
                args=(filename, request.user.id),
                job_id=rq_id,
                meta={
                    'tmp_file': filename,
                    'tmp_file_descriptor': fd,
                },
            )
        else:
            if rq_job.is_finished:
                # Job done: clean up the temp upload and report the new task.
                task_id = rq_job.return_value
                os.close(rq_job.meta['tmp_file_descriptor'])
                os.remove(rq_job.meta['tmp_file'])
                rq_job.delete()
                return Response({'id': task_id}, status=status.HTTP_201_CREATED)
            elif rq_job.is_failed:
                os.close(rq_job.meta['tmp_file_descriptor'])
                os.remove(rq_job.meta['tmp_file'])
                exc_info = str(rq_job.exc_info)
                rq_job.delete()

                # RQ adds a prefix with exception class name
                import_error_prefix = '{}.{}'.format(
                    CvatImportError.__module__, CvatImportError.__name__)
                if exc_info.startswith(import_error_prefix):
                    # A known import error is the client's fault -> 400.
                    exc_info = exc_info.replace(import_error_prefix + ': ', '')
                    return Response(data=exc_info,
                        status=status.HTTP_400_BAD_REQUEST)
                else:
                    return Response(data=exc_info,
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # Job enqueued or still running: tell the client to keep polling.
        return Response({'rq_id': rq_id}, status=status.HTTP_202_ACCEPTED)
    else:
        raise serializers.ValidationError(
            "Unexpected action specified for the request")
else:
raise serializers.ValidationError(
"Unexpected action specified for the request")
def retrieve(self, request, pk=None):
    """Return a task, or — with ?action=export|download — drive an
    asynchronous task-backup export.

    'export' enqueues (or polls) a backup job: 202 while running,
    201 when the archive is ready, 500 with the traceback on failure.
    'download' additionally streams the finished archive as an
    attachment named task_<name>_backup_<timestamp>.<ext> (lowercased).
    """
    db_task = self.get_object() # force to call check_object_permissions
    action = self.request.query_params.get('action', None)
    if action is None:
        return super().retrieve(request, pk)
    elif action in ('export', 'download'):
        queue = django_rq.get_queue("default")
        # One export job per task (no user component in the id), so
        # concurrent exporters of the same task share a single job.
        rq_id = "/api/v1/tasks/{}/export".format(pk)

        rq_job = queue.fetch_job(rq_id)
        if rq_job:
            last_task_update_time = timezone.localtime(db_task.updated_date)
            request_time = rq_job.meta.get('request_time', None)
            if request_time is None or request_time < last_task_update_time:
                # The task changed after the export started: the archive
                # is stale, so discard the job and enqueue a fresh one below.
                rq_job.cancel()
                rq_job.delete()
            else:
                if rq_job.is_finished:
                    file_path = rq_job.return_value
                    if action == "download" and osp.exists(file_path):
                        rq_job.delete()

                        timestamp = datetime.strftime(last_task_update_time,
                            "%Y_%m_%d_%H_%M_%S")
                        filename = "task_{}_backup_{}{}".format(
                            db_task.name, timestamp,
                            osp.splitext(file_path)[1])
                        return sendfile(request, file_path, attachment=True,
                            attachment_filename=filename.lower())
                    else:
                        if osp.exists(file_path):
                            # 'export' poll with the archive ready.
                            return Response(status=status.HTTP_201_CREATED)
                        # Archive missing (e.g. cache expired): fall through
                        # and re-enqueue the export below.
                elif rq_job.is_failed:
                    exc_info = str(rq_job.exc_info)
                    rq_job.delete()
                    return Response(exc_info,
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
                else:
                    # Still queued or running.
                    return Response(status=status.HTTP_202_ACCEPTED)

        # No (usable) job yet: start a new export; result lives for the
        # dataset-manager cache TTL.
        ttl = dm.views.CACHE_TTL.total_seconds()
        queue.enqueue_call(
            func=dm.views.backup_task,
            args=(pk, 'task_dump.zip'),
            job_id=rq_id,
            meta={ 'request_time': timezone.localtime() },
            result_ttl=ttl, failure_ttl=ttl)
        return Response(status=status.HTTP_202_ACCEPTED)
    else:
        raise serializers.ValidationError(
            "Unexpected action specified for the request")
def perform_create(self, serializer):
owner = self.request.data.get('owner', None)
if owner:
validate_task_limit(owner)
self._validate_task_limit(owner)
serializer.save()
else:
validate_task_limit(self.request.user)
self._validate_task_limit(self.request.user)
serializer.save(owner=self.request.user)
def perform_destroy(self, instance):
......@@ -414,6 +531,9 @@ class TaskViewSet(auth.TaskGetQuerySetMixin, viewsets.ModelViewSet):
def data(self, request, pk):
if request.method == 'POST':
db_task = self.get_object() # call check_object_permissions as well
if db_task.data:
return Response(data='Adding more data is not supported',
status=status.HTTP_400_BAD_REQUEST)
serializer = DataSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
db_data = serializer.save()
......
......@@ -86,7 +86,7 @@ context('Actions on Cuboid', () => {
it('Draw a Cuboid shape in two ways (From rectangle, by 4 points)', () => {
cy.createCuboid(createCuboidShape2Points);
cy.get('.cvat-canvas-container').trigger('mousemove', 300, 400);
cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated')
cy.get('#cvat_canvas_shape_1').should('have.class', 'cvat_canvas_shape_activated');
// Increase code coverage for cvat-canvas/src/typescript/svg.patch.ts. Block start
// Checking for changes in the size and orientation of the shape is based on
......@@ -95,7 +95,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 360, 340);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 360, 340, {button: 0})
.trigger('mousedown', 360, 340, { button: 0 })
.trigger('mousemove', 360, 240)
.trigger('mouseup', 360, 240);
......@@ -103,7 +103,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 360, 340);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 360, 340, {which: 1})
.trigger('mousedown', 360, 340, { which: 1 })
.trigger('mousemove', 430, 340)
.trigger('mouseup', 430, 340);
......@@ -111,7 +111,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 250, 250);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 250, 250, {button: 0})
.trigger('mousedown', 250, 250, { button: 0 })
.trigger('mousemove', 200, 250)
.trigger('mouseup', 200, 250);
......@@ -119,7 +119,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 350, 250);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 350, 250, {button: 0})
.trigger('mousedown', 350, 250, { button: 0 })
.trigger('mousemove', 300, 250)
.trigger('mouseup', 300, 250);
......@@ -127,13 +127,13 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 200, 350);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 200, 350, {which: 1})
.trigger('mousedown', 200, 350, { which: 1 })
.trigger('mousemove', 150, 350)
.trigger('mouseup', 150, 350);
cy.get('.cvat-canvas-container') // Orientation to right. drCenter.hide()
.trigger('mouseenter', 300, 200)
.trigger('mousedown', 300, 200, {which: 1})
.trigger('mousedown', 300, 200, { which: 1 })
.trigger('mousemove', 150, 200)
.trigger('mouseup', 150, 200);
......@@ -141,7 +141,7 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 85, 270);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 85, 270, {which: 1})
.trigger('mousedown', 85, 270, { which: 1 })
.trigger('mousemove', 120, 270)
.trigger('mouseup', 120, 270);
......@@ -149,19 +149,19 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 120, 410);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 120, 410, {button: 0})
.trigger('mousedown', 120, 410, { button: 0 })
.trigger('mousemove', 120, 350)
.trigger('mouseup', 120, 350);
cy.get('.cvat-canvas-container') // this.face
.trigger('mouseenter', 230, 300)
.trigger('mousedown', 230, 300, {which: 1})
.trigger('mousedown', 230, 300, { which: 1 })
.trigger('mousemove', 200, 300)
.trigger('mouseup', 200, 300);
cy.get('.cvat-canvas-container') // this.right
.trigger('mouseenter', 250, 240)
.trigger('mousedown', 250, 240, {which: 1})
.trigger('mousedown', 250, 240, { which: 1 })
.trigger('mousemove', 280, 200)
.trigger('mouseup', 280, 200);
......@@ -169,8 +169,8 @@ context('Actions on Cuboid', () => {
.trigger('mouseenter', 90, 215);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 90, 215, {button: 0, shiftKey: true})
.trigger('mousemove', 90, 270, {shiftKey: true})
.trigger('mousedown', 90, 215, { button: 0, shiftKey: true })
.trigger('mousemove', 90, 270, { shiftKey: true })
.trigger('mouseup', 90, 270);
cy.get('.cvat-appearance-cuboid-projections-checkbox').click(); // if (v === true)
......@@ -182,58 +182,57 @@ context('Actions on Cuboid', () => {
cy.get('.cvat-canvas-container') // Moving the shape for further testing convenience
.trigger('mouseenter', 150, 305)
.trigger('mousedown', 230, 300, {which: 1})
.trigger('mousedown', 230, 300, { which: 1 })
.trigger('mousemove', 400, 200)
.trigger('mouseup', 400, 200);
cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT) ecle{}
.trigger('mouseenter', 260, 250);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.dblclick(260, 250, {shiftKey: true})
cy.get('.cvat-canvas-container').dblclick(260, 250, { shiftKey: true });
cy.get('.cvat-canvas-container') // Change orientation to left
.trigger('mouseenter', 300, 130)
.trigger('mousedown', 300, 130, {which: 1})
.trigger('mousedown', 300, 130, { which: 1 })
.trigger('mousemove', 500, 100)
.trigger('mouseup', 500, 100);
cy.get('.cvat-canvas-container') // frCenter
.trigger('mouseenter', 465, 180)
.trigger('mouseenter', 465, 180);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 465, 180, {which: 1})
.trigger('mousedown', 465, 180, { which: 1 })
.trigger('mousemove', 500, 180)
.trigger('mouseup', 500, 180);
cy.get('.cvat-canvas-container') // ftCenter
.trigger('mouseenter', 395, 125)
.trigger('mouseenter', 395, 125);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 395, 125, {which: 1})
.trigger('mousedown', 395, 125, { which: 1 })
.trigger('mousemove', 395, 150)
.trigger('mouseup', 395, 150);
cy.get('.cvat-canvas-container') // fbCenter
.trigger('mouseenter', 400, 265)
.trigger('mouseenter', 400, 265);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 400, 265, {which: 1})
.trigger('mousedown', 400, 265, { which: 1 })
.trigger('mousemove', 400, 250)
.trigger('mouseup', 400, 250);
cy.get('.cvat-canvas-container') // if (this.cuboidModel.orientation === Orientation.LEFT)
.trigger('mouseenter', 600, 180)
.trigger('mouseenter', 600, 180);
cy.get('.cvat_canvas_selected_point').should('exist');
cy.get('.cvat-canvas-container')
.trigger('mousedown', 600, 180, {button: 0, shiftKey: true})
.trigger('mousemove', 600, 150, {shiftKey: true})
.trigger('mousedown', 600, 180, { button: 0, shiftKey: true })
.trigger('mousemove', 600, 150, { shiftKey: true })
.trigger('mouseup', 600, 150)
.dblclick(600, 150, {shiftKey: true});
.dblclick(600, 150, { shiftKey: true });
cy.get('.cvat-canvas-container') // this.left
.trigger('mouseenter', 400, 130)
.trigger('mousedown', 400, 130, {which: 1})
.trigger('mousedown', 400, 130, { which: 1 })
.trigger('mousemove', 400, 100)
.trigger('mouseup', 400, 100)
.trigger('mouseout', 400, 100);
......
......@@ -18,27 +18,37 @@ context('Shortcuts window.', () => {
describe(`Testing case "${caseId}"`, () => {
it('Press "F1" from a task. Shortcuts window be visible. Closing the modal window by button "OK".', () => {
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr').should('exist').then(($shortcutsTableTrCount) => {
shortcutsTableTrCount = $shortcutsTableTrCount.length;
cy.get('.cvat-shortcuts-modal-window')
.should('exist')
.and('be.visible')
.within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr')
.should('exist')
.then(($shortcutsTableTrCount) => {
shortcutsTableTrCount = $shortcutsTableTrCount.length;
});
});
cy.contains('button', 'OK').click();
});
cy.contains('button', 'OK').click();
});
cy.get('.cvat-shortcuts-modal-window').should('not.be.visible');
});
it('Open a job. Press "F1". Shortcuts window be visible. Closing the modal window by F1.', () => {
cy.openJob();
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('exist').and('be.visible').within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr').should('exist').then(($shortcutsTableTrCount) => {
expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount);
cy.get('.cvat-shortcuts-modal-window')
.should('exist')
.and('be.visible')
.within(() => {
cy.get('.cvat-shortcuts-modal-window-table').within(() => {
cy.get('tr')
.should('exist')
.then(($shortcutsTableTrCount) => {
expect($shortcutsTableTrCount.length).to.be.gt(shortcutsTableTrCount);
});
});
});
});
cy.get('body').trigger('keydown', { keyCode: keyCodeF1 });
cy.get('.cvat-shortcuts-modal-window').should('not.be.visible');
});
......
......@@ -33,7 +33,15 @@ context('Overlap size.', () => {
cy.login();
cy.imageGenerator(imagesFolder, imageFileName, width, height, color, posX, posY, labelName, imagesCount);
cy.createZipArchive(directoryToArchive, archivePath);
cy.createAnnotationTask(taskName, labelName, attrName, textDefaultValue, archiveName, false, advancedConfigurationParams);
cy.createAnnotationTask(
taskName,
labelName,
attrName,
textDefaultValue,
archiveName,
false,
advancedConfigurationParams,
);
cy.openTask(taskName);
});
......@@ -45,31 +53,43 @@ context('Overlap size.', () => {
describe(`Testing case "${caseId}"`, () => {
it('The task parameters is correct.', () => {
cy.get('.cvat-task-parameters').within(() => {
cy.get('table').find('tr').last().find('td').then(($taskParameters) => {
expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize);
expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize);
});
cy.get('table')
.find('tr')
.last()
.find('td')
.then(($taskParameters) => {
expect(Number($taskParameters[0].innerText)).equal(calculatedOverlapSize);
expect(Number($taskParameters[1].innerText)).equal(advancedConfigurationParams.segmentSize);
});
});
});
it('The range of frame values corresponds to the parameters.', () => {
cy.getJobNum(0).then(($job) => {
cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => {
expect(Number($frameRange.text().split('-')[1])).equal(advancedConfigurationParams.segmentSize - 1); // expected 4 to equal 4
});
cy.contains('a', `Job #${$job}`)
.parents('tr')
.find('.cvat-job-item-frames')
.then(($frameRange) => {
expect(Number($frameRange.text().split('-')[1])).equal(
advancedConfigurationParams.segmentSize - 1,
); // expected 4 to equal 4
});
});
cy.getJobNum(1).then(($job) => {
cy.contains('a', `Job #${$job}`).parents('tr').find('.cvat-job-item-frames').then(($frameRange) => {
expect(Number($frameRange.text().split('-')[0])).equal(advancedConfigurationParams.segmentSize - 2); // expected 3 to equal 3
});
cy.contains('a', `Job #${$job}`)
.parents('tr')
.find('.cvat-job-item-frames')
.then(($frameRange) => {
expect(Number($frameRange.text().split('-')[0])).equal(
advancedConfigurationParams.segmentSize - 2,
); // expected 3 to equal 3
});
});
});
it('The range of frame values in a job corresponds to the parameters.', () => {
cy.openJob(0);
cy.get('.cvat-player-frame-selector')
.find('input[role="spinbutton"]')
.should('have.value', '0');
cy.get('.cvat-player-frame-selector').find('input[role="spinbutton"]').should('have.value', '0');
cy.get('.cvat-player-last-button').click();
cy.get('.cvat-player-frame-selector')
.find('input[role="spinbutton"]')
......
......@@ -170,7 +170,8 @@ context('Label constructor. Color label. Label name editing', () => {
});
cy.get('.cvat-change-task-label-color-badge')
.children()
.should('have.attr', 'style').and('contain', 'rgb(179, 179, 179)');
.should('have.attr', 'style')
.and('contain', 'rgb(179, 179, 179)');
cy.get('.cvat-label-constructor-updater').contains('button', 'Done').click();
cy.contains('.cvat-constructor-viewer-item', `Case ${caseId}`)
.should('have.attr', 'style')
......
......@@ -17,27 +17,33 @@ context('Drag canvas.', () => {
describe(`Testing case "${caseId}"`, () => {
it('Drag canvas', () => {
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => {
topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', ''));
leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', ''));
});
cy.get('#cvat_canvas_background')
.invoke('attr', 'style')
.then(($style) => {
topBefore = Number($style.split(';')[0].split(' ')[1].replace('px', ''));
leftBefore = Number($style.split(';')[1].split(' ')[2].replace('px', ''));
});
cy.get('.cvat-move-control').click(); // Without this action, the function is not covered
cy.get('.cvat-canvas-container').trigger('mousedown', {button: 0}).trigger('mousemove', 500, 500);
cy.get('.cvat-canvas-container').trigger('mousedown', { button: 0 }).trigger('mousemove', 500, 500);
});
it('Top and left style perameters is changed.', () => {
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => {
expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95
expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95
});
cy.get('#cvat_canvas_background')
.invoke('attr', 'style')
.then(($style) => {
expect(topBefore).not.equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to not equal 95
expect(leftBefore).not.equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to not equal 95
});
});
it('Duble click on canvas. Parameters returned to their original value', () => {
cy.get('.cvat-canvas-container').dblclick();
cy.get('#cvat_canvas_background').invoke('attr', 'style').then(($style) => {
expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20
expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73
});
cy.get('#cvat_canvas_background')
.invoke('attr', 'style')
.then(($style) => {
expect(topBefore).equal(Number($style.split(';')[0].split(' ')[1].replace('px', ''))); // expected 20 to equal 20
expect(leftBefore).equal(Number($style.split(';')[1].split(' ')[2].replace('px', ''))); // expected 73 to equal 73
});
});
});
});
......@@ -6,7 +6,7 @@
context('Reset password notification.', () => {
const caseId = '73';
const dummyEmail = 'admin@local.local'
const dummyEmail = 'admin@local.local';
before(() => {
cy.visit('auth/login');
......
......@@ -424,7 +424,10 @@ Cypress.Commands.add('updateAttributes', (multiAttrParams) => {
}
if (multiAttrParams.mutable) {
cy.get('.cvat-attribute-mutable-checkbox')
.find('[type="checkbox"]').should('not.be.checked').check().should('be.checked');
.find('[type="checkbox"]')
.should('not.be.checked')
.check()
.should('be.checked');
}
});
});
......
......@@ -66,7 +66,7 @@ Cypress.Commands.add('setGroupCondition', (groupIndex, condition) => {
Cypress.Commands.add(
'setFilter',
({groupIndex, ruleIndex, field, operator, valueSource, value, label, labelAttr, submit}) => {
({ groupIndex, ruleIndex, field, operator, valueSource, value, label, labelAttr, submit }) => {
cy.сheckFiltersModalOpened();
cy.collectGroupID().then((groupIdIndex) => {
cy.collectRuleID().then((ruleIdIndex) => {
......
......@@ -19,12 +19,16 @@ def config_log(level):
def main():
actions = {'create': CLI.tasks_create,
'delete': CLI.tasks_delete,
'ls': CLI.tasks_list,
'frames': CLI.tasks_frame,
'dump': CLI.tasks_dump,
'upload': CLI.tasks_upload}
actions = {
'create': CLI.tasks_create,
'delete': CLI.tasks_delete,
'ls': CLI.tasks_list,
'frames': CLI.tasks_frame,
'dump': CLI.tasks_dump,
'upload': CLI.tasks_upload,
'export': CLI.tasks_export,
'import': CLI.tasks_import,
}
args = parser.parse_args()
config_log(args.loglevel)
with requests.Session() as session:
......
......@@ -213,6 +213,53 @@ class CLI():
"with annotation file {} finished".format(filename)
log.info(logger_string)
def tasks_export(self, task_id, filename, export_verification_period=3, **kwargs):
    """ Export a whole task (data + annotations) to a local backup file.

    Polls `GET /tasks/<id>?action=export` every `export_verification_period`
    seconds until the server reports the archive is ready (HTTP 201),
    then downloads it via `?action=download` and writes it to `filename`.
    """
    url = self.api.tasks_id(task_id)
    export_url = url + '?action=export'

    while True:
        response = self.session.get(export_url)
        response.raise_for_status()
        log.info('STATUS {}'.format(response.status_code))
        if response.status_code == 201:
            # 201 means the backup archive has been prepared server-side;
            # 202 (or anything else non-error) means it is still in progress.
            break
        sleep(export_verification_period)

    response = self.session.get(url + '?action=download')
    response.raise_for_status()

    with open(filename, 'wb') as fp:
        fp.write(response.content)
    # Fixed message: "sucessfully. to" -> "successfully to"
    logger_string = "Task {} has been exported successfully " \
        "to {}".format(task_id, os.path.abspath(filename))
    log.info(logger_string)
def tasks_import(self, filename, import_verification_period=3, **kwargs):
    """ Import a task from a backup archive created by `tasks_export`.

    Uploads `filename` to `POST /tasks?action=import`, then re-posts the
    returned `rq_id` every `import_verification_period` seconds until the
    server finishes the import (HTTP 201) and returns the new task id.
    """
    url = self.api.tasks + '?action=import'

    with open(filename, 'rb') as input_file:
        response = self.session.post(
            url,
            files={'task_file': input_file}
        )
    response.raise_for_status()
    response_json = response.json()
    rq_id = response_json['rq_id']

    while True:
        sleep(import_verification_period)
        # Re-post the rq_id so the server reports the job status;
        # 202 means still running, 201 means the task was created.
        response = self.session.post(
            url,
            data={'rq_id': rq_id}
        )
        response.raise_for_status()
        if response.status_code == 201:
            break

    task_id = response.json()['id']
    # Fixed typo: "sucessfully" -> "successfully"
    log.info("Task has been imported successfully. Task ID: {}".format(task_id))
def login(self, credentials):
url = self.api.login
auth = {'username': credentials[0], 'password': credentials[1]}
......
......@@ -310,3 +310,36 @@ upload_parser.add_argument(
default='CVAT 1.1',
help='annotation format (default: %(default)s)'
)
#######################################################################
# Export task
#######################################################################

# Sub-command: download a whole task as a backup archive.
export_task_parser = task_subparser.add_parser(
    'export', description='Export a CVAT task.')

export_task_parser.add_argument('task_id', type=int, help='task ID')
export_task_parser.add_argument('filename', type=str, help='output file')
#######################################################################
# Import task
#######################################################################

# Sub-command: restore a task from a backup archive.
import_task_parser = task_subparser.add_parser(
    'import',
    # Capitalized to match the sibling sub-command ('Export a CVAT task.').
    description='Import a CVAT task.'
)
import_task_parser.add_argument(
    'filename',
    type=str,
    help='upload file'
)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册