未验证 提交 1ecc6072 编写于 作者: A Andrey Zhavoronkov 提交者: GitHub

Get preview images on the fly and keep them in cache (#5478)

Improved image preview loading for **Tasks**, **Jobs** and **Projects**
views
Backend behaviour change: creating image previews by request and storing
them in the cache
Added corresponding endpoints:
tasks/{id}/preview
projects/{id}/preview
jobs/{id}/preview

Demonstration (a random 0–1 s delay was added for demo purposes):
https://user-images.githubusercontent.com/41117609/208106321-951b8647-6e6b-452e-910c-31c4d0b8682d.mp4
https://user-images.githubusercontent.com/41117609/208106339-2d3a5a7b-d422-4b27-9e76-08729022e1ca.mp4
上级 37b685f4
{
"name": "cvat-core",
"version": "7.3.0",
"version": "7.4.0",
"description": "Part of Computer Vision Tool which presents an interface for client-side integration",
"main": "src/api.ts",
"scripts": {
......
......@@ -1356,11 +1356,8 @@ async function getPreview(tid, jid) {
let response = null;
try {
const url = `${backendAPI}/${jid !== null ? 'jobs' : 'tasks'}/${jid || tid}/data`;
const url = `${backendAPI}/${jid !== null ? 'jobs' : 'tasks'}/${jid || tid}/preview`;
response = await Axios.get(url, {
params: {
type: 'preview',
},
proxy: config.proxy,
responseType: 'blob',
});
......
......@@ -112,7 +112,7 @@ class Job(
def get_preview(
    self,
) -> io.RawIOBase:
    """Fetch this job's preview image from the dedicated ``/preview`` endpoint.

    Returns:
        An in-memory binary stream containing the raw preview image bytes.
    """
    # The old retrieve_data(self.id, type="preview") call was superseded by the
    # dedicated preview endpoint; the stale duplicate request (whose result was
    # immediately discarded) has been removed.
    (_, response) = self.api.retrieve_preview(self.id)
    return io.BytesIO(response.data)
def download_frames(
......
......@@ -159,7 +159,7 @@ class Task(
def get_preview(
    self,
) -> io.RawIOBase:
    """Fetch this task's preview image from the dedicated ``/preview`` endpoint.

    Returns:
        An in-memory binary stream containing the raw preview image bytes.
    """
    # The old retrieve_data(self.id, type="preview") call was superseded by the
    # dedicated preview endpoint; the stale duplicate request (whose result was
    # immediately discarded) has been removed.
    (_, response) = self.api.retrieve_preview(self.id)
    return io.BytesIO(response.data)
def download_chunk(
......
{
"name": "cvat-ui",
"version": "1.45.0",
"version": "1.46.0",
"description": "CVAT single-page application",
"main": "src/index.tsx",
"scripts": {
......
......@@ -996,8 +996,7 @@ export function getJobAsync(
// Check if the task was already downloaded to the state
let job: any | null = null;
const [task] = state.tasks.current
.filter((_task: Task) => _task.instance.id === tid)
.map((_task: Task) => _task.instance);
.filter((_task: Task) => _task.id === tid);
if (task) {
[job] = task.jobs.filter((_job: any) => _job.id === jid);
if (!job) {
......
......@@ -4,7 +4,7 @@
import { ActionUnion, createAction, ThunkAction } from 'utils/redux';
import { getCore } from 'cvat-core-wrapper';
import { Indexable, JobsQuery } from 'reducers';
import { Indexable, JobsQuery, Job } from 'reducers';
const cvat = getCore();
......@@ -12,6 +12,9 @@ export enum JobsActionTypes {
GET_JOBS = 'GET_JOBS',
GET_JOBS_SUCCESS = 'GET_JOBS_SUCCESS',
GET_JOBS_FAILED = 'GET_JOBS_FAILED',
GET_JOB_PREVIEW = 'GET_JOB_PREVIEW',
GET_JOB_PREVIEW_SUCCESS = 'GET_JOB_PREVIEW_SUCCESS',
GET_JOB_PREVIEW_FAILED = 'GET_JOB_PREVIEW_FAILED',
}
interface JobsList extends Array<any> {
......@@ -20,10 +23,19 @@ interface JobsList extends Array<any> {
const jobsActions = {
getJobs: (query: Partial<JobsQuery>) => createAction(JobsActionTypes.GET_JOBS, { query }),
getJobsSuccess: (jobs: JobsList, previews: string[]) => (
createAction(JobsActionTypes.GET_JOBS_SUCCESS, { jobs, previews })
getJobsSuccess: (jobs: JobsList) => (
createAction(JobsActionTypes.GET_JOBS_SUCCESS, { jobs })
),
getJobsFailed: (error: any) => createAction(JobsActionTypes.GET_JOBS_FAILED, { error }),
getJobPreiew: (jobID: number) => (
createAction(JobsActionTypes.GET_JOB_PREVIEW, { jobID })
),
getJobPreiewSuccess: (jobID: number, preview: string) => (
createAction(JobsActionTypes.GET_JOB_PREVIEW_SUCCESS, { jobID, preview })
),
getJobPreiewFailed: (jobID: number, error: any) => (
createAction(JobsActionTypes.GET_JOB_PREVIEW_FAILED, { jobID, error })
),
};
export type JobsActions = ActionUnion<typeof jobsActions>;
......@@ -40,9 +52,18 @@ export const getJobsAsync = (query: JobsQuery): ThunkAction => async (dispatch)
dispatch(jobsActions.getJobs(filteredQuery));
const jobs = await cvat.jobs.get(filteredQuery);
const previewPromises = jobs.map((job: any) => (job as any).frames.preview().catch(() => ''));
dispatch(jobsActions.getJobsSuccess(jobs, await Promise.all(previewPromises)));
dispatch(jobsActions.getJobsSuccess(jobs));
} catch (error) {
dispatch(jobsActions.getJobsFailed(error));
}
};
/**
 * Thunk: load the preview image for a single job and put it into the store.
 * Emits GET_JOB_PREVIEW immediately, then SUCCESS with the image URL or
 * FAILED with the caught error.
 */
export const getJobPreviewAsync = (job: Job): ThunkAction => async (dispatch) => {
    const jobID = job.id;
    dispatch(jobsActions.getJobPreiew(jobID));
    try {
        const preview = await job.frames.preview();
        dispatch(jobsActions.getJobPreiewSuccess(jobID, preview));
    } catch (error) {
        dispatch(jobsActions.getJobPreiewFailed(jobID, error));
    }
};
......@@ -29,13 +29,16 @@ export enum ProjectsActionTypes {
DELETE_PROJECT = 'DELETE_PROJECT',
DELETE_PROJECT_SUCCESS = 'DELETE_PROJECT_SUCCESS',
DELETE_PROJECT_FAILED = 'DELETE_PROJECT_FAILED',
GET_PROJECT_PREVIEW = 'GET_PROJECT_PREVIEW',
GET_PROJECT_PREVIEW_SUCCESS = 'GET_PROJECT_PREVIEW_SUCCESS',
GET_PROJECT_PREVIEW_FAILED = 'GET_PROJECT_PREVIEW_FAILED',
}
// prettier-ignore
const projectActions = {
getProjects: () => createAction(ProjectsActionTypes.GET_PROJECTS),
getProjectsSuccess: (array: any[], previews: string[], count: number) => (
createAction(ProjectsActionTypes.GET_PROJECTS_SUCCESS, { array, previews, count })
getProjectsSuccess: (array: any[], count: number) => (
createAction(ProjectsActionTypes.GET_PROJECTS_SUCCESS, { array, count })
),
getProjectsFailed: (error: any) => createAction(ProjectsActionTypes.GET_PROJECTS_FAILED, { error }),
updateProjectsGettingQuery: (query: Partial<ProjectsQuery>, tasksQuery: Partial<TasksQuery> = {}) => (
......@@ -58,6 +61,15 @@ const projectActions = {
deleteProjectFailed: (projectId: number, error: any) => (
createAction(ProjectsActionTypes.DELETE_PROJECT_FAILED, { projectId, error })
),
getProjectPreiew: (projectID: number) => (
createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW, { projectID })
),
getProjectPreiewSuccess: (projectID: number, preview: string) => (
createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW_SUCCESS, { projectID, preview })
),
getProjectPreiewFailed: (projectID: number, error: any) => (
createAction(ProjectsActionTypes.GET_PROJECT_PREVIEW_FAILED, { projectID, error })
),
};
export type ProjectActions = ActionUnion<typeof projectActions>;
......@@ -109,8 +121,7 @@ export function getProjectsAsync(
const array = Array.from(result);
const previewPromises = array.map((project): string => (project as any).preview().catch(() => ''));
dispatch(projectActions.getProjectsSuccess(array, await Promise.all(previewPromises), result.count));
dispatch(projectActions.getProjectsSuccess(array, result.count));
// Fetching the related tasks is only needed when retrieving a single project
if (Object.keys(filteredQuery).includes('id') && typeof filteredQuery.id === 'number') {
......@@ -171,3 +182,13 @@ export function deleteProjectAsync(projectInstance: any): ThunkAction {
}
};
}
/**
 * Thunk: load the preview image for a single project and put it into the store.
 * Emits GET_PROJECT_PREVIEW immediately, then SUCCESS with the image URL or
 * FAILED with the caught error.
 */
export const getProjectsPreviewAsync = (project: any): ThunkAction => async (dispatch) => {
    const projectID = project.id;
    dispatch(projectActions.getProjectPreiew(projectID));
    try {
        const preview = await project.preview();
        dispatch(projectActions.getProjectPreiewSuccess(projectID, preview));
    } catch (error) {
        dispatch(projectActions.getProjectPreiewFailed(projectID, error));
    }
};
......@@ -29,6 +29,9 @@ export enum TasksActionTypes {
UPDATE_JOB_FAILED = 'UPDATE_JOB_FAILED',
HIDE_EMPTY_TASKS = 'HIDE_EMPTY_TASKS',
SWITCH_MOVE_TASK_MODAL_VISIBLE = 'SWITCH_MOVE_TASK_MODAL_VISIBLE',
GET_TASK_PREVIEW = 'GET_TASK_PREVIEW',
GET_TASK_PREVIEW_SUCCESS = 'GET_TASK_PREVIEW_SUCCESS',
GET_TASK_PREVIEW_FAILED = 'GET_TASK_PREVIEW_FAILED',
}
function getTasks(query: Partial<TasksQuery>, updateQuery: boolean): AnyAction {
......@@ -43,11 +46,10 @@ function getTasks(query: Partial<TasksQuery>, updateQuery: boolean): AnyAction {
return action;
}
export function getTasksSuccess(array: any[], previews: string[], count: number): AnyAction {
export function getTasksSuccess(array: any[], count: number): AnyAction {
const action = {
type: TasksActionTypes.GET_TASKS_SUCCESS,
payload: {
previews,
array,
count,
},
......@@ -89,10 +91,9 @@ export function getTasksAsync(
}
const array = Array.from(result);
const promises = array.map((task): string => (task as any).frames.preview().catch(() => ''));
dispatch(getInferenceStatusAsync());
dispatch(getTasksSuccess(array, await Promise.all(promises), result.count));
dispatch(getTasksSuccess(array, result.count));
};
}
......@@ -379,3 +380,50 @@ export function moveTaskToProjectAsync(
}
};
}
// Action creator: marks the preview of the given task as "being fetched".
function getTaskPreview(taskID: number): AnyAction {
    return {
        type: TasksActionTypes.GET_TASK_PREVIEW,
        payload: { taskID },
    };
}
// Action creator: stores a successfully fetched preview for the given task.
function getTaskPreviewSuccess(taskID: number, preview: string): AnyAction {
    return {
        type: TasksActionTypes.GET_TASK_PREVIEW_SUCCESS,
        payload: { taskID, preview },
    };
}
// Action creator: records a failed preview fetch for the given task.
function getTaskPreviewFailed(taskID: number, error: any): AnyAction {
    return {
        type: TasksActionTypes.GET_TASK_PREVIEW_FAILED,
        payload: { taskID, error },
    };
}
/**
 * Thunk: load the preview image for a single task and put it into the store.
 * Emits GET_TASK_PREVIEW, then SUCCESS with the image URL or FAILED with the
 * caught error.
 */
export function getTaskPreviewAsync(taskInstance: any): ThunkAction<Promise<void>, {}, {}, AnyAction> {
    return async (dispatch: ActionCreator<Dispatch>): Promise<void> => {
        const taskID = taskInstance.id;
        try {
            dispatch(getTaskPreview(taskID));
            const preview = await taskInstance.frames.preview();
            dispatch(getTaskPreviewSuccess(taskID, preview));
        } catch (error) {
            dispatch(getTaskPreviewFailed(taskID, error));
        }
    };
}
// Copyright (C) 2021-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -19,8 +20,8 @@ import moment from 'moment';
import { CloudStorage, CombinedState } from 'reducers';
import { deleteCloudStorageAsync } from 'actions/cloud-storage-actions';
import CVATTooltip from 'components/common/cvat-tooltip';
import Preview from 'components/common/preview';
import Status from './cloud-storage-status';
import Preview from './cloud-storage-preview';
interface Props {
cloudStorage: CloudStorage;
......@@ -74,7 +75,12 @@ export default function CloudStorageItemComponent(props: Props): JSX.Element {
<Card
cover={(
<>
<Preview cloudStorage={cloudStorage} />
<Preview
cloudStorage={cloudStorage}
loadingClassName='cvat-cloud-storage-item-loading-preview'
emptyPreviewClassName='cvat-cloud-storage-item-empty-preview'
previewClassName='cvat-cloud-storage-item-preview'
/>
{description ? (
<CVATTooltip overlay={description}>
<QuestionCircleOutlined className='cvat-cloud-storage-description-icon' />
......
// Copyright (C) 2021-2022 Intel Corporation
//
// SPDX-License-Identifier: MIT
import React, { useEffect } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { PictureOutlined } from '@ant-design/icons';
import Spin from 'antd/lib/spin';
import { getCloudStoragePreviewAsync } from 'actions/cloud-storage-actions';
import { CombinedState, CloudStorage } from 'reducers';
interface Props {
cloudStorage: CloudStorage;
}
/**
 * Renders the preview image of a cloud storage, requesting it lazily on first
 * render and caching the result in the redux store.
 * Shows a spinner while fetching and a placeholder icon when no preview exists.
 */
export default function Preview({ cloudStorage }: Props): JSX.Element {
    const dispatch = useDispatch();
    // Cached preview entry; undefined until a fetch has ever been started.
    const preview = useSelector((state: CombinedState) => state.cloudStorages.previews[cloudStorage.id]);

    useEffect(() => {
        // Request exactly once per storage: undefined means "never requested".
        if (preview === undefined) {
            dispatch(getCloudStoragePreviewAsync(cloudStorage));
        }
    }, [preview]);

    // Spinner while the entry does not exist yet or a request is in flight
    // (the previous `preview && preview.fetching` re-check was redundant after !preview).
    if (!preview || preview.fetching) {
        return (
            <div className='cvat-cloud-storage-item-loading-preview' aria-hidden>
                <Spin size='default' />
            </div>
        );
    }

    // Fetch finished but the backend returned no image.
    if (preview.initialized && !preview.preview) {
        return (
            <div className='cvat-cloud-storage-item-empty-preview' aria-hidden>
                <PictureOutlined />
            </div>
        );
    }

    return (
        <img
            className='cvat-cloud-storage-item-preview'
            src={preview.preview}
            alt='Preview image'
            aria-hidden
        />
    );
}
// Copyright (C) 2021-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -78,10 +79,11 @@
height: $grid-unit-size * 24;
}
img {
.cvat-cloud-storage-item-preview {
height: $grid-unit-size * 24;
object-fit: cover;
margin: auto;
width: 100%;
}
.cvat-cloud-storage-item-menu-button {
......
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
import React, { useEffect } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import { PictureOutlined } from '@ant-design/icons';
import Spin from 'antd/lib/spin';
import { getJobPreviewAsync } from 'actions/jobs-actions';
import { getTaskPreviewAsync } from 'actions/tasks-actions';
import { getProjectsPreviewAsync } from 'actions/projects-actions';
import { getCloudStoragePreviewAsync } from 'actions/cloud-storage-actions';
import {
CombinedState, Job, Task, Project, CloudStorage,
} from 'reducers';
interface Props {
job?: Job | undefined;
task?: Task | undefined;
project?: Project | undefined;
cloudStorage?: CloudStorage | undefined;
onClick?: (event: React.MouseEvent) => void;
loadingClassName?: string;
emptyPreviewClassName?: string;
previewWrapperClassName?: string;
previewClassName?: string;
}
/**
 * Generic preview component shared by job, task, project and cloud-storage
 * cards. Exactly one of the resource props is expected; the preview is
 * requested lazily on first render and cached in the corresponding store slice.
 */
export default function Preview(props: Props): JSX.Element {
    const dispatch = useDispatch();
    const {
        job,
        task,
        project,
        cloudStorage,
        onClick,
        loadingClassName,
        emptyPreviewClassName,
        previewWrapperClassName,
        previewClassName,
    } = props;

    // Pick the cached preview entry for whichever resource was supplied.
    // NOTE(review): when no resource prop is set this yields '', which keeps
    // the spinner rendered forever — confirm all call sites pass exactly one
    // resource.
    const preview = useSelector((state: CombinedState) => {
        if (job !== undefined) {
            return state.jobs.previews[job.id];
        } if (project !== undefined) {
            return state.projects.previews[project.id];
        } if (task !== undefined) {
            return state.tasks.previews[task.id];
        } if (cloudStorage !== undefined) {
            return state.cloudStorages.previews[cloudStorage.id];
        }
        return '';
    });

    useEffect(() => {
        // Request exactly once per resource: undefined means "never requested".
        if (preview === undefined) {
            if (job !== undefined) {
                dispatch(getJobPreviewAsync(job));
            } else if (project !== undefined) {
                dispatch(getProjectsPreviewAsync(project));
            } else if (task !== undefined) {
                dispatch(getTaskPreviewAsync(task));
            } else if (cloudStorage !== undefined) {
                dispatch(getCloudStoragePreviewAsync(cloudStorage));
            }
        }
    }, [preview]);

    // Spinner while the entry does not exist yet or a request is in flight
    // (the previous `preview && preview.fetching` re-check was redundant after !preview).
    if (!preview || preview.fetching) {
        return (
            <div className={loadingClassName || ''} aria-hidden>
                <Spin size='default' />
            </div>
        );
    }

    // Fetch finished but the backend returned no image.
    if (preview.initialized && !preview.preview) {
        return (
            <div className={emptyPreviewClassName || ''} aria-hidden>
                <PictureOutlined />
            </div>
        );
    }

    return (
        <div className={previewWrapperClassName || ''} aria-hidden>
            <img
                className={previewClassName || ''}
                src={preview.preview}
                onClick={onClick}
                alt='Preview image'
                aria-hidden
            />
        </div>
    );
}
......@@ -7,7 +7,6 @@ import React, { useState } from 'react';
import { useDispatch } from 'react-redux';
import { useHistory } from 'react-router';
import Card from 'antd/lib/card';
import Empty from 'antd/lib/empty';
import Descriptions from 'antd/lib/descriptions';
import { MoreOutlined } from '@ant-design/icons';
import Dropdown from 'antd/lib/dropdown';
......@@ -16,6 +15,7 @@ import Menu from 'antd/lib/menu';
import { MenuInfo } from 'rc-menu/lib/interface';
import { useCardHeightHOC } from 'utils/hooks';
import { exportActions } from 'actions/export-actions';
import Preview from 'components/common/preview';
const useCardHeight = useCardHeightHOC({
containerClassName: 'cvat-jobs-page',
......@@ -26,12 +26,11 @@ const useCardHeight = useCardHeightHOC({
interface Props {
job: any;
preview: string;
}
function JobCardComponent(props: Props): JSX.Element {
const dispatch = useDispatch();
const { job, preview } = props;
const { job } = props;
const [expanded, setExpanded] = useState<boolean>(false);
const history = useHistory();
const height = useCardHeight();
......@@ -53,19 +52,14 @@ function JobCardComponent(props: Props): JSX.Element {
className='cvat-job-page-list-item'
cover={(
<>
{preview ? (
<img
className='cvat-jobs-page-job-item-card-preview'
src={preview}
alt='Preview'
onClick={onClick}
aria-hidden
/>
) : (
<div className='cvat-jobs-page-job-item-card-preview' onClick={onClick} aria-hidden>
<Empty description='Preview not found' />
</div>
)}
<Preview
job={job}
onClick={onClick}
loadingClassName='cvat-job-item-loading-preview'
emptyPreviewClassName='cvat-job-item-empty-preview'
previewWrapperClassName='cvat-jobs-page-job-item-card-preview-wrapper'
previewClassName='cvat-jobs-page-job-item-card-preview'
/>
<div className='cvat-job-page-list-item-id'>
ID:
{` ${job.id}`}
......
// Copyright (C) 2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -59,17 +60,34 @@
}
}
.cvat-jobs-page-job-item-card-preview {
.ant-empty-image {
height: $grid-unit-size * 10;
.cvat-jobs-page-job-item-card-preview-wrapper {
height: 100%;
width: 100%;
> .cvat-jobs-page-job-item-card-preview {
.ant-empty-image {
height: $grid-unit-size * 10;
}
height: 100%;
width: 100%;
display: flex;
align-items: center;
justify-content: space-around;
object-fit: cover;
cursor: pointer;
}
}
height: 100%;
display: flex;
align-items: center;
justify-content: space-around;
object-fit: cover;
cursor: pointer;
.cvat-job-item-loading-preview,
.cvat-job-item-empty-preview {
.ant-spin {
position: inherit;
}
font-size: $grid-unit-size * 15;
text-align: center;
height: $grid-unit-size * 24;
}
.cvat-job-page-list-item-dimension {
......
// Copyright (C) 2021-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -23,8 +24,8 @@ const core = getCore();
export default function MoveTaskModal(): JSX.Element {
const visible = useSelector((state: CombinedState) => state.tasks.moveTask.modalVisible);
const task = useSelector((state: CombinedState) => {
const [taskInstance] = state.tasks.current.filter((_task) => _task.instance.id === state.tasks.moveTask.taskId);
return taskInstance?.instance;
const [taskInstance] = state.tasks.current.filter((_task) => _task.id === state.tasks.moveTask.taskId);
return taskInstance;
});
const taskUpdating = useSelector((state: CombinedState) => state.tasks.updating);
const dispatch = useDispatch();
......
......@@ -48,7 +48,7 @@ export default function ProjectPageComponent(): JSX.Element {
const id = +useParams<ParamType>().id;
const dispatch = useDispatch();
const history = useHistory();
const projects = useSelector((state: CombinedState) => state.projects.current).map((project) => project.instance);
const projects = useSelector((state: CombinedState) => state.projects.current);
const projectsFetching = useSelector((state: CombinedState) => state.projects.fetching);
const deletes = useSelector((state: CombinedState) => state.projects.activities.deletes);
const taskDeletes = useSelector((state: CombinedState) => state.tasks.activities.deletes);
......@@ -77,7 +77,7 @@ export default function ProjectPageComponent(): JSX.Element {
const [project] = projects.filter((_project) => _project.id === id);
const projectSubsets: Array<string> = [];
for (const task of tasks) {
if (!projectSubsets.includes(task.instance.subset)) projectSubsets.push(task.instance.subset);
if (!projectSubsets.includes(task.subset)) projectSubsets.push(task.subset);
}
useEffect(() => {
......@@ -121,18 +121,17 @@ export default function ProjectPageComponent(): JSX.Element {
<React.Fragment key={subset}>
{subset && <Title level={4}>{subset}</Title>}
{tasks
.filter((task) => task.instance.projectId === project.id && task.instance.subset === subset)
.filter((task) => task.projectId === project.id && task.subset === subset)
.map((task: Task) => (
<TaskItem
key={task.instance.id}
deleted={task.instance.id in taskDeletes ? taskDeletes[task.instance.id] : false}
key={task.id}
deleted={task.id in taskDeletes ? taskDeletes[task.id] : false}
hidden={false}
activeInference={tasksActiveInferences[task.instance.id] || null}
activeInference={tasksActiveInferences[task.id] || null}
cancelAutoAnnotation={() => {
dispatch(cancelInferenceAsync(task.instance.id));
dispatch(cancelInferenceAsync(task.id));
}}
previewImage={task.preview}
taskInstance={task.instance}
taskInstance={task}
/>
))}
</React.Fragment>
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -7,7 +8,6 @@ import moment from 'moment';
import { useSelector } from 'react-redux';
import { useHistory } from 'react-router';
import Text from 'antd/lib/typography/Text';
import Empty from 'antd/lib/empty';
import Card from 'antd/lib/card';
import Meta from 'antd/lib/card/Meta';
import Dropdown from 'antd/lib/dropdown';
......@@ -16,6 +16,7 @@ import { MoreOutlined } from '@ant-design/icons';
import { CombinedState, Project } from 'reducers';
import { useCardHeightHOC } from 'utils/hooks';
import Preview from 'components/common/preview';
import ProjectActionsMenuComponent from './actions-menu';
interface Props {
......@@ -31,7 +32,7 @@ const useCardHeight = useCardHeightHOC({
export default function ProjectItemComponent(props: Props): JSX.Element {
const {
projectInstance: { instance, preview },
projectInstance: instance,
} = props;
const history = useHistory();
......@@ -53,21 +54,16 @@ export default function ProjectItemComponent(props: Props): JSX.Element {
return (
<Card
cover={
preview ? (
<img
className='cvat-projects-project-item-card-preview'
src={preview}
alt='Preview'
onClick={onOpenProject}
aria-hidden
/>
) : (
<div className='cvat-projects-project-item-card-preview' onClick={onOpenProject} aria-hidden>
<Empty description='No tasks' />
</div>
)
}
cover={(
<Preview
project={instance}
loadingClassName='cvat-project-item-loading-preview'
emptyPreviewClassName='cvat-project-item-empty-preview'
previewWrapperClassName='cvat-projects-project-item-card-preview-wrapper'
previewClassName='cvat-projects-project-item-card-preview'
onClick={onOpenProject}
/>
)}
size='small'
style={style}
className='cvat-projects-project-item-card'
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -41,7 +42,7 @@ export default function ProjectListComponent(): JSX.Element {
<Col className='cvat-projects-list' {...dimensions}>
{projects.map(
(project: Project): JSX.Element => (
<ProjectItem key={project.instance.id} projectInstance={project} />
<ProjectItem key={project.id} projectInstance={project} />
),
)}
</Col>
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -118,27 +119,29 @@
}
.cvat-projects-project-item-card {
.cvat-projects-project-item-card-preview {
.ant-empty {
margin: $grid-unit-size;
height: inherit;
display: grid;
.ant-empty-image {
height: $grid-unit-size * 10;
}
.cvat-projects-project-item-card-preview-wrapper {
height: 100%;
> div:first-child {
margin: auto;
}
.cvat-projects-project-item-card-preview {
height: 100%;
width: 100%;
display: flex;
align-items: center;
justify-content: space-around;
object-fit: cover;
cursor: pointer;
}
}
height: 100%;
display: flex;
align-items: center;
justify-content: space-around;
object-fit: cover;
cursor: pointer;
.cvat-project-item-loading-preview,
.cvat-project-item-empty-preview {
.ant-spin {
position: inherit;
}
font-size: $grid-unit-size * 15;
text-align: center;
height: $grid-unit-size * 24;
}
.cvat-projects-project-item-title {
......
// Copyright (C) 2019-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -18,6 +19,7 @@ import { getCore } from 'cvat-core-wrapper';
import { getReposData, syncRepos, changeRepo } from 'utils/git-utils';
import { ActiveInference } from 'reducers';
import AutomaticAnnotationProgress from 'components/tasks-page/automatic-annotation-progress';
import Preview from 'components/common/preview';
import Descriptions from 'antd/lib/descriptions';
import Space from 'antd/lib/space';
import UserSelector, { User } from './user-selector';
......@@ -30,7 +32,6 @@ const { Option } = Select;
const core = getCore();
interface Props {
previewImage: string;
taskInstance: any;
installedGit: boolean; // change to git repos url
activeInference: ActiveInference | null;
......@@ -53,8 +54,6 @@ interface State {
export default class DetailsComponent extends React.PureComponent<Props, State> {
private mounted: boolean;
private previewImageElement: HTMLImageElement;
private previewWrapperRef: React.RefObject<HTMLDivElement>;
constructor(props: Props) {
super(props);
......@@ -62,8 +61,6 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
const { taskInstance } = props;
this.mounted = false;
this.previewImageElement = new Image();
this.previewWrapperRef = React.createRef<HTMLDivElement>();
this.state = {
name: taskInstance.name,
subset: taskInstance.subset,
......@@ -76,25 +73,9 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
}
public componentDidMount(): void {
const { taskInstance, previewImage } = this.props;
const { previewImageElement, previewWrapperRef } = this;
const { taskInstance } = this.props;
this.mounted = true;
previewImageElement.onload = () => {
const { height, width } = previewImageElement;
if (width > height) {
previewImageElement.style.width = '100%';
} else {
previewImageElement.style.height = '100%';
}
};
previewImageElement.src = previewImage;
previewImageElement.alt = 'Preview';
if (previewWrapperRef.current) {
previewWrapperRef.current.appendChild(previewImageElement);
}
getReposData(taskInstance.id)
.then((data): void => {
if (data !== null && this.mounted) {
......@@ -212,13 +193,6 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
);
}
private renderPreview(): JSX.Element {
const { previewWrapperRef } = this;
// Add image on mount after get its width and height to fit it into wrapper
return <div ref={previewWrapperRef} className='cvat-task-preview-wrapper' />;
}
private renderParameters(): JSX.Element {
const { taskInstance } = this.props;
const { overlap, segmentSize, imageQuality } = taskInstance;
......@@ -414,7 +388,14 @@ export default class DetailsComponent extends React.PureComponent<Props, State>
<Row justify='space-between' align='top'>
<Col md={8} lg={7} xl={7} xxl={6}>
<Row justify='start' align='middle'>
<Col span={24}>{this.renderPreview()}</Col>
<Col span={24}>
<Preview
task={taskInstance}
loadingClassName='cvat-task-item-loading-preview'
emptyPreviewClassName='cvat-task-item-empty-preview'
previewClassName='cvat-task-item-preview'
/>
</Col>
</Row>
<Row>
<Col span={24}>{this.renderParameters()}</Col>
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -36,6 +37,25 @@
.cvat-project-search-field {
width: $grid-unit-size * 20;
}
.cvat-task-item-loading-preview,
.cvat-task-item-empty-preview {
.ant-spin {
position: inherit;
}
height: $grid-unit-size * 18;
font-size: $grid-unit-size * 10;
text-align: center;
margin-bottom: $grid-unit-size * 3;
}
.cvat-task-item-preview {
width: 100%;
object-fit: cover;
margin-bottom: $grid-unit-size * 3;
height: $grid-unit-size * 18;
}
}
.cvat-task-page-actions-button {
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -81,7 +82,7 @@ class TaskPageComponent extends React.PureComponent<Props> {
className='cvat-task-details-wrapper'
>
<Col md={22} lg={18} xl={16} xxl={14}>
<TopBarComponent taskInstance={(task as Task).instance} />
<TopBarComponent taskInstance={task as Task} />
<DetailsContainer task={task as Task} />
<JobListContainer task={task as Task} />
</Col>
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -91,18 +92,28 @@
&:hover {
border: 1px solid $border-color-hover;
}
}
.cvat-task-item-preview-wrapper {
display: flex;
justify-content: center;
overflow: hidden;
margin: 20px;
margin-top: 0;
.cvat-task-item-loading-preview,
.cvat-task-item-empty-preview {
.ant-spin {
position: inherit;
}
> .cvat-task-item-preview {
max-width: 140px;
max-height: 80px;
font-size: $grid-unit-size * 6;
text-align: center;
}
.cvat-task-item-preview-wrapper {
display: flex;
justify-content: center;
overflow: hidden;
margin: $grid-unit-size * 3;
margin-top: 0;
> .cvat-task-item-preview {
max-width: 140px;
max-height: 80px;
}
}
}
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -14,12 +15,12 @@ import Progress from 'antd/lib/progress';
import moment from 'moment';
import ActionsMenuContainer from 'containers/actions-menu/actions-menu';
import Preview from 'components/common/preview';
import { ActiveInference } from 'reducers';
import AutomaticAnnotationProgress from './automatic-annotation-progress';
export interface TaskItemProps {
taskInstance: any;
previewImage: string;
deleted: boolean;
hidden: boolean;
activeInference: ActiveInference | null;
......@@ -28,12 +29,16 @@ export interface TaskItemProps {
class TaskItemComponent extends React.PureComponent<TaskItemProps & RouteComponentProps> {
private renderPreview(): JSX.Element {
const { previewImage } = this.props;
const { taskInstance } = this.props;
return (
<Col span={4}>
<div className='cvat-task-item-preview-wrapper'>
<img alt='Preview' className='cvat-task-item-preview' src={previewImage} />
</div>
<Preview
task={taskInstance}
loadingClassName='cvat-task-item-loading-preview'
emptyPreviewClassName='cvat-task-item-empty-preview'
previewWrapperClassName='cvat-task-item-preview-wrapper'
previewClassName='cvat-task-item-preview'
/>
</Col>
);
}
......
// Copyright (C) 2019-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -29,16 +30,16 @@ interface DispatchToProps {
function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
const { list } = state.plugins;
const [taskProject] = state.projects.current.filter((project) => project.id === own.task.instance.projectId);
const [taskProject] = state.projects.current.filter((project) => project.id === own.task.projectId);
return {
dumpers: state.formats.annotationFormats.dumpers,
user: state.auth.user,
installedGit: list.GIT_INTEGRATION,
activeInference: state.models.inferences[own.task.instance.id] || null,
activeInference: state.models.inferences[own.task.id] || null,
projectSubsets: taskProject ?
([
...new Set(taskProject.tasks.map((task: any) => task.subset).filter((subset: string) => subset)),
...new Set(taskProject.subsets),
] as string[]) :
[],
};
......@@ -50,7 +51,7 @@ function mapDispatchToProps(dispatch: any, own: OwnProps): DispatchToProps {
dispatch(updateTaskAsync(taskInstance));
},
cancelAutoAnnotation(): void {
dispatch(cancelInferenceAsync(own.task.instance.id));
dispatch(cancelInferenceAsync(own.task.id));
},
};
}
......@@ -64,8 +65,7 @@ function TaskPageContainer(props: StateToProps & DispatchToProps & OwnProps): JS
<DetailsComponent
dumpers={dumpers}
user={user}
previewImage={task.preview}
taskInstance={task.instance}
taskInstance={task}
installedGit={installedGit}
activeInference={activeInference}
projectSubsets={projectSubsets}
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -26,7 +27,7 @@ function mapDispatchToProps(dispatch: any): DispatchToProps {
function TaskPageContainer(props: DispatchToProps & OwnProps): JSX.Element {
const { task, onJobUpdate } = props;
return <JobListComponent taskInstance={task.instance} onJobUpdate={onJobUpdate} />;
return <JobListComponent taskInstance={task} onJobUpdate={onJobUpdate} />;
}
export default connect(null, mapDispatchToProps)(TaskPageContainer);
......@@ -37,7 +37,7 @@ function mapStateToProps(state: CombinedState, own: Props): StateToProps {
const id = +own.match.params.id;
const filteredTasks = state.tasks.current.filter((task) => task.instance.id === id);
const filteredTasks = state.tasks.current.filter((task) => task.id === id);
const task = filteredTasks[0] || (gettingQuery.id === id || Number.isNaN(id) ? undefined : null);
......@@ -46,7 +46,7 @@ function mapStateToProps(state: CombinedState, own: Props): StateToProps {
deleteActivity = deletes[id];
}
const jobIDs = task ? Object.fromEntries(task.instance.jobs.map((job:any) => [job.id])) : {};
const jobIDs = task ? Object.fromEntries(task.jobs.map((job:any) => [job.id])) : {};
const updatingJobs = Object.keys(jobUpdates);
const jobUpdating = updatingJobs.some((jobID) => jobID in jobIDs);
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -14,7 +15,6 @@ import { cancelInferenceAsync } from 'actions/models-actions';
interface StateToProps {
deleted: boolean;
hidden: boolean;
previewImage: string;
taskInstance: any;
activeInference: ActiveInference | null;
}
......@@ -35,10 +35,9 @@ function mapStateToProps(state: CombinedState, own: OwnProps): StateToProps {
const id = own.taskID;
return {
hidden: state.tasks.hideEmpty && task.instance.jobs.length === 0,
hidden: state.tasks.hideEmpty && task.jobs.length === 0,
deleted: id in deletes ? deletes[id] === true : false,
previewImage: task.preview,
taskInstance: task.instance,
taskInstance: task,
activeInference: state.models.inferences[id] || null,
};
}
......
// Copyright (C) 2020-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -37,7 +38,7 @@ function TasksListContainer(props: TasksListContainerProps): JSX.Element {
return (
<TasksListComponent
currentTasksIndexes={tasks.current.map((task): number => task.instance.id)}
currentTasksIndexes={tasks.current.map((task): number => task.id)}
/>
);
}
......
......@@ -23,7 +23,7 @@ function mapStateToProps(state: CombinedState): StateToProps {
query: tasks.gettingQuery,
count: state.tasks.count,
countInvisible: tasks.hideEmpty ?
tasks.current.filter((task: Task): boolean => !task.instance.jobs.length).length :
tasks.current.filter((task: Task): boolean => !task.jobs.length).length :
0,
importing: state.import.tasks.backup.importing,
};
......
// Copyright (C) 2021-2022 Intel Corporation
// Copyright (C) 2022 CVAT.ai Corporation
//
// SPDX-License-Identifier: MIT
......@@ -306,41 +307,46 @@ export default (
case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW: {
const { cloudStorageID } = action.payload;
const { previews } = state;
previews[cloudStorageID] = {
preview: '',
fetching: true,
initialized: false,
};
return {
...state,
previews,
previews: {
...previews,
[cloudStorageID]: {
preview: '',
fetching: true,
initialized: false,
},
},
};
}
case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW_SUCCESS: {
const { cloudStorageID, preview } = action.payload;
const { previews } = state;
previews[cloudStorageID] = {
...previews[cloudStorageID],
preview,
initialized: true,
fetching: false,
};
return {
...state,
previews,
previews: {
...previews,
[cloudStorageID]: {
preview,
fetching: false,
initialized: true,
},
},
};
}
case CloudStorageActionTypes.GET_CLOUD_STORAGE_PREVIEW_FAILED: {
const { cloudStorageID } = action.payload;
const { previews } = state;
previews[cloudStorageID] = {
...previews[cloudStorageID],
initialized: true,
fetching: false,
};
return {
...state,
previews,
previews: {
...previews,
[cloudStorageID]: {
...previews[cloudStorageID],
fetching: false,
initialized: true,
},
},
};
}
case AuthActionTypes.LOGOUT_SUCCESS: {
......
......@@ -46,16 +46,22 @@ export interface ProjectsQuery {
sort: string | null;
}
export interface Project {
instance: any;
interface Preview {
fetching: boolean;
initialized: boolean;
preview: string;
}
export type Project = any;
export interface ProjectsState {
initialized: boolean;
fetching: boolean;
count: number;
current: Project[];
previews: {
[index: number]: Preview;
};
gettingQuery: ProjectsQuery;
tasksGettingQuery: TasksQuery & { ordering: string };
activities: {
......@@ -78,10 +84,7 @@ export interface TasksQuery {
projectId: number | null;
}
export interface Task {
instance: any; // cvat-core instance
preview: string;
}
export type Task = any; // cvat-core instance
export interface JobsQuery {
page: number;
......@@ -90,12 +93,16 @@ export interface JobsQuery {
filter: string | null;
}
export type Job = any;
export interface JobsState {
query: JobsQuery;
fetching: boolean;
count: number;
current: any[];
previews: string[];
current: Job[];
previews: {
[index: number]: Preview;
};
}
export interface TasksState {
......@@ -110,6 +117,9 @@ export interface TasksState {
gettingQuery: TasksQuery;
count: number;
current: Task[];
previews: {
[index: number]: Preview;
};
activities: {
deletes: {
[tid: number]: boolean; // deleted (deleting if in dictionary)
......@@ -214,14 +224,11 @@ export interface CloudStoragesQuery {
filter: string | null;
}
interface CloudStorageAdditional {
interface CloudStorageStatus {
fetching: boolean;
initialized: boolean;
status: string | null;
preview: string;
}
type CloudStorageStatus = Pick<CloudStorageAdditional, 'fetching' | 'initialized' | 'status'>;
type CloudStoragePreview = Pick<CloudStorageAdditional, 'fetching' | 'initialized' | 'preview'>;
export type CloudStorage = any;
......@@ -234,7 +241,7 @@ export interface CloudStoragesState {
[index: number]: CloudStorageStatus;
};
previews: {
[index: number]: CloudStoragePreview;
[index: number]: Preview;
};
gettingQuery: CloudStoragesQuery;
activities: {
......
......@@ -15,7 +15,7 @@ const defaultState: JobsState = {
search: null,
},
current: [],
previews: [],
previews: {},
};
export default (state: JobsState = defaultState, action: JobsActions): JobsState => {
......@@ -36,7 +36,6 @@ export default (state: JobsState = defaultState, action: JobsActions): JobsState
fetching: false,
count: action.payload.jobs.count,
current: action.payload.jobs,
previews: action.payload.previews,
};
}
case JobsActionTypes.GET_JOBS_FAILED: {
......@@ -45,6 +44,51 @@ export default (state: JobsState = defaultState, action: JobsActions): JobsState
fetching: false,
};
}
case JobsActionTypes.GET_JOB_PREVIEW: {
const { jobID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[jobID]: {
preview: '',
fetching: true,
initialized: false,
},
},
};
}
case JobsActionTypes.GET_JOB_PREVIEW_SUCCESS: {
const { jobID, preview } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[jobID]: {
preview,
fetching: false,
initialized: true,
},
},
};
}
case JobsActionTypes.GET_JOB_PREVIEW_FAILED: {
const { jobID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[jobID]: {
...previews[jobID],
fetching: false,
initialized: true,
},
},
};
}
default: {
return state;
}
......
......@@ -15,6 +15,7 @@ const defaultState: ProjectsState = {
fetching: false,
count: 0,
current: [],
previews: {},
gettingQuery: {
page: 1,
id: null,
......@@ -63,19 +64,12 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
current: [],
};
case ProjectsActionTypes.GET_PROJECTS_SUCCESS: {
const combinedWithPreviews = action.payload.array.map(
(project: any, index: number): Project => ({
instance: project,
preview: action.payload.previews[index],
}),
);
return {
...state,
initialized: true,
fetching: false,
count: action.payload.count,
current: combinedWithPreviews,
current: action.payload.array,
};
}
case ProjectsActionTypes.GET_PROJECTS_FAILED: {
......@@ -130,13 +124,11 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
return {
...state,
current: state.current.map(
(project): Project => ({
...project,
instance:
project.instance.id === action.payload.project.id ?
action.payload.project :
project.instance,
}),
(project): Project => (
project.id === action.payload.project.id ?
action.payload.project :
project
),
),
};
}
......@@ -144,13 +136,9 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
return {
...state,
current: state.current.map(
(project): Project => ({
...project,
instance:
project.instance.id === action.payload.project.id ?
action.payload.project :
project.instance,
}),
(project): Project => (project.id === action.payload.project.id ?
action.payload.project :
project),
),
};
}
......@@ -206,6 +194,51 @@ export default (state: ProjectsState = defaultState, action: AnyAction): Project
case AuthActionTypes.LOGOUT_SUCCESS: {
return { ...defaultState };
}
case ProjectsActionTypes.GET_PROJECT_PREVIEW: {
const { projectID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[projectID]: {
preview: '',
fetching: true,
initialized: false,
},
},
};
}
case ProjectsActionTypes.GET_PROJECT_PREVIEW_SUCCESS: {
const { projectID, preview } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[projectID]: {
preview,
fetching: false,
initialized: true,
},
},
};
}
case ProjectsActionTypes.GET_PROJECT_PREVIEW_FAILED: {
const { projectID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[projectID]: {
...previews[projectID],
fetching: false,
initialized: true,
},
},
};
}
default:
return state;
}
......
......@@ -23,6 +23,7 @@ const defaultState: TasksState = {
},
count: 0,
current: [],
previews: {},
gettingQuery: {
page: 1,
id: null,
......@@ -56,20 +57,13 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
} : state.gettingQuery,
};
case TasksActionTypes.GET_TASKS_SUCCESS: {
const combinedWithPreviews = action.payload.array.map(
(task: any, index: number): Task => ({
instance: task,
preview: action.payload.previews[index],
}),
);
return {
...state,
initialized: true,
fetching: false,
updating: false,
count: action.payload.count,
current: combinedWithPreviews,
current: action.payload.array,
};
}
case TasksActionTypes.GET_TASKS_FAILED:
......@@ -140,7 +134,7 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
return {
...state,
updating: false,
current: state.current.filter((_task: Task): boolean => _task.instance.id !== taskID),
current: state.current.filter((_task: Task): boolean => _task.id !== taskID),
};
}
......@@ -149,11 +143,8 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
updating: false,
current: state.current.map(
(_task): Task => {
if (_task.instance.id === task.id) {
return {
..._task,
instance: task,
};
if (_task.id === task.id) {
return task;
}
return _task;
......@@ -167,11 +158,8 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
updating: false,
current: state.current.map(
(task): Task => {
if (task.instance.id === action.payload.task.id) {
return {
...task,
instance: action.payload.task,
};
if (task.id === action.payload.task.id) {
return action.payload.task;
}
return task;
......@@ -236,6 +224,51 @@ export default (state: TasksState = defaultState, action: AnyAction): TasksState
case AuthActionTypes.LOGOUT_SUCCESS: {
return { ...defaultState };
}
case TasksActionTypes.GET_TASK_PREVIEW: {
const { taskID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
preview: '',
fetching: true,
initialized: false,
},
},
};
}
case TasksActionTypes.GET_TASK_PREVIEW_SUCCESS: {
const { taskID, preview } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
preview,
fetching: false,
initialized: true,
},
},
};
}
case TasksActionTypes.GET_TASK_PREVIEW_FAILED: {
const { taskID } = action.payload;
const { previews } = state;
return {
...state,
previews: {
...previews,
[taskID]: {
...previews[taskID],
fetching: false,
initialized: true,
},
},
};
}
default:
return state;
}
......
......@@ -4,10 +4,13 @@
import os
from io import BytesIO
from datetime import datetime
from tempfile import NamedTemporaryFile
import pytz
from diskcache import Cache
from django.conf import settings
from tempfile import NamedTemporaryFile
from rest_framework.exceptions import ValidationError, NotFound
from cvat.apps.engine.log import slogger
from cvat.apps.engine.media_extractors import (Mpeg4ChunkWriter,
......@@ -17,6 +20,11 @@ from cvat.apps.engine.models import DataChoice, StorageChoice
from cvat.apps.engine.models import DimensionType
from cvat.apps.engine.cloud_provider import get_cloud_storage_instance, Credentials
from cvat.apps.engine.utils import md5_hash
from cvat.apps.engine.cloud_provider import db_storage_to_storage_instance
from cvat.apps.engine.mime_types import mimetypes
from utils.dataset_manifest import ImageManifestManager
class CacheInteraction:
def __init__(self, dimension=DimensionType.DIM_2D):
self._cache = Cache(settings.CACHE_ROOT)
......@@ -25,16 +33,44 @@ class CacheInteraction:
def __del__(self):
self._cache.close()
def get_buff_mime(self, chunk_number, quality, db_data):
chunk, tag = self._cache.get('{}_{}_{}'.format(db_data.id, chunk_number, quality), tag=True)
def get_buf_chunk_with_mime(self, chunk_number, quality, db_data):
    """Return (chunk_buffer, mime_type) for the requested data chunk.

    The chunk is looked up in the diskcache first; on a miss it is
    prepared from the raw media and stored back under the same key so
    subsequent requests are served from the cache.
    """
    cache_key = f'{db_data.id}_{chunk_number}_{quality}'
    # diskcache stores the mime type as the entry's tag
    chunk, tag = self._cache.get(cache_key, tag=True)
    if not chunk:
        chunk, tag = self._prepare_chunk_buff(db_data, quality, chunk_number)
        self._cache.set(cache_key, chunk, tag=tag)
    return chunk, tag
def prepare_chunk_buff(self, db_data, quality, chunk_number):
def get_local_preview_with_mime(self, frame_number, db_data):
    """Return (buffer, mime_type) of the preview image for a locally
    stored frame, generating and caching it on first request.
    """
    key = f'data_{db_data.id}_{frame_number}_preview'
    # the mime type travels as the cache entry's tag
    buf, mime = self._cache.get(key, tag=True)
    if not buf:
        buf, mime = self._prepare_local_preview(frame_number, db_data)
        self._cache.set(key, buf, tag=mime)
    return buf, mime
def get_cloud_preview_with_mime(self, db_storage):
    """Return (buffer, mime_type) of the preview for a cloud storage,
    downloading and caching it on first request.
    """
    key = f'cloudstorage_{db_storage.id}_preview'
    preview, mime = self._cache.get(key, tag=True)
    if not preview:
        preview, mime = self._prepare_cloud_preview(db_storage)
        self._cache.set(key, preview, tag=mime)
    return preview, mime
@staticmethod
def _get_frame_provider():
    # Imported lazily to break the cache <-> frame_provider import cycle.
    from cvat.apps.engine.frame_provider import FrameProvider # TODO: remove circular dependency
    return FrameProvider
def _prepare_chunk_buff(self, db_data, quality, chunk_number):
FrameProvider = self._get_frame_provider()
writer_classes = {
FrameProvider.Quality.COMPRESSED : Mpeg4CompressedChunkWriter if db_data.compressed_chunk_type == DataChoice.VIDEO else ZipCompressedChunkWriter,
FrameProvider.Quality.ORIGINAL : Mpeg4ChunkWriter if db_data.original_chunk_type == DataChoice.VIDEO else ZipChunkWriter,
......@@ -108,5 +144,42 @@ class CacheInteraction:
os.remove(image_path)
return buff, mime_type
def save_chunk(self, db_data_id, chunk_number, quality, buff, mime_type):
self._cache.set('{}_{}_{}'.format(db_data_id, chunk_number, quality), buff, tag=mime_type)
def _prepare_local_preview(self, frame_number, db_data):
    """Render the preview image for a frame of locally stored data.

    Returns the (buffer, mime_type) pair produced by the frame provider.
    """
    provider_cls = self._get_frame_provider()
    provider = provider_cls(db_data, self._dimension)
    return provider.get_preview(frame_number)
def _prepare_cloud_preview(self, db_storage):
    """Fetch the preview image for a cloud storage.

    Scans the storage's manifests for the first listed image, downloads
    that image and returns (file_object, mime_type).

    Raises:
        ValidationError: the storage has no manifest files.
        NotFound: no manifest lists any image.
    """
    storage = db_storage_to_storage_instance(db_storage)
    if not db_storage.manifests.count():
        raise ValidationError('Cannot get the cloud storage preview. There is no manifest file')
    preview_path = None
    for manifest_model in db_storage.manifests.all():
        manifest_prefix = os.path.dirname(manifest_model.filename)
        full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
        # re-download the manifest if the local copy is missing or older
        # than the remote one
        if not os.path.exists(full_manifest_path) or \
                datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
            storage.download_file(manifest_model.filename, full_manifest_path)
        manifest = ImageManifestManager(
            os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
            db_storage.get_storage_dirname()
        )
        # need to update index
        manifest.set_index()
        if not len(manifest):
            continue
        # the first image listed in the manifest becomes the preview
        preview_info = manifest[0]
        preview_filename = ''.join([preview_info['name'], preview_info['extension']])
        preview_path = os.path.join(manifest_prefix, preview_filename)
        break
    if not preview_path:
        msg = 'Cloud storage {} does not contain any images'.format(db_storage.pk)
        slogger.cloud_storage[db_storage.pk].info(msg)
        raise NotFound(msg)
    preview = storage.download_fileobj(preview_path)
    mime = mimetypes.guess_type(preview_path)[0]
    return preview, mime
......@@ -6,6 +6,7 @@
import math
from enum import Enum
from io import BytesIO
import os
import cv2
import numpy as np
......@@ -15,6 +16,7 @@ from cvat.apps.engine.cache import CacheInteraction
from cvat.apps.engine.media_extractors import VideoReader, ZipReader
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.models import DataChoice, StorageMethodChoice, DimensionType
from cvat.apps.engine.media_extractors import rotate_within_exif
from rest_framework.exceptions import ValidationError
class RandomAccessIterator:
......@@ -86,6 +88,7 @@ class FrameProvider:
def __init__(self, db_data, dimension=DimensionType.DIM_2D):
self._db_data = db_data
self._dimension = dimension
self._loaders = {}
reader_class = {
......@@ -98,12 +101,12 @@ class FrameProvider:
self._loaders[self.Quality.COMPRESSED] = self.BuffChunkLoader(
reader_class[db_data.compressed_chunk_type],
cache.get_buff_mime,
cache.get_buf_chunk_with_mime,
self.Quality.COMPRESSED,
self._db_data)
self._loaders[self.Quality.ORIGINAL] = self.BuffChunkLoader(
reader_class[db_data.original_chunk_type],
cache.get_buff_mime,
cache.get_buf_chunk_with_mime,
self.Quality.ORIGINAL,
self._db_data)
else:
......@@ -162,8 +165,23 @@ class FrameProvider:
else:
raise RuntimeError('unsupported output type')
def get_preview(self):
return self._db_data.get_preview_path()
def get_preview(self, frame_number):
    """Build a small JPEG preview (at most 256x256) for the given frame.

    For 3D data a static placeholder asset is used instead of the real
    frame. Returns (BytesIO, mime_type).
    """
    PREVIEW_SIZE = (256, 256)
    PREVIEW_MIME = 'image/jpeg'

    if self._dimension == DimensionType.DIM_3D:
        # TODO: generate a real preview for 3D (point cloud) data
        preview = Image.open(os.path.join(os.path.dirname(__file__), 'assets/3d_preview.jpeg'))
    else:
        preview, _ = self.get_frame(frame_number, self.Quality.COMPRESSED, self.Type.PIL)
        # compressed chunks do not apply EXIF orientation, so fix it here
        preview = rotate_within_exif(preview)

    # thumbnail() resizes in place, preserving the aspect ratio
    preview.thumbnail(PREVIEW_SIZE)

    output_buf = BytesIO()
    # convert to RGB since JPEG cannot store alpha/palette modes
    preview.convert('RGB').save(output_buf, format="JPEG")
    return output_buf, PREVIEW_MIME
def get_chunk(self, chunk_number, quality=Quality.ORIGINAL):
chunk_number = self._validate_chunk_number(chunk_number)
......@@ -186,3 +204,7 @@ class FrameProvider:
def get_frames(self, start_frame, stop_frame, quality=Quality.ORIGINAL, out_type=Type.BUFFER):
for idx in range(start_frame, stop_frame):
yield self.get_frame(idx, quality=quality, out_type=out_type)
@property
def data_id(self):
    # Identifier of the underlying Data model instance.
    return self._db_data.id
......@@ -4,9 +4,13 @@
import logging
import sys
import os.path as osp
from typing import Dict
from contextlib import contextmanager
from attr import define, field
from django.conf import settings
from cvat.settings.base import LOGGING
from .models import Job, Task, Project, CloudStorage
......@@ -175,3 +179,25 @@ def close_all():
for logger in _opened_loggers.values():
_close_logger(logger)
@contextmanager
def get_migration_logger(migration_name):
    """Context manager yielding a logger for a data migration.

    While active, sys.stdout and sys.stderr are redirected to
    <MIGRATIONS_LOGS_ROOT>/<migration_name>.log; messages emitted through
    the yielded logger go both to the original stdout and to that file.
    The original streams are restored and the file closed on exit.
    """
    migration_log_file = '{}.log'.format(migration_name)
    stdout = sys.stdout
    stderr = sys.stderr
    # redirect all stdout to the file
    log_file_object = open(osp.join(settings.MIGRATIONS_LOGS_ROOT, migration_log_file), 'w')
    sys.stdout = log_file_object
    sys.stderr = log_file_object

    log = logging.getLogger(migration_name)
    log.addHandler(logging.StreamHandler(stdout))
    log.addHandler(logging.StreamHandler(log_file_object))
    log.setLevel(logging.INFO)

    try:
        yield log
    finally:
        log_file_object.close()
        sys.stdout = stdout
        sys.stderr = stderr
......@@ -4,7 +4,6 @@ import os
import re
import shutil
import glob
import logging
import sys
import traceback
import itertools
......@@ -19,17 +18,18 @@ from cvat.apps.engine.media_extractors import (VideoReader, ArchiveReader, ZipRe
PdfReader , ImageListReader, Mpeg4ChunkWriter,
ZipChunkWriter, ZipCompressedChunkWriter, get_mime)
from cvat.apps.engine.models import DataChoice
from cvat.apps.engine.log import get_migration_logger
MIGRATION_THREAD_COUNT = 2
def fix_path(path):
ind = path.find('.upload')
if ind != -1:
path = path[ind + len('.upload') + 1:]
return path
ind = path.find('.upload')
if ind != -1:
path = path[ind + len('.upload') + 1:]
return path
def get_frame_step(frame_filter):
match = re.search("step\s*=\s*([1-9]\d*)", frame_filter)
match = re.search(r"step\s*=\s*([1-9]\d*)", frame_filter)
return int(match.group(1)) if match else 1
def get_task_on_disk():
......@@ -235,126 +235,110 @@ def migrate_task_schema(db_task, Data, log):
def create_data_objects(apps, schema_editor):
migration_name = os.path.splitext(os.path.basename(__file__))[0]
migration_log_file = '{}.log'.format(migration_name)
stdout = sys.stdout
stderr = sys.stderr
# redirect all stdout to the file
log_file_object = open(os.path.join(settings.MIGRATIONS_LOGS_ROOT, migration_log_file), 'w')
sys.stdout = log_file_object
sys.stderr = log_file_object
log = logging.getLogger(migration_name)
log.addHandler(logging.StreamHandler(stdout))
log.addHandler(logging.StreamHandler(log_file_object))
log.setLevel(logging.INFO)
disk_tasks = get_task_on_disk()
Task = apps.get_model('engine', 'Task')
Data = apps.get_model('engine', 'Data')
db_tasks = Task.objects
task_count = db_tasks.count()
log.info('\nStart schema migration...')
migrated_db_tasks = []
for counter, db_task in enumerate(db_tasks.all().iterator()):
res = migrate_task_schema(db_task, Data, log)
log.info('Schema migration for the task {} completed. Progress {}/{}'.format(db_task.id, counter+1, task_count))
if res:
migrated_db_tasks.append(res)
log.info('\nSchema migration is finished...')
log.info('\nStart data migration...')
manager = multiprocessing.Manager()
return_dict = manager.dict()
def create_process(db_task_id, db_data_id):
db_data = Data.objects.get(pk=db_data_id)
db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data_id))
new_raw_dir = os.path.join(db_data_dir, 'raw')
original_video = None
original_images = None
if hasattr(db_data, 'video'):
original_video = os.path.join(new_raw_dir, db_data.video.path)
else:
original_images = [os.path.realpath(os.path.join(new_raw_dir, db_image.path)) for db_image in db_data.images.all()]
args = (db_task_id, db_data_id, original_video, original_images, db_data.size,
db_data.start_frame, db_data.stop_frame, db_data.frame_filter, db_data.image_quality, db_data.chunk_size, return_dict)
return multiprocessing.Process(target=migrate_task_data, args=args)
results = {}
task_idx = 0
while True:
for res_idx in list(results.keys()):
res = results[res_idx]
if not res.is_alive():
del results[res_idx]
if res.exitcode == 0:
ret_code, message = return_dict[res_idx]
if ret_code:
counter = (task_idx - len(results))
progress = (100 * counter) / task_count
log.info('Data migration for the task {} completed. Progress: {:.02f}% | {}/{}.'.format(res_idx, progress, counter, task_count))
with get_migration_logger(migration_name) as log:
disk_tasks = get_task_on_disk()
Task = apps.get_model('engine', 'Task')
Data = apps.get_model('engine', 'Data')
db_tasks = Task.objects
task_count = db_tasks.count()
log.info('\nStart schema migration...')
migrated_db_tasks = []
for counter, db_task in enumerate(db_tasks.all().iterator()):
res = migrate_task_schema(db_task, Data, log)
log.info('Schema migration for the task {} completed. Progress {}/{}'.format(db_task.id, counter+1, task_count))
if res:
migrated_db_tasks.append(res)
log.info('\nSchema migration is finished...')
log.info('\nStart data migration...')
manager = multiprocessing.Manager()
return_dict = manager.dict()
def create_process(db_task_id, db_data_id):
db_data = Data.objects.get(pk=db_data_id)
db_data_dir = os.path.join(settings.MEDIA_DATA_ROOT, str(db_data_id))
new_raw_dir = os.path.join(db_data_dir, 'raw')
original_video = None
original_images = None
if hasattr(db_data, 'video'):
original_video = os.path.join(new_raw_dir, db_data.video.path)
else:
original_images = [os.path.realpath(os.path.join(new_raw_dir, db_image.path)) for db_image in db_data.images.all()]
args = (db_task_id, db_data_id, original_video, original_images, db_data.size,
db_data.start_frame, db_data.stop_frame, db_data.frame_filter, db_data.image_quality, db_data.chunk_size, return_dict)
return multiprocessing.Process(target=migrate_task_data, args=args)
results = {}
task_idx = 0
while True:
for res_idx in list(results.keys()):
res = results[res_idx]
if not res.is_alive():
del results[res_idx]
if res.exitcode == 0:
ret_code, message = return_dict[res_idx]
if ret_code:
counter = (task_idx - len(results))
progress = (100 * counter) / task_count
log.info('Data migration for the task {} completed. Progress: {:.02f}% | {}/{}.'.format(res_idx, progress, counter, task_count))
else:
log.error('Cannot migrate data for the task: {}'.format(res_idx))
log.error(str(message))
if res_idx in disk_tasks:
disk_tasks.remove(res_idx)
else:
log.error('Cannot migrate data for the task: {}'.format(res_idx))
log.error(str(message))
if res_idx in disk_tasks:
disk_tasks.remove(res_idx)
else:
log.error('#Cannot migrate data for the task: {}'.format(res_idx))
while task_idx < len(migrated_db_tasks) and len(results) < MIGRATION_THREAD_COUNT:
log.info('Start data migration for the task {}, data ID {}'.format(migrated_db_tasks[task_idx][0], migrated_db_tasks[task_idx][1]))
results[migrated_db_tasks[task_idx][0]] = create_process(*migrated_db_tasks[task_idx])
results[migrated_db_tasks[task_idx][0]].start()
task_idx += 1
if len(results) == 0:
break
time.sleep(5)
if disk_tasks:
suspicious_tasks_dir = os.path.join(settings.DATA_ROOT, 'suspicious_tasks')
os.makedirs(suspicious_tasks_dir, exist_ok=True)
for tid in disk_tasks:
suspicious_task_path = os.path.join(settings.DATA_ROOT, str(tid))
try:
shutil.move(suspicious_task_path, suspicious_tasks_dir)
except Exception as e:
log.error('Cannot move data for the suspicious task {}, \
that is not represented in the database.'.format(suspicious_task_path))
log.error(str(e))
# DL models migration
if apps.is_installed('auto_annotation'):
DLModel = apps.get_model('auto_annotation', 'AnnotationModel')
for db_model in DLModel.objects.all():
try:
old_location = os.path.join(settings.BASE_DIR, 'models', str(db_model.id))
new_location = os.path.join(settings.BASE_DIR, 'data', 'models', str(db_model.id))
if os.path.isdir(old_location):
shutil.move(old_location, new_location)
db_model.model_file.name = db_model.model_file.name.replace(old_location, new_location)
db_model.weights_file.name = db_model.weights_file.name.replace(old_location, new_location)
db_model.labelmap_file.name = db_model.labelmap_file.name.replace(old_location, new_location)
db_model.interpretation_file.name = db_model.interpretation_file.name.replace(old_location, new_location)
db_model.save()
except Exception as e:
log.error('Cannot migrate data for the DL model: {}'.format(db_model.id))
log.error(str(e))
log_file_object.close()
sys.stdout = stdout
sys.stderr = stderr
log.error('#Cannot migrate data for the task: {}'.format(res_idx))
while task_idx < len(migrated_db_tasks) and len(results) < MIGRATION_THREAD_COUNT:
log.info('Start data migration for the task {}, data ID {}'.format(migrated_db_tasks[task_idx][0], migrated_db_tasks[task_idx][1]))
results[migrated_db_tasks[task_idx][0]] = create_process(*migrated_db_tasks[task_idx])
results[migrated_db_tasks[task_idx][0]].start()
task_idx += 1
if len(results) == 0:
break
time.sleep(5)
if disk_tasks:
suspicious_tasks_dir = os.path.join(settings.DATA_ROOT, 'suspicious_tasks')
os.makedirs(suspicious_tasks_dir, exist_ok=True)
for tid in disk_tasks:
suspicious_task_path = os.path.join(settings.DATA_ROOT, str(tid))
try:
shutil.move(suspicious_task_path, suspicious_tasks_dir)
except Exception as e:
log.error('Cannot move data for the suspicious task {}, \
that is not represented in the database.'.format(suspicious_task_path))
log.error(str(e))
# DL models migration
if apps.is_installed('auto_annotation'):
DLModel = apps.get_model('auto_annotation', 'AnnotationModel')
for db_model in DLModel.objects.all():
try:
old_location = os.path.join(settings.BASE_DIR, 'models', str(db_model.id))
new_location = os.path.join(settings.BASE_DIR, 'data', 'models', str(db_model.id))
if os.path.isdir(old_location):
shutil.move(old_location, new_location)
db_model.model_file.name = db_model.model_file.name.replace(old_location, new_location)
db_model.weights_file.name = db_model.weights_file.name.replace(old_location, new_location)
db_model.labelmap_file.name = db_model.labelmap_file.name.replace(old_location, new_location)
db_model.interpretation_file.name = db_model.interpretation_file.name.replace(old_location, new_location)
db_model.save()
except Exception as e:
log.error('Cannot migrate data for the DL model: {}'.format(db_model.id))
log.error(str(e))
class Migration(migrations.Migration):
......
import os
import sys
import traceback
from django.db import migrations
from django.conf import settings
from cvat.apps.engine.log import get_migration_logger
def delete_previews(apps, schema_editor):
    """Delete obsolete on-disk preview.jpeg files for Data, Job and CloudStorage.

    Previews are now generated on request and kept in the cache, so the
    statically stored files are no longer needed. Removal is best-effort:
    failures are logged and the migration continues.
    """
    migration_name = os.path.splitext(os.path.basename(__file__))[0]
    with get_migration_logger(migration_name) as log:
        def delete_object_previews(db_objects, root_path):
            # Remove <root_path>/<id>/preview.jpeg for every object,
            # logging (but not re-raising) any failure.
            for db_obj in db_objects:
                preview_path = os.path.join(root_path, str(db_obj.id), 'preview.jpeg')
                try:
                    os.remove(preview_path)
                except Exception as e:
                    log.error(f'Cannot delete path {preview_path}')
                    log.error(str(e))
                    traceback.print_exc(file=sys.stderr)

        log.info('\nDeleting Data previews...')
        Data = apps.get_model('engine', 'Data')
        delete_object_previews(Data.objects.all(), settings.MEDIA_DATA_ROOT)
        log.info('\nDeleting Job previews...')
        Job = apps.get_model('engine', 'Job')
        delete_object_previews(Job.objects.all(), settings.JOBS_ROOT)
        log.info('\nDeleting CloudStorage previews...')
        CloudStorage = apps.get_model('engine', 'CloudStorage')
        delete_object_previews(CloudStorage.objects.all(), settings.CLOUD_STORAGE_ROOT)
class Migration(migrations.Migration):
    # Data-only migration: removes preview files made obsolete by
    # on-the-fly preview generation; no schema changes.

    dependencies = [
        ('engine', '0061_auto_20221130_0844'),
    ]

    operations = [
        migrations.RunPython(
            code=delete_previews
        ),
    ]
......@@ -249,9 +249,6 @@ class Data(models.Model):
return os.path.join(self.get_compressed_cache_dirname(),
self._get_compressed_chunk_name(chunk_number))
def get_preview_path(self):
return os.path.join(self.get_data_dirname(), 'preview.jpeg')
def get_manifest_path(self):
return os.path.join(self.get_upload_dirname(), 'manifest.jsonl')
......@@ -501,9 +498,6 @@ class Job(models.Model):
})
db_commit.save()
def get_preview_path(self):
return os.path.join(self.get_dirname(), "preview.jpeg")
class Meta:
default_permissions = ()
......@@ -810,9 +804,6 @@ class CloudStorage(models.Model):
def get_log_path(self):
return os.path.join(self.get_storage_logs_dirname(), "storage.log")
def get_preview_path(self):
return os.path.join(self.get_storage_dirname(), 'preview.jpeg')
def get_specific_attributes(self):
return parse_specific_attributes(self.specific_attributes)
......
......@@ -124,9 +124,6 @@ def _save_task_to_db(db_task, extractor):
shutil.rmtree(job_path)
os.makedirs(job_path)
preview = extractor.get_preview(frame=start_frame)
preview.save(db_job.get_preview_path())
db_task.data.save()
db_task.save()
......@@ -688,8 +685,5 @@ def _create_thread(db_task, data, isBackupRestore=False, isDatasetImport=False):
db_data.stop_frame = min(db_data.stop_frame, \
db_data.start_frame + (db_data.size - 1) * db_data.get_frame_step())
task_preview = extractor.get_preview(frame=0)
task_preview.save(db_data.get_preview_path())
slogger.glob.info("Found frames {} for Data #{}".format(db_data.size, db_data.id))
_save_task_to_db(db_task, extractor)
......@@ -17,6 +17,7 @@ from glob import glob
from io import BytesIO
from unittest import mock
import logging
import copy
import av
import numpy as np
......@@ -3095,7 +3096,12 @@ def generate_manifest_file(data_type, manifest_path, sources):
manifest.create()
class TaskDataAPITestCase(APITestCase):
_image_sizes = {}
_share_image_sizes = {}
_client_images = {}
_client_mp4_video = {}
_client_archive = {}
_client_pdf = {}
_client_mxf_video = {}
class ChunkType(str, Enum):
IMAGESET = 'imageset'
......@@ -3119,28 +3125,28 @@ class TaskDataAPITestCase(APITestCase):
img_size, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls._image_sizes[filename] = img_size
cls._share_image_sizes[filename] = img_size
filename = "test_2.jpg"
path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls._image_sizes[filename] = img_size
cls._share_image_sizes[filename] = img_size
filename = "test_3.jpg"
path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls._image_sizes[filename] = img_size
cls._share_image_sizes[filename] = img_size
filename = "test_10.jpg"
path = os.path.join(settings.SHARE_ROOT, filename)
img_size, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls._image_sizes[filename] = img_size
cls._share_image_sizes[filename] = img_size
filename = os.path.join("data", "test_3.jpg")
path = os.path.join(settings.SHARE_ROOT, filename)
......@@ -3148,14 +3154,14 @@ class TaskDataAPITestCase(APITestCase):
img_size, data = generate_image_file(filename)
with open(path, "wb") as image:
image.write(data.read())
cls._image_sizes[filename] = img_size
cls._share_image_sizes[filename] = img_size
filename = "test_video_1.mp4"
path = os.path.join(settings.SHARE_ROOT, filename)
img_sizes, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video:
video.write(data.read())
cls._image_sizes[filename] = img_sizes
cls._share_image_sizes[filename] = img_sizes
filename = "test_rotated_90_video.mp4"
path = os.path.join(os.path.dirname(__file__), 'assets', 'test_rotated_90_video.mp4')
......@@ -3165,7 +3171,7 @@ class TaskDataAPITestCase(APITestCase):
img_sizes = [(frame.height, frame.width)] * container.streams.video[0].frames
break
container.close()
cls._image_sizes[filename] = img_sizes
cls._share_image_sizes[filename] = img_sizes
filename = os.path.join("videos", "test_video_1.mp4")
path = os.path.join(settings.SHARE_ROOT, filename)
......@@ -3173,14 +3179,14 @@ class TaskDataAPITestCase(APITestCase):
img_sizes, data = generate_video_file(filename, width=1280, height=720)
with open(path, "wb") as video:
video.write(data.read())
cls._image_sizes[filename] = img_sizes
cls._share_image_sizes[filename] = img_sizes
filename = os.path.join("test_archive_1.zip")
path = os.path.join(settings.SHARE_ROOT, filename)
img_sizes, data = generate_zip_archive_file(filename, count=5)
with open(path, "wb") as zip_archive:
zip_archive.write(data.read())
cls._image_sizes[filename] = img_sizes
cls._share_image_sizes[filename] = img_sizes
filename = "test_pointcloud_pcd.zip"
path = os.path.join(os.path.dirname(__file__), 'assets', filename)
......@@ -3192,7 +3198,7 @@ class TaskDataAPITestCase(APITestCase):
with zip_file.open(info, "r") as file:
data = ValidateDimension.get_pcd_properties(file)
image_sizes.append((int(data["WIDTH"]), int(data["HEIGHT"])))
cls._image_sizes[filename] = image_sizes
cls._share_image_sizes[filename] = image_sizes
filename = "test_velodyne_points.zip"
path = os.path.join(os.path.dirname(__file__), 'assets', filename)
......@@ -3221,14 +3227,14 @@ class TaskDataAPITestCase(APITestCase):
root_path = os.path.abspath(os.path.join(root_path, filename.split(".")[0]))
shutil.rmtree(root_path)
cls._image_sizes[filename] = image_sizes
cls._share_image_sizes[filename] = image_sizes
file_name = 'test_1.pdf'
path = os.path.join(settings.SHARE_ROOT, file_name)
img_sizes, data = generate_pdf_file(file_name, page_count=5)
with open(path, "wb") as pdf_file:
pdf_file.write(data.read())
cls._image_sizes[file_name] = img_sizes
cls._share_image_sizes[file_name] = img_sizes
generate_manifest_file(data_type='video', manifest_path=os.path.join(settings.SHARE_ROOT, 'videos', 'manifest.jsonl'),
sources=[os.path.join(settings.SHARE_ROOT, 'videos', 'test_video_1.mp4')])
......@@ -3236,6 +3242,36 @@ class TaskDataAPITestCase(APITestCase):
generate_manifest_file(data_type='images', manifest_path=os.path.join(settings.SHARE_ROOT, 'manifest.jsonl'),
sources=[os.path.join(settings.SHARE_ROOT, f'test_{i}.jpg') for i in range(1,4)])
image_sizes, images = generate_image_files("test_1.jpg", "test_2.jpg", "test_3.jpg")
cls._client_images = {
'images': images,
'image_sizes': image_sizes,
}
image_sizes, video = generate_video_file(filename="test_video_1.mp4", width=1280, height=720)
cls._client_mp4_video = {
'video': video,
'image_sizes': image_sizes,
}
image_sizes, archive = generate_zip_archive_file("test_archive_2.zip", 7)
cls._client_archive = {
'archive': archive,
'image_sizes': image_sizes
}
image_sizes, document = generate_pdf_file("test_pdf_1.pdf", 5)
cls._client_pdf = {
'pdf': document,
'image_sizes': image_sizes
}
image_sizes, video = generate_video_file(filename="test_video_1.mxf", width=1280, height=720, codec_name='mpeg2video')
cls._client_mxf_video = {
'video': video,
'image_sizes': image_sizes,
}
@classmethod
def tearDownClass(cls):
super().tearDownClass()
......@@ -3296,7 +3332,9 @@ class TaskDataAPITestCase(APITestCase):
return self.client.get(url)
def _get_preview(self, tid, user):
return self._run_api_v2_task_id_data_get(tid, user, "preview")
url = '/api/tasks/{}/preview'.format(tid)
with ForceLogin(user, self.client):
return self.client.get(url)
def _get_compressed_chunk(self, tid, user, number):
return self._run_api_v2_task_id_data_get(tid, user, "chunk", "compressed", number)
......@@ -3364,7 +3402,7 @@ class TaskDataAPITestCase(APITestCase):
self.assertEqual(response.status_code, expected_status_code)
if expected_status_code == status.HTTP_200_OK:
if dimension == DimensionType.DIM_2D:
preview = Image.open(io.BytesIO(b"".join(response.streaming_content)))
preview = Image.open(io.BytesIO(response.content))
self.assertLessEqual(preview.size, image_sizes[0])
# check compressed chunk
......@@ -3458,7 +3496,8 @@ class TaskDataAPITestCase(APITestCase):
]
}
image_sizes, images = generate_image_files("test_1.jpg", "test_2.jpg", "test_3.jpg")
images = copy.deepcopy(self._client_images['images'])
image_sizes = self._client_images['image_sizes']
task_data = {
"client_files[0]": images[0],
"client_files[1]": images[1],
......@@ -3486,10 +3525,10 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 75,
}
image_sizes = [
self._image_sizes[task_data["server_files[3]"]],
self._image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]],
self._share_image_sizes[task_data["server_files[3]"]],
self._share_image_sizes[task_data["server_files[0]"]],
self._share_image_sizes[task_data["server_files[1]"]],
self._share_image_sizes[task_data["server_files[2]"]],
]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes,
......@@ -3509,7 +3548,8 @@ class TaskDataAPITestCase(APITestCase):
{"name": "person"},
]
}
image_sizes, video = generate_video_file(filename="test_video_1.mp4", width=1280, height=720)
video = copy.deepcopy(self._client_mp4_video['video'])
image_sizes = self._client_mp4_video['image_sizes']
task_data = {
"client_files[0]": video,
"image_quality": 43,
......@@ -3531,7 +3571,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": "test_video_1.mp4",
"image_quality": 57,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE)
......@@ -3554,7 +3594,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": os.path.join("videos", "test_video_1.mp4"),
"image_quality": 57,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE)
......@@ -3579,7 +3619,7 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 12,
"use_zip_chunks": True,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.VIDEO, image_sizes,
expected_uploaded_data_location=StorageChoice.SHARE)
......@@ -3602,7 +3642,7 @@ class TaskDataAPITestCase(APITestCase):
"server_files[0]": "test_archive_1.zip",
"image_quality": 88,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET, image_sizes,
expected_uploaded_data_location=StorageChoice.LOCAL)
......@@ -3621,7 +3661,8 @@ class TaskDataAPITestCase(APITestCase):
{"name": "person"},
]
}
image_sizes, archive = generate_zip_archive_file("test_archive_2.zip", 7)
archive = copy.deepcopy(self._client_archive['archive'])
image_sizes = self._client_archive['image_sizes']
task_data = {
"client_files[0]": archive,
"image_quality": 100,
......@@ -3645,7 +3686,7 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE, StorageChoice.SHARE)
......@@ -3673,9 +3714,9 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True,
}
image_sizes = [
self._image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[2]"]],
self._image_sizes[task_data["server_files[1]"]],
self._share_image_sizes[task_data["server_files[0]"]],
self._share_image_sizes[task_data["server_files[2]"]],
self._share_image_sizes[task_data["server_files[1]"]],
]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
......@@ -3702,7 +3743,7 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET, image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL)
......@@ -3722,7 +3763,8 @@ class TaskDataAPITestCase(APITestCase):
]
}
image_sizes, document = generate_pdf_file("test_pdf_1.pdf", 5)
document = copy.deepcopy(self._client_pdf['pdf'])
image_sizes = self._client_pdf['image_sizes']
task_data = {
"client_files[0]": document,
......@@ -3744,8 +3786,7 @@ class TaskDataAPITestCase(APITestCase):
]
}
image_sizes, document = generate_pdf_file("test_pdf_2.pdf", 4)
document = copy.deepcopy(self._client_pdf['pdf'])
task_data = {
"client_files[0]": document,
"image_quality": 70,
......@@ -3769,7 +3810,7 @@ class TaskDataAPITestCase(APITestCase):
"image_quality": 70,
"use_cache": True
}
image_sizes = self._image_sizes[task_data['server_files[0]']]
image_sizes = self._share_image_sizes[task_data['server_files[0]']]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.VIDEO,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE,
......@@ -3796,7 +3837,7 @@ class TaskDataAPITestCase(APITestCase):
"use_zip_chunks": True
}
image_sizes = self._image_sizes['test_rotated_90_video.mp4']
image_sizes = self._share_image_sizes['test_rotated_90_video.mp4']
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.FILE_SYSTEM)
......@@ -3817,7 +3858,7 @@ class TaskDataAPITestCase(APITestCase):
"use_zip_chunks": True
}
image_sizes = self._image_sizes['test_rotated_90_video.mp4']
image_sizes = self._share_image_sizes['test_rotated_90_video.mp4']
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.VIDEO, image_sizes, StorageMethodChoice.CACHE)
......@@ -3830,7 +3871,8 @@ class TaskDataAPITestCase(APITestCase):
],
}
image_sizes, video = generate_video_file(filename="test_video_1.mxf", width=1280, height=720, codec_name='mpeg2video')
video = copy.deepcopy(self._client_mxf_video['video'])
image_sizes = self._client_mxf_video['image_sizes']
task_data = {
"client_files[0]": video,
"image_quality": 51,
......@@ -3852,7 +3894,7 @@ class TaskDataAPITestCase(APITestCase):
"client_files[0]": open(os.path.join(os.path.dirname(__file__), 'assets', 'test_pointcloud_pcd.zip'), 'rb'),
"image_quality": 100,
}
image_sizes = self._image_sizes["test_pointcloud_pcd.zip"]
image_sizes = self._share_image_sizes["test_pointcloud_pcd.zip"]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET,
image_sizes, dimension=DimensionType.DIM_3D)
......@@ -3872,7 +3914,7 @@ class TaskDataAPITestCase(APITestCase):
'rb'),
"image_quality": 100,
}
image_sizes = self._image_sizes["test_velodyne_points.zip"]
image_sizes = self._share_image_sizes["test_velodyne_points.zip"]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET,
self.ChunkType.IMAGESET,
image_sizes, dimension=DimensionType.DIM_3D)
......@@ -3896,9 +3938,9 @@ class TaskDataAPITestCase(APITestCase):
"use_cache": True
}
image_sizes = [
self._image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]],
self._share_image_sizes[task_data["server_files[0]"]],
self._share_image_sizes[task_data["server_files[1]"]],
self._share_image_sizes[task_data["server_files[2]"]],
]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
......@@ -3920,9 +3962,9 @@ class TaskDataAPITestCase(APITestCase):
"sorting_method": SortingMethod.PREDEFINED
}
image_sizes = [
self._image_sizes[task_data["server_files[0]"]],
self._image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[2]"]],
self._share_image_sizes[task_data["server_files[0]"]],
self._share_image_sizes[task_data["server_files[1]"]],
self._share_image_sizes[task_data["server_files[2]"]],
]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
......@@ -3939,9 +3981,9 @@ class TaskDataAPITestCase(APITestCase):
"sorting_method": SortingMethod.NATURAL
}
image_sizes = [
self._image_sizes[task_data["server_files[2]"]],
self._image_sizes[task_data["server_files[1]"]],
self._image_sizes[task_data["server_files[0]"]],
self._share_image_sizes[task_data["server_files[2]"]],
self._share_image_sizes[task_data["server_files[1]"]],
self._share_image_sizes[task_data["server_files[0]"]],
]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
......@@ -3954,7 +3996,7 @@ class TaskDataAPITestCase(APITestCase):
"copy_data": False,
"use_cache": True,
}
image_sizes = self._image_sizes[task_data["server_files[0]"]]
image_sizes = self._share_image_sizes[task_data["server_files[0]"]]
self._test_api_v2_tasks_id_data_spec(user, task_spec, task_data, self.ChunkType.IMAGESET, self.ChunkType.IMAGESET,
image_sizes, StorageMethodChoice.CACHE, StorageChoice.LOCAL)
......
......@@ -11,7 +11,7 @@ import shutil
import traceback
from datetime import datetime
from distutils.util import strtobool
from tempfile import mkstemp, NamedTemporaryFile
from tempfile import mkstemp
import cv2
from django.db.models.query import Prefetch
......@@ -45,8 +45,6 @@ from cvat.apps.engine.cloud_provider import db_storage_to_storage_instance
from cvat.apps.dataset_manager.bindings import CvatImportError
from cvat.apps.dataset_manager.serializers import DatasetFormatsSerializer
from cvat.apps.engine.frame_provider import FrameProvider
from cvat.apps.engine.media_extractors import ImageListReader
from cvat.apps.engine.mime_types import mimetypes
from cvat.apps.engine.media_extractors import get_mime
from cvat.apps.engine.models import (
Job, Task, Project, Issue, Data,
......@@ -77,6 +75,7 @@ from .log import clogger, slogger
from cvat.apps.iam.permissions import (CloudStoragePermission,
CommentPermission, IssuePermission, JobPermission, ProjectPermission,
TaskPermission, UserPermission)
from cvat.apps.engine.cache import CacheInteraction
@extend_schema(tags=['server'])
......@@ -622,6 +621,30 @@ class ProjectViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
def append_backup_chunk(self, request, file_id):
return self.append_tus_chunk(request, file_id)
@extend_schema(summary='Method returns a preview image for the project',
    responses={
        '200': OpenApiResponse(description='Project image preview'),
        '404': OpenApiResponse(description='Project image preview not found'),
    })
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
    """Return a preview image for the project.

    The preview comes from the project's most recently created task
    (highest id). Responds with 404 when the project has no tasks, or
    when the selected task has no data attached yet.
    """
    self._object = self.get_object() # call check_object_permissions as well

    first_task = self._object.tasks.order_by('-id').first()
    # Guard both "no tasks" and "task without uploaded data" — mirrors the
    # `if not self._object.data` check in TaskViewSet.preview; without it,
    # accessing first_task.data.start_frame below would raise.
    if not first_task or not first_task.data:
        return HttpResponseNotFound('Project image preview not found')

    data_getter = DataChunkGetter(
        data_type='preview',
        data_quality='compressed',
        data_num=first_task.data.start_frame,
        task_dim=first_task.dimension
    )

    return data_getter(request, first_task.data.start_frame,
        first_task.data.stop_frame, first_task.data)
@staticmethod
def _get_rq_response(queue, job_id):
queue = django_rq.get_queue(queue)
......@@ -648,21 +671,20 @@ class DataChunkGetter:
if not data_type or data_type not in possible_data_type_values:
raise ValidationError('Data type not specified or has wrong value')
elif data_type == 'chunk' or data_type == 'frame':
if not data_num:
elif data_type == 'chunk' or data_type == 'frame' or data_type == 'preview':
if data_num is None:
raise ValidationError('Number is not specified')
elif data_quality not in possible_quality_values:
raise ValidationError('Wrong quality value')
self.type = data_type
self.number = int(data_num) if data_num else None
self.number = int(data_num) if data_num is not None else None
self.quality = FrameProvider.Quality.COMPRESSED \
if data_quality == 'compressed' else FrameProvider.Quality.ORIGINAL
self.dimension = task_dim
def __call__(self, request, start, stop, db_data, db_object):
def __call__(self, request, start, stop, db_data):
if not db_data:
raise NotFound(detail='Cannot find requested data')
......@@ -687,16 +709,18 @@ class DataChunkGetter:
path = os.path.realpath(frame_provider.get_chunk(self.number, self.quality))
return sendfile(request, path)
elif self.type == 'frame':
elif self.type == 'frame' or self.type == 'preview':
if not (start <= self.number <= stop):
raise ValidationError('The frame number should be in ' +
f'[{start}, {stop}] range')
buf, mime = frame_provider.get_frame(self.number, self.quality)
return HttpResponse(buf.getvalue(), content_type=mime)
if self.type == 'preview':
cache = CacheInteraction(self.dimension)
buf, mime = cache.get_local_preview_with_mime(self.number, db_data)
else:
buf, mime = frame_provider.get_frame(self.number, self.quality)
elif self.type == 'preview':
return sendfile(request, db_object.get_preview_path())
return HttpResponse(buf.getvalue(), content_type=mime)
elif self.type == 'context_image':
if not (start <= self.number <= stop):
......@@ -982,13 +1006,13 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@extend_schema(methods=['GET'], summary='Method returns data for a specific task',
parameters=[
OpenApiParameter('type', location=OpenApiParameter.QUERY, required=False,
type=OpenApiTypes.STR, enum=['chunk', 'frame', 'preview', 'context_image'],
type=OpenApiTypes.STR, enum=['chunk', 'frame', 'context_image'],
description='Specifies the type of the requested data'),
OpenApiParameter('quality', location=OpenApiParameter.QUERY, required=False,
type=OpenApiTypes.STR, enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
description="Specifies the quality level of the requested data"),
OpenApiParameter('number', location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.INT,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
description="A unique number value identifying chunk or frame"),
],
responses={
'200': OpenApiResponse(description='Data of a specific type'),
......@@ -1017,7 +1041,7 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
self._object.dimension)
return data_getter(request, self._object.data.start_frame,
self._object.data.stop_frame, self._object.data, self._object.data)
self._object.data.stop_frame, self._object.data)
@extend_schema(methods=['PATCH'],
operation_id='tasks_partial_update_data_file',
......@@ -1317,6 +1341,28 @@ class TaskViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
return Response(data="Exporting a dataset from a task without data is not allowed",
status=status.HTTP_400_BAD_REQUEST)
@extend_schema(summary='Method returns a preview image for the task',
    responses={
        '200': OpenApiResponse(description='Task image preview'),
        '404': OpenApiResponse(description='Task image preview not found'),
    })
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
    """Return a preview image for the task, or 404 if it has no data."""
    # get_object() also runs check_object_permissions for us.
    self._object = self.get_object()

    db_data = self._object.data
    if not db_data:
        return HttpResponseNotFound('Task image preview not found')

    # Delegate to the shared getter with the 'preview' data type; the
    # preview is built from the first frame of the task's data range.
    data_getter = DataChunkGetter(
        data_type='preview',
        data_quality='compressed',
        data_num=db_data.start_frame,
        task_dim=self._object.dimension
    )
    return data_getter(request, db_data.start_frame, db_data.stop_frame,
        db_data)
@extend_schema(tags=['jobs'])
@extend_schema_view(
retrieve=extend_schema(
......@@ -1625,12 +1671,12 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
parameters=[
OpenApiParameter('type', description='Specifies the type of the requested data',
location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.STR,
enum=['chunk', 'frame', 'preview', 'context_image']),
enum=['chunk', 'frame', 'context_image']),
OpenApiParameter('quality', location=OpenApiParameter.QUERY, required=False,
type=OpenApiTypes.STR, enum=['compressed', 'original'],
description="Specifies the quality level of the requested data, doesn't matter for 'preview' type"),
description="Specifies the quality level of the requested data"),
OpenApiParameter('number', location=OpenApiParameter.QUERY, required=False, type=OpenApiTypes.INT,
description="A unique number value identifying chunk or frame, doesn't matter for 'preview' type"),
description="A unique number value identifying chunk or frame"),
],
responses={
'200': OpenApiResponse(OpenApiTypes.BINARY, description='Data of a specific type'),
......@@ -1646,7 +1692,7 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
db_job.segment.task.dimension)
return data_getter(request, db_job.segment.start_frame,
db_job.segment.stop_frame, db_job.segment.task.data, db_job)
db_job.segment.stop_frame, db_job.segment.task.data)
@extend_schema(summary='Method provides a meta information about media files which are related with the job',
......@@ -1737,6 +1783,24 @@ class JobViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
serializer = JobCommitSerializer(queryset, context={'request': request}, many=True)
return Response(serializer.data)
@extend_schema(summary='Method returns a preview image for the job',
    responses={
        '200': OpenApiResponse(description='Job image preview'),
    })
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
    """Return a preview image for the job (first frame of its segment)."""
    # get_object() also runs check_object_permissions for us.
    self._object = self.get_object()

    segment = self._object.segment
    data_getter = DataChunkGetter(
        data_type='preview',
        data_quality='compressed',
        data_num=segment.start_frame,
        task_dim=segment.task.dimension
    )
    return data_getter(request, segment.start_frame, segment.stop_frame,
        segment.task.data)
@extend_schema(tags=['issues'])
@extend_schema_view(
retrieve=extend_schema(
......@@ -2095,47 +2159,16 @@ class CloudStorageViewSet(viewsets.GenericViewSet, mixins.ListModelMixin,
@extend_schema(summary='Method returns a preview image from a cloud storage',
responses={
'200': OpenApiResponse(description='Cloud Storage preview'),
'400': OpenApiResponse(description='Failed to get cloud storage preview'),
'404': OpenApiResponse(description='Cloud Storage preview not found'),
})
@action(detail=True, methods=['GET'], url_path='preview')
def preview(self, request, pk):
storage = None
try:
db_storage = self.get_object()
if not os.path.exists(db_storage.get_preview_path()):
storage = db_storage_to_storage_instance(db_storage)
if not db_storage.manifests.count():
raise ValidationError('Cannot get the cloud storage preview. There is no manifest file')
preview_path = None
for manifest_model in db_storage.manifests.all():
manifest_prefix = os.path.dirname(manifest_model.filename)
full_manifest_path = os.path.join(db_storage.get_storage_dirname(), manifest_model.filename)
if not os.path.exists(full_manifest_path) or \
datetime.utcfromtimestamp(os.path.getmtime(full_manifest_path)).replace(tzinfo=pytz.UTC) < storage.get_file_last_modified(manifest_model.filename):
storage.download_file(manifest_model.filename, full_manifest_path)
manifest = ImageManifestManager(
os.path.join(db_storage.get_storage_dirname(), manifest_model.filename),
db_storage.get_storage_dirname()
)
# need to update index
manifest.set_index()
if not len(manifest):
continue
preview_info = manifest[0]
preview_filename = ''.join([preview_info['name'], preview_info['extension']])
preview_path = os.path.join(manifest_prefix, preview_filename)
break
if not preview_path:
msg = 'Cloud storage {} does not contain any images'.format(pk)
slogger.cloud_storage[pk].info(msg)
return HttpResponseBadRequest(msg)
with NamedTemporaryFile() as temp_image:
storage.download_file(preview_path, temp_image.name)
reader = ImageListReader([temp_image.name])
preview = reader.get_preview(frame=0)
preview.save(db_storage.get_preview_path())
content_type = mimetypes.guess_type(db_storage.get_preview_path())[0]
return HttpResponse(open(db_storage.get_preview_path(), 'rb').read(), content_type)
cache = CacheInteraction()
preview, mime = cache.get_cloud_preview_with_mime(db_storage)
return HttpResponse(preview, mime)
except CloudStorageModel.DoesNotExist:
message = f"Storage {pk} does not exist"
slogger.glob.error(message)
......
......@@ -507,6 +507,7 @@ class ProjectPermission(OpenPolicyAgentPermission):
('import_backup', 'POST'): 'import:backup',
('append_backup_chunk', 'PATCH'): 'import:backup',
('append_backup_chunk', 'HEAD'): 'import:backup',
('preview', 'GET'): 'view',
}.get((view.action, request.method))
scopes = []
......@@ -669,6 +670,7 @@ class TaskPermission(OpenPolicyAgentPermission):
('append_backup_chunk', 'PATCH'): 'import:backup',
('append_backup_chunk', 'HEAD'): 'import:backup',
('export_backup', 'GET'): 'export:backup',
('preview', 'GET'): 'view',
}.get((view.action, request.method))
scopes = []
......@@ -914,7 +916,8 @@ class JobPermission(OpenPolicyAgentPermission):
('metadata','GET'): 'view:metadata',
('metadata','PATCH'): 'update:metadata',
('issues', 'GET'): 'view',
('commits', 'GET'): 'view:commits'
('commits', 'GET'): 'view:commits',
('preview', 'GET'): 'view',
}.get((view.action, request.method))
scopes = []
......
......@@ -41,7 +41,7 @@ tensorflow==2.9.3 # Optional requirement of Datumaro. Use tensorflow-macos==2.8.
# The package is used by pyunpack as a command line tool to support multiple
# archives. Don't use as a python module because it has GPL license.
patool==1.12
diskcache==5.0.2
diskcache==5.4.0
boto3==1.17.61
azure-storage-blob==12.13.0
google-cloud-storage==1.42.0
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册