Commit 1d7a5cbf authored by Thomas Hellstrom

drm/vmwgfx: Implement a buffer object synccpu ioctl.

This ioctl enables inter-process synchronization of buffer objects,
which is needed for mesa Guest-Backed objects.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Parent 15c6f656
......
@@ -124,6 +124,9 @@
#define DRM_IOCTL_VMW_GB_SURFACE_REF \
	DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF, \
		 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU \
	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \
		struct drm_vmw_synccpu_arg)

/**
 * The core DRM version of this macro doesn't account for
......
@@ -201,6 +204,9 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
	VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
		      vmw_gb_surface_reference_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
	VMW_IOCTL_DEF(VMW_SYNCCPU,
		      vmw_user_dmabuf_synccpu_ioctl,
		      DRM_AUTH | DRM_UNLOCKED),
};

static struct pci_device_id vmw_pci_id_list[] = {
......
......
@@ -533,6 +533,8 @@ extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
					 struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
					 uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
......
......
@@ -441,6 +441,21 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
	ttm_bo_unref(&bo);
}

static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;

	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}

/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
......
@@ -484,7 +499,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
				   &user_bo->prime,
				   shareable,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
				   &vmw_user_dmabuf_release,
				   &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
......
@@ -517,6 +533,130 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
		vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
}
/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		struct ttm_bo_device *bdev = bo->bdev;

		spin_lock(&bdev->fence_lock);
		ret = ttm_bo_wait(bo, false, true,
				  !!(flags & drm_vmw_synccpu_dontblock));
		spin_unlock(&bdev->fence_lock);
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
......
......
@@ -28,6 +28,10 @@
#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24
......
@@ -59,7 +63,7 @@
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
/*************************************************************************/
/**
......
@@ -985,5 +989,62 @@ union drm_vmw_gb_surface_reference_arg {
};
/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */
/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * for read-only access.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
	drm_vmw_synccpu_read = (1 << 0),
	drm_vmw_synccpu_write = (1 << 1),
	drm_vmw_synccpu_dontblock = (1 << 2),
	drm_vmw_synccpu_allow_cs = (1 << 3)
};
/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab: Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
	drm_vmw_synccpu_grab,
	drm_vmw_synccpu_release
};
/**
 * struct drm_vmw_synccpu_arg
 *
 * @op: The synccpu operation as described above.
 * @handle: Handle identifying the buffer object.
 * @flags: Flags as described above.
 */
struct drm_vmw_synccpu_arg {
	enum drm_vmw_synccpu_op op;
	enum drm_vmw_synccpu_flags flags;
	uint32_t handle;
	uint32_t pad64;
};
#endif
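
For context, the following is a minimal userspace sketch (not part of the patch) of the intended grab / release round trip. It assumes libdrm's drmCommandWrite() helper, an open vmwgfx DRM file descriptor, and a buffer object handle plus CPU mapping obtained elsewhere (for example through the DRM_VMW_ALLOC_DMABUF ioctl); error handling is abbreviated.

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

/* Issue one synccpu grab or release; DRM_VMW_SYNCCPU is the command
 * index defined in the header above. */
static int vmw_bo_synccpu(int fd, uint32_t handle, uint32_t flags,
			  enum drm_vmw_synccpu_op op)
{
	struct drm_vmw_synccpu_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.op = op;
	arg.flags = (enum drm_vmw_synccpu_flags) flags;
	arg.handle = handle;

	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

/* Example: idle the GPU on the buffer, block command submission, write
 * through an existing CPU mapping, then unblock. */
static int write_buffer(int fd, uint32_t handle, void *map, size_t size)
{
	int ret;

	ret = vmw_bo_synccpu(fd, handle, drm_vmw_synccpu_write,
			     drm_vmw_synccpu_grab);
	if (ret != 0)
		return ret;	/* -EBUSY is possible with _dontblock */

	memset(map, 0, size);	/* CPU now safely owns the contents */

	return vmw_bo_synccpu(fd, handle, drm_vmw_synccpu_write,
			      drm_vmw_synccpu_release);
}

A read-only or allow_cs grab follows the same pattern with different flags, and a blocking grab that is never explicitly released is cleaned up when the file is closed, as described above.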