Commit be38ab6e authored by Thomas Hellstrom, committed by Dave Airlie

vmwgfx: Fix potential execbuf deadlocks

Perform all command stream validation in a bounce buffer separate from the
fifo. This makes the fifo available to all validation-generated commands,
which would otherwise attempt to grab the fifo recursively, causing a
deadlock. This is in preparation for GMR2 and swappable surfaces.
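
In outline, the reworked submission path proceeds as sketched below. This is a condensed sketch of the flow the patch implements in vmw_execbuf_ioctl(); the wrapper name is illustrative, and error unwinding, relocation fixup, buffer reservation/validation, throttling and fencing are omitted:

static int vmw_execbuf_flow_sketch(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   void __user *user_cmd, uint32_t size)
{
        void *cmd;
        int ret;

        /* Grow the vmalloc'd bounce buffer if this submission needs more. */
        ret = vmw_resize_cmd_bounce(sw_context, size);
        if (unlikely(ret != 0))
                return ret;

        /* Copy and validate entirely outside the fifo, so commands
         * generated during validation can reserve the fifo themselves
         * without recursing. */
        if (copy_from_user(sw_context->cmd_bounce, user_cmd, size))
                return -EFAULT;

        ret = vmw_cmd_check_all(dev_priv, sw_context, size);
        if (unlikely(ret != 0))
                return ret;

        /* Only now is fifo space reserved; nothing below re-enters it. */
        cmd = vmw_fifo_reserve(dev_priv, size);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        memcpy(cmd, sw_context->cmd_bounce, size);
        vmw_fifo_commit(dev_priv, size);
        return 0;
}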

Also maintain references to all surfaces in the command stream until the
command stream has been fired in order to avoid racing with surface
destruction taking place after validation but before submission.
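
The reference bookkeeping pairs up as condensed below (names as in the patch): on_validate_list keeps a resource from being referenced twice, and the references taken during validation are only dropped in vmw_clear_validations(), after the command stream has been committed.

        /* While validating (vmw_resource_to_validate_list()): hold one
         * reference per distinct resource named by the command stream. */
        if (!res->on_validate_list) {
                sw_context->resources[sw_context->num_ref_resources++] = res;
                res->on_validate_list = true;
        }

        /* ... buffers validated, commands copied to the fifo and fired ... */

        /* In vmw_clear_validations(): the device now owns the commands,
         * so the references can safely be dropped. */
        while (i-- > 0) {
                sw_context->resources[i]->on_validate_list = false;
                vmw_resource_unreference(&sw_context->resources[i]);
        }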
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 0bef23f9
@@ -467,6 +467,8 @@ static int vmw_driver_unload(struct drm_device *dev)
 	unregister_pm_notifier(&dev_priv->pm_nb);
+	if (dev_priv->ctx.cmd_bounce)
+		vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
 		drm_irq_uninstall(dev_priv->dev);
 	if (dev_priv->enable_fb) {
@@ -46,8 +46,9 @@
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
 #define VMWGFX_MAX_RELOCATIONS 2048
-#define VMWGFX_MAX_GMRS 2048
+#define VMWGFX_MAX_VALIDATIONS 2048
 #define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 #define VMW_PL_GMR TTM_PL_PRIV0
 #define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
@@ -74,7 +75,7 @@ struct vmw_resource {
 	bool avail;
 	void (*hw_destroy) (struct vmw_resource *res);
 	void (*res_free) (struct vmw_resource *res);
-
+	bool on_validate_list;
 	/* TODO is a generic snooper needed? */
 #if 0
 	void (*snoop)(struct vmw_resource *res,
@@ -143,8 +144,12 @@ struct vmw_sw_context{
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
+	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
+	uint32_t *cmd_bounce;
+	uint32_t cmd_bounce_size;
+	struct vmw_resource *resources[VMWGFX_MAX_VALIDATIONS];
+	uint32_t num_ref_resources;
 };

 struct vmw_legacy_display;
@@ -340,7 +345,8 @@ extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
 extern int vmw_context_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
-			     int id);
+			     int id,
+			     struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
 			    struct vmw_surface *srf,
@@ -44,10 +44,36 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }

+static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+					 struct vmw_resource **p_res)
+{
+	int ret = 0;
+	struct vmw_resource *res = *p_res;
+
+	if (!res->on_validate_list) {
+		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
+			DRM_ERROR("Too many resources referenced in "
+				  "command stream.\n");
+			ret = -ENOMEM;
+			goto out;
+		}
+		sw_context->resources[sw_context->num_ref_resources++] = res;
+		res->on_validate_list = true;
+		return 0;
+	}
+
+out:
+	vmw_resource_unreference(p_res);
+	return ret;
+}
+
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     SVGA3dCmdHeader *header)
 {
+	struct vmw_resource *ctx;
+
 	struct vmw_cid_cmd {
 		SVGA3dCmdHeader header;
 		__le32 cid;
@@ -58,7 +84,8 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
 		return 0;

-	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
+	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
+				&ctx);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use context %u\n",
 			  (unsigned) cmd->cid);
@@ -67,39 +94,43 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 	sw_context->last_cid = cmd->cid;
 	sw_context->cid_valid = true;
-	return 0;
+	return vmw_resource_to_validate_list(sw_context, &ctx);
 }

 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
 			     uint32_t *sid)
 {
+	struct vmw_surface *srf;
+	int ret;
+	struct vmw_resource *res;
+
 	if (*sid == SVGA3D_INVALID_ID)
 		return 0;

-	if (unlikely((!sw_context->sid_valid ||
-		      *sid != sw_context->last_sid))) {
-		int real_id;
-		int ret = vmw_surface_check(dev_priv, sw_context->tfile,
-					    *sid, &real_id);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Could not find or use surface 0x%08x "
-				  "address 0x%08lx\n",
-				  (unsigned int) *sid,
-				  (unsigned long) sid);
-			return ret;
-		}
-		sw_context->last_sid = *sid;
-		sw_context->sid_valid = true;
-		*sid = real_id;
-		sw_context->sid_translation = real_id;
-	} else
+	if (likely((sw_context->sid_valid &&
+		    *sid == sw_context->last_sid))) {
 		*sid = sw_context->sid_translation;
+		return 0;
+	}

-	return 0;
+	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+					     *sid, &srf);
+	if (unlikely(ret != 0)) {
+		DRM_ERROR("Could not find or use surface 0x%08x "
+			  "address 0x%08lx\n",
+			  (unsigned int) *sid,
+			  (unsigned long) sid);
+		return ret;
+	}
+
+	sw_context->last_sid = *sid;
+	sw_context->sid_valid = true;
+	sw_context->sid_translation = srf->res.id;
+	*sid = sw_context->sid_translation;
+
+	res = &srf->res;
+	return vmw_resource_to_validate_list(sw_context, &res);
 }
@@ -213,7 +244,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc->location = ptr;

 	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-	if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
+	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
 		DRM_ERROR("Max number of DMA buffers per submission"
 			  " exceeded.\n");
 		ret = -EINVAL;
@@ -303,6 +334,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		SVGA3dCmdSurfaceDMA dma;
 	} *cmd;
 	int ret;
+	struct vmw_resource *res;

 	cmd = container_of(header, struct vmw_dma_cmd, header);
 	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
@@ -319,17 +351,16 @@
 		goto out_no_reloc;
 	}

-	/**
+	/*
 	 * Patch command stream with device SID.
 	 */
 	cmd->dma.host.sid = srf->res.id;
 	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
-	/**
-	 * FIXME: May deadlock here when called from the
-	 * command parsing code.
-	 */
-	vmw_surface_unreference(&srf);
-
 	vmw_dmabuf_unreference(&vmw_bo);
+
+	res = &srf->res;
+	return vmw_resource_to_validate_list(sw_context, &res);

 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
@@ -501,8 +532,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,

 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
-			     void *buf, uint32_t size)
+			     uint32_t size)
 {
+	void *buf = sw_context->cmd_bounce;
 	int32_t cur_size = size;
 	int ret;
@@ -551,7 +583,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)

 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
 	struct ttm_validate_buffer *entry, *next;
+	uint32_t i = sw_context->num_ref_resources;

+	/*
+	 * Drop references to DMA buffers held during command submission.
+	 */
 	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
 				 head) {
 		list_del(&entry->head);
@@ -560,6 +596,14 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 		sw_context->cur_val_buf--;
 	}
 	BUG_ON(sw_context->cur_val_buf != 0);
+
+	/*
+	 * Drop references to resources held during command submission.
+	 */
+	while (i-- > 0) {
+		sw_context->resources[i]->on_validate_list = false;
+		vmw_resource_unreference(&sw_context->resources[i]);
+	}
 }

 static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
@@ -603,6 +647,35 @@ static int vmw_validate_buffers(struct vmw_private *dev_priv,
 	return 0;
 }

+static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
+				 uint32_t size)
+{
+	if (likely(sw_context->cmd_bounce_size >= size))
+		return 0;
+
+	if (sw_context->cmd_bounce_size == 0)
+		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;
+
+	while (sw_context->cmd_bounce_size < size) {
+		sw_context->cmd_bounce_size =
+			PAGE_ALIGN(sw_context->cmd_bounce_size +
+				   (sw_context->cmd_bounce_size >> 1));
+	}
+
+	if (sw_context->cmd_bounce != NULL)
+		vfree(sw_context->cmd_bounce);
+
+	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
+
+	if (sw_context->cmd_bounce == NULL) {
+		DRM_ERROR("Failed to allocate command bounce buffer.\n");
+		sw_context->cmd_bounce_size = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
 int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
@@ -627,20 +700,18 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 		goto out_no_cmd_mutex;
 	}

-	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Failed reserving fifo space for commands.\n");
-		ret = -ENOMEM;
+	ret = vmw_resize_cmd_bounce(sw_context, arg->command_size);
+	if (unlikely(ret != 0))
 		goto out_unlock;
-	}

 	user_cmd = (void __user *)(unsigned long)arg->commands;
-	ret = copy_from_user(cmd, user_cmd, arg->command_size);
+	ret = copy_from_user(sw_context->cmd_bounce,
+			     user_cmd, arg->command_size);

 	if (unlikely(ret != 0)) {
 		ret = -EFAULT;
 		DRM_ERROR("Failed copying commands.\n");
-		goto out_commit;
+		goto out_unlock;
 	}

 	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
@@ -648,12 +719,14 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	sw_context->sid_valid = false;
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
+	sw_context->num_ref_resources = 0;
 	INIT_LIST_HEAD(&sw_context->validate_nodes);

-	ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
+	ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
+
 	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
 	if (unlikely(ret != 0))
 		goto out_err;
@@ -669,9 +742,17 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 					  arg->throttle_us);

 		if (unlikely(ret != 0))
-			goto out_err;
+			goto out_throttle;
 	}

+	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed reserving fifo space for commands.\n");
+		ret = -ENOMEM;
+		goto out_err;
+	}
+
+	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
 	vmw_fifo_commit(dev_priv, arg->command_size);

 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
@@ -708,10 +789,9 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	return 0;

 out_err:
 	vmw_free_relocations(sw_context);
+out_throttle:
 	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
 	vmw_clear_validations(sw_context);
-out_commit:
-	vmw_fifo_commit(dev_priv, 0);
 out_unlock:
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 out_no_cmd_mutex:
@@ -364,7 +364,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,

 int vmw_context_check(struct vmw_private *dev_priv,
 		      struct ttm_object_file *tfile,
-		      int id)
+		      int id,
+		      struct vmw_resource **p_res)
 {
 	struct vmw_resource *res;
 	int ret = 0;
@@ -376,6 +377,8 @@ int vmw_context_check(struct vmw_private *dev_priv,
 			container_of(res, struct vmw_user_context, res);
 		if (ctx->base.tfile != tfile && !ctx->base.shareable)
 			ret = -EPERM;
+		if (p_res)
+			*p_res = vmw_resource_reference(res);
 	} else
 		ret = -EINVAL;
 	read_unlock(&dev_priv->resource_lock);