Commit c0951b79, authored by Thomas Hellstrom, committed by Dave Airlie

drm/vmwgfx: Refactor resource management

Refactor resource management to make it easy to hook up resources
that are backed up by buffers. In particular, resources and their
backing buffers can be evicted and rebound, if supported by the device.
To avoid query deadlocks, the query code is also modified somewhat.
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Dmitry Torokhov <dtor@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent bf6f0368
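The message above is terse, so here is a minimal sketch of the caller-side lifecycle the new interface suggests. It uses only the prototypes this commit adds to vmwgfx_drv.h (vmw_resource_reserve, vmw_resource_validate, vmw_resource_unreserve); the call order, the example function name, and the comments are assumptions drawn from the commit message, not code from this commit.

	/*
	 * example_use_resource() - hypothetical caller, not part of this commit.
	 * Sketches how a buffer-backed resource is reserved, validated
	 * (re-bound and read back from its backup buffer if the device had
	 * evicted it), used, and then unreserved so it can be evicted again.
	 */
	static int example_use_resource(struct vmw_resource *res)
	{
		int ret;

		/* Assumed: pins the resource for use; no_backup == false
		 * means a backup buffer may be set up if the resource
		 * needs one. */
		ret = vmw_resource_reserve(res, false);
		if (unlikely(ret != 0))
			return ret;

		/* Assumed: makes the resource hardware-accessible again,
		 * rebinding it from its backup buffer if it was evicted. */
		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0))
			goto out_unreserve;

		/* ... emit device commands referencing res->id ... */

	out_unreserve:
		/* NULL/0 assumed to mean "keep the current backup buffer
		 * and offset". */
		vmw_resource_unreserve(res, NULL, 0);
		return ret;
	}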
@@ -60,7 +60,7 @@ int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
@@ -105,7 +105,7 @@ int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
 
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
@@ -214,8 +214,7 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
 		return ret;
 
 	if (pin)
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
-
+		vmw_execbuf_release_pinned_bo(dev_priv);
 	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
 	if (unlikely(ret != 0))
 		goto err_unlock;
...
@@ -432,6 +432,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	struct vmw_private *dev_priv;
 	int ret;
 	uint32_t svga_id;
+	enum vmw_res_type i;
 
 	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
 	if (unlikely(dev_priv == NULL)) {
@@ -448,15 +449,18 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	mutex_init(&dev_priv->cmdbuf_mutex);
 	mutex_init(&dev_priv->release_mutex);
 	rwlock_init(&dev_priv->resource_lock);
-	idr_init(&dev_priv->context_idr);
-	idr_init(&dev_priv->surface_idr);
-	idr_init(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i) {
+		idr_init(&dev_priv->res_idr[i]);
+		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
+	}
+
 	mutex_init(&dev_priv->init_mutex);
 	init_waitqueue_head(&dev_priv->fence_queue);
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
-	INIT_LIST_HEAD(&dev_priv->surface_lru);
 	dev_priv->used_memory_size = 0;
@@ -670,9 +674,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 out_err1:
 	vmw_ttm_global_release(dev_priv);
 out_err0:
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
+
 	kfree(dev_priv);
 	return ret;
 }
@@ -680,9 +684,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 static int vmw_driver_unload(struct drm_device *dev)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
+	enum vmw_res_type i;
 
 	unregister_pm_notifier(&dev_priv->pm_nb);
 
+	if (dev_priv->ctx.res_ht_initialized)
+		drm_ht_remove(&dev_priv->ctx.res_ht);
 	if (dev_priv->ctx.cmd_bounce)
 		vfree(dev_priv->ctx.cmd_bounce);
 	if (dev_priv->enable_fb) {
@@ -709,9 +716,9 @@ static int vmw_driver_unload(struct drm_device *dev)
 	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
 	(void)ttm_bo_device_release(&dev_priv->bdev);
 	vmw_ttm_global_release(dev_priv);
-	idr_destroy(&dev_priv->surface_idr);
-	idr_destroy(&dev_priv->context_idr);
-	idr_destroy(&dev_priv->stream_idr);
+
+	for (i = vmw_res_context; i < vmw_res_max; ++i)
+		idr_destroy(&dev_priv->res_idr[i]);
 
 	kfree(dev_priv);
@@ -935,7 +942,7 @@ static void vmw_master_drop(struct drm_device *dev,
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+	vmw_execbuf_release_pinned_bo(dev_priv);
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -987,7 +994,8 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents is moved to swappable memory.
 		 */
-		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+		vmw_execbuf_release_pinned_bo(dev_priv);
+		vmw_resource_evict_all(dev_priv);
 		ttm_bo_swapout_all(&dev_priv->bdev);
 		break;
...
@@ -67,31 +67,46 @@ struct vmw_fpriv {
 struct vmw_dma_buffer {
 	struct ttm_buffer_object base;
-	struct list_head validate_list;
-	bool gmr_bound;
-	uint32_t cur_validate_node;
-	bool on_validate_list;
+	struct list_head res_list;
 };
 
+/**
+ * struct vmw_validate_buffer - Carries validation info about buffers.
+ *
+ * @base: Validation info for TTM.
+ * @hash: Hash entry for quick lookup of the TTM buffer object.
+ *
+ * This structure contains also driver private validation info
+ * on top of the info needed by TTM.
+ */
+struct vmw_validate_buffer {
+	struct ttm_validate_buffer base;
+	struct drm_hash_item hash;
+};
+
+struct vmw_res_func;
 struct vmw_resource {
 	struct kref kref;
 	struct vmw_private *dev_priv;
-	struct idr *idr;
 	int id;
-	enum ttm_object_type res_type;
 	bool avail;
-	void (*remove_from_lists) (struct vmw_resource *res);
-	void (*hw_destroy) (struct vmw_resource *res);
+	unsigned long backup_size;
+	bool res_dirty; /* Protected by backup buffer reserved */
+	bool backup_dirty; /* Protected by backup buffer reserved */
+	struct vmw_dma_buffer *backup;
+	unsigned long backup_offset;
+	const struct vmw_res_func *func;
+	struct list_head lru_head; /* Protected by the resource lock */
+	struct list_head mob_head; /* Protected by @backup reserved */
 	void (*res_free) (struct vmw_resource *res);
-	struct list_head validate_head;
-	struct list_head query_head; /* Protected by the cmdbuf mutex */
-	/* TODO is a generic snooper needed? */
-#if 0
-	void (*snoop)(struct vmw_resource *res,
-		      struct ttm_object_file *tfile,
-		      SVGA3dCmdHeader *header);
-	void *snoop_priv;
-#endif
+	void (*hw_destroy) (struct vmw_resource *res);
+};
+
+enum vmw_res_type {
+	vmw_res_context,
+	vmw_res_surface,
+	vmw_res_stream,
+	vmw_res_max
 };
 
 struct vmw_cursor_snooper {
@@ -105,20 +120,18 @@ struct vmw_surface_offset;
 
 struct vmw_surface {
 	struct vmw_resource res;
-	struct list_head lru_head; /* Protected by the resource lock */
 	uint32_t flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
+	struct drm_vmw_size base_size;
 	struct drm_vmw_size *sizes;
 	uint32_t num_sizes;
 	bool scanout;
 	/* TODO so far just a extra pointer */
 	struct vmw_cursor_snooper snooper;
-	struct ttm_buffer_object *backup;
 	struct vmw_surface_offset *offsets;
-	uint32_t backup_size;
+	SVGA3dTextureFilter autogen_filter;
+	uint32_t multisample_count;
 };
 
 struct vmw_marker_queue {
@@ -145,29 +158,46 @@ struct vmw_relocation {
 	uint32_t index;
 };
 
+/**
+ * struct vmw_res_cache_entry - resource information cache entry
+ *
+ * @valid: Whether the entry is valid, which also implies that the execbuf
+ * code holds a reference to the resource, and it's placed on the
+ * validation list.
+ * @handle: User-space handle of a resource.
+ * @res: Non-ref-counted pointer to the resource.
+ *
+ * Used to avoid frequent repeated user-space handle lookups of the
+ * same resource.
+ */
+struct vmw_res_cache_entry {
+	bool valid;
+	uint32_t handle;
+	struct vmw_resource *res;
+	struct vmw_resource_val_node *node;
+};
+
 struct vmw_sw_context{
-	struct ida bo_list;
-	uint32_t last_cid;
-	bool cid_valid;
+	struct drm_open_hash res_ht;
+	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct vmw_resource *cur_ctx;
-	uint32_t last_sid;
-	uint32_t sid_translation;
-	bool sid_valid;
 	struct ttm_object_file *tfile;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
-	struct ttm_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
+	struct vmw_validate_buffer val_bufs[VMWGFX_MAX_VALIDATIONS];
 	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
 	struct list_head resource_list;
 	uint32_t fence_flags;
-	struct list_head query_list;
 	struct ttm_buffer_object *cur_query_bo;
-	uint32_t cur_query_cid;
-	bool query_cid_valid;
+	struct list_head res_relocations;
+	uint32_t *buf_start;
+	struct vmw_res_cache_entry res_cache[vmw_res_max];
+	struct vmw_resource *last_query_ctx;
+	bool needs_post_query_barrier;
+	struct vmw_resource *error_resource;
 };
 
 struct vmw_legacy_display;
@@ -242,10 +272,7 @@ struct vmw_private {
 	 */
 
 	rwlock_t resource_lock;
-	struct idr context_idr;
-	struct idr surface_idr;
-	struct idr stream_idr;
-
+	struct idr res_idr[vmw_res_max];
 	/*
 	 * Block lastclose from racing with firstopen.
 	 */
@@ -320,6 +347,7 @@ struct vmw_private {
 	struct ttm_buffer_object *dummy_query_bo;
 	struct ttm_buffer_object *pinned_bo;
 	uint32_t query_cid;
+	uint32_t query_cid_valid;
 	bool dummy_query_bo_pinned;
 
 	/*
@@ -329,10 +357,15 @@ struct vmw_private {
 	 * protected by the cmdbuf mutex for simplicity.
 	 */
 
-	struct list_head surface_lru;
+	struct list_head res_lru[vmw_res_max];
 	uint32_t used_memory_size;
 };
 
+static inline struct vmw_surface *vmw_res_to_srf(struct vmw_resource *res)
+{
+	return container_of(res, struct vmw_surface, res);
+}
+
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
 {
 	return (struct vmw_private *)dev->dev_private;
@@ -381,10 +414,16 @@ extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
 /**
  * Resource utilities - vmwgfx_resource.c
  */
+struct vmw_user_resource_conv;
+extern const struct vmw_user_resource_conv *user_surface_converter;
+extern const struct vmw_user_resource_conv *user_context_converter;
 
 extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern int vmw_resource_validate(struct vmw_resource *res);
+extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
+extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
@@ -398,14 +437,13 @@ extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 				  uint32_t handle,
 				  struct vmw_surface **out_surf,
 				  struct vmw_dma_buffer **out_buf);
+extern int vmw_user_resource_lookup_handle(
+	struct vmw_private *dev_priv,
+	struct ttm_object_file *tfile,
+	uint32_t handle,
+	const struct vmw_user_resource_conv *converter,
+	struct vmw_resource **p_res);
 extern void vmw_surface_res_free(struct vmw_resource *res);
-extern int vmw_surface_init(struct vmw_private *dev_priv,
-			    struct vmw_surface *srf,
-			    void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
-					  struct ttm_object_file *tfile,
-					  uint32_t handle,
-					  struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
 				     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -440,7 +478,14 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
-extern void vmw_resource_unreserve(struct list_head *list);
+extern void vmw_resource_unreserve(struct vmw_resource *res,
+				   struct vmw_dma_buffer *new_backup,
+				   unsigned long new_backup_offset);
+extern void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+				     struct ttm_mem_reg *mem);
+extern void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+				struct vmw_fence_obj *fence);
+extern void vmw_resource_evict_all(struct vmw_private *dev_priv);
 
 /**
  * DMA buffer helper routines - vmwgfx_dmabuf.c
@@ -538,10 +583,9 @@ extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct drm_vmw_fence_rep __user
 			       *user_fence_rep,
 			       struct vmw_fence_obj **out_fence);
-
-extern void
-vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
-			      bool only_on_cid_match, uint32_t cid);
+extern void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+					    struct vmw_fence_obj *fence);
+extern void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv);
 extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 				      struct vmw_private *dev_priv,
...
@@ -131,6 +131,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_rect *clips = NULL;
 	struct drm_mode_object *obj;
 	struct vmw_framebuffer *vfb;
+	struct vmw_resource *res;
 	uint32_t num_clips;
 	int ret;
@@ -178,11 +179,13 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(ret != 0))
 		goto out_no_ttm_lock;
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
-					     &surface);
+	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
+					      user_surface_converter,
+					      &res);
 	if (ret)
 		goto out_no_surface;
 
+	surface = vmw_res_to_srf(res);
 	ret = vmw_kms_present(dev_priv, file_priv,
 			      vfb, surface, arg->sid,
 			      arg->dest_x, arg->dest_y,
...