Commit fb1d9738 authored by Jakob Bornecrantz, committed by Dave Airlie

drm/vmwgfx: Add DRM driver for VMware Virtual GPU

This commit adds the vmwgfx driver for the VMware Virtual GPU aka SVGA.
The driver is under staging, the same as Nouveau and Radeon KMS. Hopefully
the 2D ioctls are bug free and don't need changing, so that part of the
API should be stable. But there is a pretty big chance that the 3D API
will change in the future.
Signed-off-by: Thomas Hellström <thellstrom@vmware.com>
Signed-off-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent 632f6117
@@ -30,4 +30,5 @@ obj-$(CONFIG_DRM_I830) += i830/
obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
depends on DRM && PCI
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
select DRM_TTM
help
KMS enabled DRM driver for SVGA2 virtual hardware.
If unsure say n. The compiled module will be
called vmwgfx.ko
ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED |
TTM_PL_FLAG_NO_EVICT;
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
TTM_PL_FLAG_CACHED;
struct ttm_placement vmw_vram_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_placement_flags
};
struct ttm_placement vmw_vram_ne_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &vram_ne_placement_flags,
.num_busy_placement = 1,
.busy_placement = &vram_ne_placement_flags
};
struct ttm_placement vmw_sys_placement = {
.fpfn = 0,
.lpfn = 0,
.num_placement = 1,
.placement = &sys_placement_flags,
.num_busy_placement = 1,
.busy_placement = &sys_placement_flags
};
struct vmw_ttm_backend {
struct ttm_backend backend;
};
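/*
 * GMR binding is handled at command submission time via vmw_gmr_bind(),
 * so this TTM backend is essentially a stub that only tracks the
 * allocation of the backend object itself.
 */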
static int vmw_ttm_populate(struct ttm_backend *backend,
unsigned long num_pages, struct page **pages,
struct page *dummy_read_page)
{
return 0;
}
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
{
return 0;
}
static int vmw_ttm_unbind(struct ttm_backend *backend)
{
return 0;
}
static void vmw_ttm_clear(struct ttm_backend *backend)
{
}
static void vmw_ttm_destroy(struct ttm_backend *backend)
{
struct vmw_ttm_backend *vmw_be =
container_of(backend, struct vmw_ttm_backend, backend);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
.populate = vmw_ttm_populate,
.clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
{
struct vmw_ttm_backend *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
vmw_be->backend.func = &vmw_ttm_func;
return &vmw_be->backend;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
struct vmw_private *dev_priv =
container_of(bdev, struct vmw_private, bdev);
switch (type) {
case TTM_PL_SYSTEM:
/* System memory */
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = 0;
man->io_offset = dev_priv->vram_start;
man->io_size = dev_priv->vram_size;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP | TTM_MEMTYPE_FLAG_MAPPABLE;
man->io_addr = NULL;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_WC;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
}
void vmw_evict_flags(struct ttm_buffer_object *bo,
struct ttm_placement *placement)
{
*placement = vmw_sys_placement;
}
/**
* FIXME: Proper access checks on buffers.
*/
static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
return 0;
}
/**
* FIXME: We're using the old vmware polling method to sync.
* Do this with fences instead.
*/
static void *vmw_sync_obj_ref(void *sync_obj)
{
return sync_obj;
}
static void vmw_sync_obj_unref(void **sync_obj)
{
*sync_obj = NULL;
}
static int vmw_sync_obj_flush(void *sync_obj, void *sync_arg)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
mutex_unlock(&dev_priv->hw_mutex);
return 0;
}
static bool vmw_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
uint32_t sequence = (unsigned long) sync_obj;
return vmw_fence_signaled(dev_priv, sequence);
}
static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
bool lazy, bool interruptible)
{
struct vmw_private *dev_priv = (struct vmw_private *)sync_arg;
uint32_t sequence = (unsigned long) sync_obj;
return vmw_wait_fence(dev_priv, false, sequence, false, 3*HZ);
}
struct ttm_bo_driver vmw_bo_driver = {
.create_ttm_backend_entry = vmw_ttm_backend_init,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
.move = NULL,
.verify_access = vmw_verify_access,
.sync_obj_signaled = vmw_sync_obj_signaled,
.sync_obj_wait = vmw_sync_obj_wait,
.sync_obj_flush = vmw_sync_obj_flush,
.sync_obj_unref = vmw_sync_obj_unref,
.sync_obj_ref = vmw_sync_obj_ref
};
This diff has been collapsed.
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef _VMWGFX_DRV_H_
#define _VMWGFX_DRV_H_
#include "vmwgfx_reg.h"
#include "drmP.h"
#include "vmwgfx_drm.h"
#include "drm_hashtab.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_lock.h"
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"
#define VMWGFX_DRIVER_DATE "20090724"
#define VMWGFX_DRIVER_MAJOR 0
#define VMWGFX_DRIVER_MINOR 1
#define VMWGFX_DRIVER_PATCHLEVEL 2
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
#define VMWGFX_MAX_RELOCATIONS 2048
#define VMWGFX_MAX_GMRS 2048
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
};
struct vmw_dma_buffer {
struct ttm_buffer_object base;
struct list_head validate_list;
struct list_head gmr_lru;
uint32_t gmr_id;
bool gmr_bound;
uint32_t cur_validate_node;
bool on_validate_list;
};
struct vmw_resource {
struct kref kref;
struct vmw_private *dev_priv;
struct idr *idr;
int id;
enum ttm_object_type res_type;
bool avail;
void (*hw_destroy) (struct vmw_resource *res);
void (*res_free) (struct vmw_resource *res);
/* TODO is a generic snooper needed? */
#if 0
void (*snoop)(struct vmw_resource *res,
struct ttm_object_file *tfile,
SVGA3dCmdHeader *header);
void *snoop_priv;
#endif
};
struct vmw_cursor_snooper {
struct drm_crtc *crtc;
size_t age;
uint32_t *image;
};
struct vmw_surface {
struct vmw_resource res;
uint32_t flags;
uint32_t format;
uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
struct drm_vmw_size *sizes;
uint32_t num_sizes;
/* TODO so far just an extra pointer */
struct vmw_cursor_snooper snooper;
};
struct vmw_fifo_state {
unsigned long reserved_size;
__le32 *dynamic_buffer;
__le32 *static_buffer;
__le32 *last_buffer;
uint32_t last_data_size;
uint32_t last_buffer_size;
bool last_buffer_add;
unsigned long static_buffer_size;
bool using_bounce_buffer;
uint32_t capabilities;
struct rw_semaphore rwsem;
};
struct vmw_relocation {
SVGAGuestPtr *location;
uint32_t index;
};
struct vmw_sw_context{
struct ida bo_list;
uint32_t last_cid;
bool cid_valid;
uint32_t last_sid;
bool sid_valid;
struct ttm_object_file *tfile;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
struct ttm_validate_buffer val_bufs[VMWGFX_MAX_GMRS];
uint32_t cur_val_buf;
};
struct vmw_legacy_display;
struct vmw_overlay;
struct vmw_master {
struct ttm_lock lock;
};
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_global_reference mem_global_ref;
struct vmw_fifo_state fifo;
struct drm_device *dev;
unsigned long vmw_chipset;
unsigned int io_start;
uint32_t vram_start;
uint32_t vram_size;
uint32_t mmio_start;
uint32_t mmio_size;
uint32_t fb_max_width;
uint32_t fb_max_height;
__le32 __iomem *mmio_virt;
int mmio_mtrr;
uint32_t capabilities;
uint32_t max_gmr_descriptors;
uint32_t max_gmr_ids;
struct mutex hw_mutex;
/*
* VGA registers.
*/
uint32_t vga_width;
uint32_t vga_height;
uint32_t vga_depth;
uint32_t vga_bpp;
uint32_t vga_pseudo;
uint32_t vga_red_mask;
uint32_t vga_blue_mask;
uint32_t vga_green_mask;
/*
* Framebuffer info.
*/
void *fb_info;
struct vmw_legacy_display *ldu_priv;
struct vmw_overlay *overlay_priv;
/*
* Context and surface management.
*/
rwlock_t resource_lock;
struct idr context_idr;
struct idr surface_idr;
struct idr stream_idr;
/*
* Block lastclose from racing with firstopen.
*/
struct mutex init_mutex;
/*
* A resource manager for kernel-only surfaces and
* contexts.
*/
struct ttm_object_device *tdev;
/*
* Fencing and IRQs.
*/
uint32_t fence_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
atomic_t fence_queue_waiters;
atomic_t fifo_queue_waiters;
uint32_t last_read_sequence;
spinlock_t irq_lock;
/*
* Device state
*/
uint32_t traces_state;
uint32_t enable_state;
uint32_t config_done_state;
/**
* Execbuf
*/
/**
* Protected by the cmdbuf mutex.
*/
struct vmw_sw_context ctx;
uint32_t val_seq;
struct mutex cmdbuf_mutex;
/**
* GMR management. Protected by the lru spinlock.
*/
struct ida gmr_ida;
struct list_head gmr_lru;
/**
* Operating mode.
*/
bool stealth;
bool is_opened;
/**
* Master management.
*/
struct vmw_master *active_master;
struct vmw_master fbdev_master;
};
static inline struct vmw_private *vmw_priv(struct drm_device *dev)
{
return (struct vmw_private *)dev->dev_private;
}
static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
{
return (struct vmw_fpriv *)file_priv->driver_priv;
}
static inline struct vmw_master *vmw_master(struct drm_master *master)
{
return (struct vmw_master *) master->driver_priv;
}
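/*
 * Register access helpers. The SVGA device exposes an index/value port
 * pair: the register number is written to the index port and the
 * register contents are then written to or read from the value port.
 */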
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
uint32_t val;
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
return val;
}
/**
* GMR utilities - vmwgfx_gmr.c
*/
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo);
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
/**
* Resource utilities - vmwgfx_resource.c
*/
extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv);
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
void (*res_free) (struct vmw_resource *res));
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int sid, struct vmw_surface **out);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id);
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
struct vmw_dma_buffer *vmw_bo,
size_t size, struct ttm_placement *placement,
bool interuptable,
void (*bo_free) (struct ttm_buffer_object *bo));
extern int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
uint32_t cur_validate_node);
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t id, struct vmw_dma_buffer **out);
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *bo);
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t *inout_id,
struct vmw_resource **out);
/**
* Misc Ioctl functionality - vmwgfx_ioctl.c
*/
extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/**
* Fifo utilities - vmwgfx_fifo.c
*/
extern int vmw_fifo_init(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void vmw_fifo_release(struct vmw_private *dev_priv,
struct vmw_fifo_state *fifo);
extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *sequence);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma);
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
extern int vmw_ttm_global_init(struct vmw_private *dev_priv);
extern void vmw_ttm_global_release(struct vmw_private *dev_priv);
extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
/**
* TTM buffer object driver - vmwgfx_buffer.c
*/
extern struct ttm_placement vmw_vram_placement;
extern struct ttm_placement vmw_vram_ne_placement;
extern struct ttm_placement vmw_sys_placement;
extern struct ttm_bo_driver vmw_bo_driver;
extern int vmw_dma_quiescent(struct drm_device *dev);
/**
* Command submission - vmwgfx_execbuf.c
*/
extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/**
* IRQs and waiting - vmwgfx_irq.c
*/
extern irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS);
extern int vmw_wait_fence(struct vmw_private *dev_priv, bool lazy,
uint32_t sequence, bool interruptible,
unsigned long timeout);
extern void vmw_irq_preinstall(struct drm_device *dev);
extern int vmw_irq_postinstall(struct drm_device *dev);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence);
extern int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
uint32_t sequence,
bool interruptible,
unsigned long timeout);
/**
* Kernel framebuffer - vmwgfx_fb.c
*/
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
/**
* Kernel modesetting - vmwgfx_kms.c
*/
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
int vmw_kms_save_vga(struct vmw_private *vmw_priv);
int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
struct ttm_object_file *tfile,
struct ttm_buffer_object *bo,
SVGA3dCmdHeader *header);
/**
* Overlay control - vmwgfx_overlay.c
*/
int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
/**
* Inline helper functions
*/
static inline void vmw_surface_unreference(struct vmw_surface **srf)
{
struct vmw_surface *tmp_srf = *srf;
struct vmw_resource *res = &tmp_srf->res;
*srf = NULL;
vmw_resource_unreference(&res);
}
static inline struct vmw_surface *vmw_surface_reference(struct vmw_surface *srf)
{
(void) vmw_resource_reference(&srf->res);
return srf;
}
static inline void vmw_dmabuf_unreference(struct vmw_dma_buffer **buf)
{
struct vmw_dma_buffer *tmp_buf = *buf;
struct ttm_buffer_object *bo = &tmp_buf->base;
*buf = NULL;
ttm_bo_unref(&bo);
}
static inline struct vmw_dma_buffer *vmw_dmabuf_reference(struct vmw_dma_buffer *buf)
{
if (ttm_bo_reference(&buf->base))
return buf;
return NULL;
}
#endif
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
return 0;
}
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
__le32 cid;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_cid_cmd, header);
if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
return 0;
ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use context %u\n",
(unsigned) cmd->cid);
return ret;
}
sw_context->last_cid = cmd->cid;
sw_context->cid_valid = true;
return 0;
}
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
uint32_t sid)
{
if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
sid != SVGA3D_INVALID_ID)) {
int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
if (unlikely(ret != 0)) {
DRM_ERROR("Could ot find or use surface %u\n",
(unsigned) sid);
return ret;
}
sw_context->last_sid = sid;
sw_context->sid_valid = true;
}
return 0;
}
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetRenderTarget body;
} *cmd;
int ret;
ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
}
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceCopy body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceStretchBlt body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
}
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdBlitSurfaceToScreen body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
}
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_sid_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdPresent body;
} *cmd;
cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
}
static int vmw_cmd_dma(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
uint32_t handle;
struct vmw_dma_buffer *vmw_bo = NULL;
struct ttm_buffer_object *bo;
struct vmw_surface *srf = NULL;
struct vmw_dma_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSurfaceDMA dma;
} *cmd;
struct vmw_relocation *reloc;
int ret;
uint32_t cur_validate_node;
struct ttm_validate_buffer *val_buf;
cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
if (unlikely(ret != 0))
return ret;
handle = cmd->dma.guest.ptr.gmrId;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
}
bo = &vmw_bo->base;
if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
DRM_ERROR("Max number of DMA commands per submission"
" exceeded\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc = &sw_context->relocs[sw_context->cur_reloc++];
reloc->location = &cmd->dma.guest.ptr;
cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
DRM_ERROR("Max number of DMA buffers per submission"
" exceeded.\n");
ret = -EINVAL;
goto out_no_reloc;
}
reloc->index = cur_validate_node;
if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
val_buf = &sw_context->val_bufs[cur_validate_node];
val_buf->bo = ttm_bo_reference(bo);
val_buf->new_sync_obj_arg = (void *) dev_priv;
list_add_tail(&val_buf->head, &sw_context->validate_nodes);
++sw_context->cur_val_buf;
}
ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
if (ret) {
DRM_ERROR("could not find surface\n");
goto out_no_reloc;
}
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
vmw_surface_unreference(&srf);
out_no_reloc:
vmw_dmabuf_unreference(&vmw_bo);
return ret;
}
typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
SVGA3dCmdHeader *);
#define VMW_CMD_DEF(cmd, func) \
[cmd - SVGA_3D_CMD_BASE] = func
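/*
 * Verifier dispatch table for the 3D command stream. The designated
 * initializer in VMW_CMD_DEF indexes each entry by the command id
 * relative to SVGA_3D_CMD_BASE.
 */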
static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
&vmw_cmd_set_render_target_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
&vmw_cmd_blt_surf_screen_check)
};
static int vmw_cmd_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t *size)
{
uint32_t cmd_id;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;
cmd_id = ((uint32_t *)buf)[0];
if (cmd_id == SVGA_CMD_UPDATE) {
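/*
 * SVGA_CMD_UPDATE is a 2D fifo command: a 32-bit command id followed
 * by an SVGAFifoCmdUpdate body (x, y, width, height), i.e. five
 * 32-bit words in total.
 */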
*size = 5 << 2;
return 0;
}
cmd_id = le32_to_cpu(header->id);
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
goto out_err;
ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
if (unlikely(ret != 0))
goto out_err;
return 0;
out_err:
DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
cmd_id + SVGA_3D_CMD_BASE);
return -EINVAL;
}
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
void *buf, uint32_t size)
{
int32_t cur_size = size;
int ret;
while (cur_size > 0) {
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
if (unlikely(ret != 0))
return ret;
buf = (void *)((unsigned long) buf + size);
cur_size -= size;
}
if (unlikely(cur_size != 0)) {
DRM_ERROR("Command verifier out of sync.\n");
return -EINVAL;
}
return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
sw_context->cur_reloc = 0;
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
uint32_t i;
struct vmw_relocation *reloc;
struct ttm_validate_buffer *validate;
struct ttm_buffer_object *bo;
for (i = 0; i < sw_context->cur_reloc; ++i) {
reloc = &sw_context->relocs[i];
validate = &sw_context->val_bufs[reloc->index];
bo = validate->bo;
reloc->location->offset += bo->offset;
reloc->location->gmrId = vmw_dmabuf_gmr(bo);
}
vmw_free_relocations(sw_context);
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
struct ttm_validate_buffer *entry, *next;
list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
head) {
list_del(&entry->head);
vmw_dmabuf_validate_clear(entry->bo);
ttm_bo_unref(&entry->bo);
sw_context->cur_val_buf--;
}
BUG_ON(sw_context->cur_val_buf != 0);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo)
{
int ret;
if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
return 0;
ret = vmw_gmr_bind(dev_priv, bo);
if (likely(ret == 0 || ret == -ERESTART))
return ret;
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
return ret;
}
static int vmw_validate_buffers(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context)
{
struct ttm_validate_buffer *entry;
int ret;
list_for_each_entry(entry, &sw_context->validate_nodes, head) {
ret = vmw_validate_single_buffer(dev_priv, entry->bo);
if (unlikely(ret != 0))
return ret;
}
return 0;
}
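/*
 * Execbuf: copy the user command stream into reserved fifo space,
 * verify each command, reserve and validate the referenced buffers,
 * patch the guest pointer relocations, commit the fifo and fence the
 * submission.
 */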
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
struct drm_vmw_fence_rep fence_rep;
struct drm_vmw_fence_rep __user *user_fence_rep;
int ret;
void *user_cmd;
void *cmd;
uint32_t sequence;
struct vmw_sw_context *sw_context = &dev_priv->ctx;
struct vmw_master *vmaster = vmw_master(file_priv->master);
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTART;
goto out_no_cmd_mutex;
}
cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
ret = -ENOMEM;
goto out_unlock;
}
user_cmd = (void __user *)(unsigned long)arg->commands;
ret = copy_from_user(cmd, user_cmd, arg->command_size);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed copying commands.\n");
goto out_commit;
}
sw_context->tfile = vmw_fpriv(file_priv)->tfile;
sw_context->cid_valid = false;
sw_context->sid_valid = false;
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
INIT_LIST_HEAD(&sw_context->validate_nodes);
ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
if (unlikely(ret != 0))
goto out_err;
ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes,
dev_priv->val_seq++);
if (unlikely(ret != 0))
goto out_err;
ret = vmw_validate_buffers(dev_priv, sw_context);
if (unlikely(ret != 0))
goto out_err;
vmw_apply_relocations(sw_context);
vmw_fifo_commit(dev_priv, arg->command_size);
ret = vmw_fifo_send_fence(dev_priv, &sequence);
ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
(void *)(unsigned long) sequence);
vmw_clear_validations(sw_context);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
* This error is harmless, because if fence submission fails,
* vmw_fifo_send_fence will sync.
*/
if (ret != 0)
DRM_ERROR("Fence submission error. Syncing.\n");
fence_rep.error = ret;
fence_rep.fence_seq = (uint64_t) sequence;
user_fence_rep = (struct drm_vmw_fence_rep __user *)
(unsigned long)arg->fence_rep;
/*
* copy_to_user errors will be detected by user space not
* seeing fence_rep::error filled in.
*/
ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));
vmw_kms_cursor_post_execbuf(dev_priv);
ttm_read_unlock(&vmaster->lock);
return 0;
out_err:
vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&sw_context->validate_nodes);
vmw_clear_validations(sw_context);
out_commit:
vmw_fifo_commit(dev_priv, 0);
out_unlock:
mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
ttm_read_unlock(&vmaster->lock);
return ret;
}
/**************************************************************************
*
* Copyright © 2007 David Airlie
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#include "ttm/ttm_placement.h"
#define VMW_DIRTY_DELAY (HZ / 30)
struct vmw_fb_par {
struct vmw_private *vmw_priv;
void *vmalloc;
struct vmw_dma_buffer *vmw_bo;
struct ttm_bo_kmap_obj map;
u32 pseudo_palette[17];
unsigned depth;
unsigned bpp;
unsigned max_width;
unsigned max_height;
void *bo_ptr;
unsigned bo_size;
bool bo_iowrite;
struct {
spinlock_t lock;
bool active;
unsigned x1;
unsigned y1;
unsigned x2;
unsigned y2;
} dirty;
};
static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp,
struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
u32 *pal = par->pseudo_palette;
if (regno > 15) {
DRM_ERROR("Bad regno %u.\n", regno);
return 1;
}
switch (par->depth) {
case 24:
case 32:
pal[regno] = ((red & 0xff00) << 8) |
(green & 0xff00) |
((blue & 0xff00) >> 8);
break;
default:
DRM_ERROR("Bad depth %u, bpp %u.\n", par->depth, par->bpp);
return 1;
}
return 0;
}
static int vmw_fb_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
int depth = var->bits_per_pixel;
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
switch (var->bits_per_pixel) {
case 32:
depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
DRM_ERROR("Bad bpp %u.\n", var->bits_per_pixel);
return -EINVAL;
}
switch (depth) {
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
default:
DRM_ERROR("Bad depth %u.\n", depth);
return -EINVAL;
}
/* without multimon it's hard to resize */
if (!(vmw_priv->capabilities & SVGA_CAP_MULTIMON) &&
(var->xres != par->max_width ||
var->yres != par->max_height)) {
DRM_ERROR("Tried to resize, but we don't have multimon\n");
return -EINVAL;
}
if (var->xres > par->max_width ||
var->yres > par->max_height) {
DRM_ERROR("Requested geom can not fit in framebuffer\n");
return -EINVAL;
}
return 0;
}
static int vmw_fb_set_par(struct fb_info *info)
{
struct vmw_fb_par *par = info->par;
struct vmw_private *vmw_priv = par->vmw_priv;
if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
vmw_write(vmw_priv, SVGA_REG_ENABLE, 1);
vmw_write(vmw_priv, SVGA_REG_WIDTH, par->max_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, par->max_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, par->bpp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, par->depth);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
/* TODO check if pitch and offset changes */
vmw_write(vmw_priv, SVGA_REG_NUM_GUEST_DISPLAYS, 1);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, 0);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, true);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, info->var.xoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
} else {
vmw_write(vmw_priv, SVGA_REG_WIDTH, info->var.xres);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, info->var.yres);
/* TODO check if pitch and offset changes */
}
return 0;
}
static int vmw_fb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
return 0;
}
static int vmw_fb_blank(int blank, struct fb_info *info)
{
return 0;
}
/*
* Dirty code
*/
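/*
 * The fbdev framebuffer is backed by a vmalloc()ed shadow. Writes are
 * accumulated into a single dirty bounding box, which the deferred io
 * worker copies into the VRAM buffer object and then flushes to the
 * host with an SVGA_CMD_UPDATE fifo command.
 */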
static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
{
struct vmw_private *vmw_priv = par->vmw_priv;
struct fb_info *info = vmw_priv->fb_info;
int stride = (info->fix.line_length / 4);
int *src = (int *)info->screen_base;
__le32 __iomem *vram_mem = par->bo_ptr;
unsigned long flags;
unsigned x, y, w, h;
int i, k;
struct {
uint32_t header;
SVGAFifoCmdUpdate body;
} *cmd;
spin_lock_irqsave(&par->dirty.lock, flags);
if (!par->dirty.active) {
spin_unlock_irqrestore(&par->dirty.lock, flags);
return;
}
x = par->dirty.x1;
y = par->dirty.y1;
w = min(par->dirty.x2, info->var.xres) - x;
h = min(par->dirty.y2, info->var.yres) - y;
par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
spin_unlock_irqrestore(&par->dirty.lock, flags);
for (i = y * stride; i < info->fix.smem_len / 4; i += stride) {
for (k = i+x; k < i+x+w && k < info->fix.smem_len / 4; k++)
iowrite32(src[k], vram_mem + k);
}
#if 0
DRM_INFO("%s, (%u, %u) (%ux%u)\n", __func__, x, y, w, h);
#endif
cmd = vmw_fifo_reserve(vmw_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Fifo reserve failed.\n");
return;
}
cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
cmd->body.x = cpu_to_le32(x);
cmd->body.y = cpu_to_le32(y);
cmd->body.width = cpu_to_le32(w);
cmd->body.height = cpu_to_le32(h);
vmw_fifo_commit(vmw_priv, sizeof(*cmd));
}
static void vmw_fb_dirty_mark(struct vmw_fb_par *par,
unsigned x1, unsigned y1,
unsigned width, unsigned height)
{
struct fb_info *info = par->vmw_priv->fb_info;
unsigned long flags;
unsigned x2 = x1 + width;
unsigned y2 = y1 + height;
spin_lock_irqsave(&par->dirty.lock, flags);
if (par->dirty.x1 == par->dirty.x2) {
par->dirty.x1 = x1;
par->dirty.y1 = y1;
par->dirty.x2 = x2;
par->dirty.y2 = y2;
/* If we are active, start the dirty work;
 * we share the work with the defio system. */
if (par->dirty.active)
schedule_delayed_work(&info->deferred_work, VMW_DIRTY_DELAY);
} else {
if (x1 < par->dirty.x1)
par->dirty.x1 = x1;
if (y1 < par->dirty.y1)
par->dirty.y1 = y1;
if (x2 > par->dirty.x2)
par->dirty.x2 = x2;
if (y2 > par->dirty.y2)
par->dirty.y2 = y2;
}
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
static void vmw_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
struct vmw_fb_par *par = info->par;
unsigned long start, end, min, max;
unsigned long flags;
struct page *page;
int y1, y2;
min = ULONG_MAX;
max = 0;
list_for_each_entry(page, pagelist, lru) {
start = page->index << PAGE_SHIFT;
end = start + PAGE_SIZE - 1;
min = min(min, start);
max = max(max, end);
}
if (min < max) {
y1 = min / info->fix.line_length;
y2 = (max / info->fix.line_length) + 1;
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.x1 = 0;
par->dirty.y1 = y1;
par->dirty.x2 = info->var.xres;
par->dirty.y2 = y2;
spin_unlock_irqrestore(&par->dirty.lock, flags);
}
vmw_fb_dirty_flush(par);
};
struct fb_deferred_io vmw_defio = {
.delay = VMW_DIRTY_DELAY,
.deferred_io = vmw_deferred_io,
};
/*
* Draw code
*/
static void vmw_fb_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
{
cfb_fillrect(info, rect);
vmw_fb_dirty_mark(info->par, rect->dx, rect->dy,
rect->width, rect->height);
}
static void vmw_fb_copyarea(struct fb_info *info, const struct fb_copyarea *region)
{
cfb_copyarea(info, region);
vmw_fb_dirty_mark(info->par, region->dx, region->dy,
region->width, region->height);
}
static void vmw_fb_imageblit(struct fb_info *info, const struct fb_image *image)
{
cfb_imageblit(info, image);
vmw_fb_dirty_mark(info->par, image->dx, image->dy,
image->width, image->height);
}
/*
* Bring up code
*/
static struct fb_ops vmw_fb_ops = {
.owner = THIS_MODULE,
.fb_check_var = vmw_fb_check_var,
.fb_set_par = vmw_fb_set_par,
.fb_setcolreg = vmw_fb_setcolreg,
.fb_fillrect = vmw_fb_fillrect,
.fb_copyarea = vmw_fb_copyarea,
.fb_imageblit = vmw_fb_imageblit,
.fb_pan_display = vmw_fb_pan_display,
.fb_blank = vmw_fb_blank,
};
static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
size_t size, struct vmw_dma_buffer **out)
{
struct vmw_dma_buffer *vmw_bo;
struct ttm_placement ne_placement = vmw_vram_ne_placement;
int ret;
ne_placement.lpfn = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* interruptible? */
ret = ttm_write_lock(&vmw_priv->fbdev_master.lock, false);
if (unlikely(ret != 0))
return ret;
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo)
goto err_unlock;
ret = vmw_dmabuf_init(vmw_priv, vmw_bo, size,
&ne_placement,
false,
&vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
ttm_write_unlock(&vmw_priv->fbdev_master.lock);
return 0;
err_unlock:
ttm_write_unlock(&vmw_priv->fbdev_master.lock);
return ret;
}
int vmw_fb_init(struct vmw_private *vmw_priv)
{
struct device *device = &vmw_priv->dev->pdev->dev;
struct vmw_fb_par *par;
struct fb_info *info;
unsigned initial_width, initial_height;
unsigned fb_width, fb_height;
unsigned fb_bbp, fb_depth, fb_offset, fb_pitch, fb_size;
int ret;
initial_width = 800;
initial_height = 600;
fb_bbp = 32;
fb_depth = 24;
if (vmw_priv->capabilities & SVGA_CAP_MULTIMON) {
fb_width = min(vmw_priv->fb_max_width, (unsigned)2048);
fb_height = min(vmw_priv->fb_max_height, (unsigned)2048);
} else {
fb_width = min(vmw_priv->fb_max_width, initial_width);
fb_height = min(vmw_priv->fb_max_height, initial_height);
}
initial_width = min(fb_width, initial_width);
initial_height = min(fb_height, initial_height);
vmw_write(vmw_priv, SVGA_REG_WIDTH, fb_width);
vmw_write(vmw_priv, SVGA_REG_HEIGHT, fb_height);
vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, fb_bbp);
vmw_write(vmw_priv, SVGA_REG_DEPTH, fb_depth);
vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
fb_size = vmw_read(vmw_priv, SVGA_REG_FB_SIZE);
fb_offset = vmw_read(vmw_priv, SVGA_REG_FB_OFFSET);
fb_pitch = vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE);
DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_WIDTH));
DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_MAX_HEIGHT));
DRM_DEBUG("width %u\n", vmw_read(vmw_priv, SVGA_REG_WIDTH));
DRM_DEBUG("height %u\n", vmw_read(vmw_priv, SVGA_REG_HEIGHT));
DRM_DEBUG("bpp %u\n", vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL));
DRM_DEBUG("depth %u\n", vmw_read(vmw_priv, SVGA_REG_DEPTH));
DRM_DEBUG("bpl %u\n", vmw_read(vmw_priv, SVGA_REG_BYTES_PER_LINE));
DRM_DEBUG("r mask %08x\n", vmw_read(vmw_priv, SVGA_REG_RED_MASK));
DRM_DEBUG("g mask %08x\n", vmw_read(vmw_priv, SVGA_REG_GREEN_MASK));
DRM_DEBUG("b mask %08x\n", vmw_read(vmw_priv, SVGA_REG_BLUE_MASK));
DRM_DEBUG("fb_offset 0x%08x\n", fb_offset);
DRM_DEBUG("fb_pitch %u\n", fb_pitch);
DRM_DEBUG("fb_size %u kiB\n", fb_size / 1024);
info = framebuffer_alloc(sizeof(*par), device);
if (!info)
return -ENOMEM;
/*
* Par
*/
vmw_priv->fb_info = info;
par = info->par;
par->vmw_priv = vmw_priv;
par->depth = fb_depth;
par->bpp = fb_bbp;
par->vmalloc = NULL;
par->max_width = fb_width;
par->max_height = fb_height;
/*
* Create buffers and alloc memory
*/
par->vmalloc = vmalloc(fb_size);
if (unlikely(par->vmalloc == NULL)) {
ret = -ENOMEM;
goto err_free;
}
ret = vmw_fb_create_bo(vmw_priv, fb_size, &par->vmw_bo);
if (unlikely(ret != 0))
goto err_free;
ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
if (unlikely(ret != 0))
goto err_unref;
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
par->bo_size = fb_size;
/*
* Fixed and var
*/
strcpy(info->fix.id, "svgadrmfb");
info->fix.type = FB_TYPE_PACKED_PIXELS;
info->fix.visual = FB_VISUAL_TRUECOLOR;
info->fix.type_aux = 0;
info->fix.xpanstep = 1; /* doing it in hw */
info->fix.ypanstep = 1; /* doing it in hw */
info->fix.ywrapstep = 0;
info->fix.accel = FB_ACCEL_NONE;
info->fix.line_length = fb_pitch;
info->fix.smem_start = 0;
info->fix.smem_len = fb_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;
info->pseudo_palette = par->pseudo_palette;
info->screen_base = par->vmalloc;
info->screen_size = fb_size;
info->flags = FBINFO_DEFAULT;
info->fbops = &vmw_fb_ops;
/* 24 bit depth by default */
info->var.red.offset = 16;
info->var.green.offset = 8;
info->var.blue.offset = 0;
info->var.red.length = 8;
info->var.green.length = 8;
info->var.blue.length = 8;
info->var.transp.offset = 0;
info->var.transp.length = 0;
info->var.xres_virtual = fb_width;
info->var.yres_virtual = fb_height;
info->var.bits_per_pixel = par->bpp;
info->var.xoffset = 0;
info->var.yoffset = 0;
info->var.activate = FB_ACTIVATE_NOW;
info->var.height = -1;
info->var.width = -1;
info->var.xres = initial_width;
info->var.yres = initial_height;
#if 0
info->pixmap.size = 64*1024;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
#else
info->pixmap.size = 0;
info->pixmap.buf_align = 8;
info->pixmap.access_align = 32;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
info->pixmap.scan_align = 1;
#endif
/*
* Dirty & Deferred IO
*/
par->dirty.x1 = par->dirty.x2 = 0;
par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
info->fbdefio = &vmw_defio;
fb_deferred_io_init(info);
ret = register_framebuffer(info);
if (unlikely(ret != 0))
goto err_defio;
return 0;
err_defio:
fb_deferred_io_cleanup(info);
ttm_bo_kunmap(&par->map);
err_unref:
ttm_bo_unref((struct ttm_buffer_object **)&par->vmw_bo);
err_free:
vfree(par->vmalloc);
framebuffer_release(info);
vmw_priv->fb_info = NULL;
return ret;
}
int vmw_fb_close(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
struct ttm_buffer_object *bo;
if (!vmw_priv->fb_info)
return 0;
info = vmw_priv->fb_info;
par = info->par;
bo = &par->vmw_bo->base;
par->vmw_bo = NULL;
/* ??? order */
fb_deferred_io_cleanup(info);
unregister_framebuffer(info);
ttm_bo_kunmap(&par->map);
ttm_bo_unref(&bo);
vfree(par->vmalloc);
framebuffer_release(info);
return 0;
}
int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *vmw_bo)
{
struct ttm_buffer_object *bo = &vmw_bo->base;
int ret = 0;
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false);
ttm_bo_unreserve(bo);
return ret;
}
int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
struct vmw_dma_buffer *vmw_bo)
{
struct ttm_buffer_object *bo = &vmw_bo->base;
struct ttm_placement ne_placement = vmw_vram_ne_placement;
int ret = 0;
ne_placement.lpfn = bo->num_pages;
/* interruptible? */
ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
if (unlikely(ret != 0))
return ret;
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (unlikely(ret != 0))
goto err_unlock;
if (vmw_bo->gmr_bound) {
vmw_gmr_unbind(vmw_priv, vmw_bo->gmr_id);
spin_lock(&bo->glob->lru_lock);
ida_remove(&vmw_priv->gmr_ida, vmw_bo->gmr_id);
spin_unlock(&bo->glob->lru_lock);
vmw_bo->gmr_bound = false;
}
ret = ttm_bo_validate(bo, &ne_placement, false, false);
ttm_bo_unreserve(bo);
err_unlock:
ttm_write_unlock(&vmw_priv->active_master->lock);
return ret;
}
int vmw_fb_off(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
if (!vmw_priv->fb_info)
return -EINVAL;
info = vmw_priv->fb_info;
par = info->par;
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = false;
spin_unlock_irqrestore(&par->dirty.lock, flags);
flush_scheduled_work();
par->bo_ptr = NULL;
ttm_bo_kunmap(&par->map);
vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
return 0;
}
int vmw_fb_on(struct vmw_private *vmw_priv)
{
struct fb_info *info;
struct vmw_fb_par *par;
unsigned long flags;
bool dummy;
int ret;
if (!vmw_priv->fb_info)
return -EINVAL;
info = vmw_priv->fb_info;
par = info->par;
/* we are already active */
if (par->bo_ptr != NULL)
return 0;
/* Make sure that all overlays are stopped when we take over */
vmw_overlay_stop_all(vmw_priv);
ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("could not move buffer to start of VRAM\n");
goto err_no_buffer;
}
ret = ttm_bo_kmap(&par->vmw_bo->base,
0,
par->vmw_bo->base.num_pages,
&par->map);
BUG_ON(ret != 0);
par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &dummy);
spin_lock_irqsave(&par->dirty.lock, flags);
par->dirty.active = true;
spin_unlock_irqrestore(&par->dirty.lock, flags);
err_no_buffer:
vmw_fb_set_par(info);
vmw_fb_dirty_mark(par, 0, 0, info->var.xres, info->var.yres);
/* If there already was stuff dirty we won't
 * schedule new work, so let's do it now */
schedule_delayed_work(&info->deferred_work, 0);
return 0;
}
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_placement.h"
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t dummy;
int ret;
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
return -ENOMEM;
fifo->last_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->last_data_size = 0;
fifo->last_buffer_add = false;
fifo->last_buffer = vmalloc(fifo->last_buffer_size);
if (unlikely(fifo->last_buffer == NULL)) {
ret = -ENOMEM;
goto out_err;
}
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
fifo->using_bounce_buffer = false;
init_rwsem(&fifo->rwsem);
/*
* Allow mapping the first page read-only to user-space.
*/
DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
min <<= 2;
if (min < PAGE_SIZE)
min = PAGE_SIZE;
iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
wmb();
iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
dev_priv->fence_seq = (uint32_t) -100;
dev_priv->last_read_sequence = (uint32_t) -100;
iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
return vmw_fifo_send_fence(dev_priv, &dummy);
out_err:
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
return ret;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
mutex_lock(&dev_priv->hw_mutex);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
mutex_unlock(&dev_priv->hw_mutex);
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
mutex_lock(&dev_priv->hw_mutex);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
dev_priv->config_done_state);
vmw_write(dev_priv, SVGA_REG_ENABLE,
dev_priv->enable_state);
mutex_unlock(&dev_priv->hw_mutex);
if (likely(fifo->last_buffer != NULL)) {
vfree(fifo->last_buffer);
fifo->last_buffer = NULL;
}
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
fifo->static_buffer = NULL;
}
if (likely(fifo->dynamic_buffer != NULL)) {
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
}
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
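/*
 * Treat the FIFO as full when the room from next_cmd up to the top of
 * the FIFO plus the room from the bottom up to stop is no larger than
 * the requested number of bytes.
 */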
return ((max - next_cmd) + (stop - min) <= bytes);
}
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
int ret = 0;
unsigned long end_jiffies = jiffies + timeout;
DEFINE_WAIT(__wait);
DRM_INFO("Fifo wait noirq.\n");
for (;;) {
prepare_to_wait(&dev_priv->fifo_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (!vmw_fifo_is_full(dev_priv, bytes))
break;
if (time_after_eq(jiffies, end_jiffies)) {
ret = -EBUSY;
DRM_ERROR("SVGA device lockup.\n");
break;
}
schedule_timeout(1);
if (interruptible && signal_pending(current)) {
ret = -ERESTART;
break;
}
}
finish_wait(&dev_priv->fifo_queue, &__wait);
wake_up_all(&dev_priv->fifo_queue);
DRM_INFO("Fifo noirq exit.\n");
return ret;
}
static int vmw_fifo_wait(struct vmw_private *dev_priv,
uint32_t bytes, bool interruptible,
unsigned long timeout)
{
long ret = 1L;
unsigned long irq_flags;
if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
mutex_lock(&dev_priv->hw_mutex);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
vmw_write(dev_priv, SVGA_REG_IRQMASK,
vmw_read(dev_priv, SVGA_REG_IRQMASK) |
SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
else
ret = wait_event_timeout
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);
if (unlikely(ret == -ERESTARTSYS))
ret = -ERESTART;
else if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
vmw_write(dev_priv, SVGA_REG_IRQMASK,
vmw_read(dev_priv, SVGA_REG_IRQMASK) &
~SVGA_IRQFLAG_FIFO_PROGRESS);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
return ret;
}
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t max;
uint32_t min;
uint32_t next_cmd;
uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
int ret;
down_write(&fifo_state->rwsem);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
if (unlikely(bytes >= (max - min)))
goto out_err;
BUG_ON(fifo_state->reserved_size != 0);
BUG_ON(fifo_state->dynamic_buffer != NULL);
fifo_state->reserved_size = bytes;
while (1) {
uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
bool need_bounce = false;
bool reserve_in_place = false;
if (next_cmd >= stop) {
if (likely((next_cmd + bytes < max ||
(next_cmd + bytes == max && stop > min))))
reserve_in_place = true;
else if (vmw_fifo_is_full(dev_priv, bytes)) {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
} else
need_bounce = true;
} else {
if (likely((next_cmd + bytes < stop)))
reserve_in_place = true;
else {
ret = vmw_fifo_wait(dev_priv, bytes,
false, 3 * HZ);
if (unlikely(ret != 0))
goto out_err;
}
}
if (reserve_in_place) {
if (reserveable || bytes <= sizeof(uint32_t)) {
fifo_state->using_bounce_buffer = false;
if (reserveable)
iowrite32(bytes, fifo_mem +
SVGA_FIFO_RESERVED);
return fifo_mem + (next_cmd >> 2);
} else {
need_bounce = true;
}
}
if (need_bounce) {
fifo_state->using_bounce_buffer = true;
if (bytes < fifo_state->static_buffer_size)
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
return fifo_state->dynamic_buffer;
}
}
}
out_err:
fifo_state->reserved_size = 0;
up_write(&fifo_state->rwsem);
return NULL;
}
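/*
 * Helper for vmw_fifo_commit(): copy the bounce buffer into the FIFO
 * using the RESERVE mechanism. The full size is published through
 * SVGA_FIFO_RESERVED and the data is copied in at most two chunks,
 * wrapping from the top of the FIFO back to the bottom.
 */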
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t chunk_size = max - next_cmd;
uint32_t rest;
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
if (bytes < chunk_size)
chunk_size = bytes;
iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
mb();
memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
rest = bytes - chunk_size;
if (rest)
memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
rest);
}
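/*
 * Helper for vmw_fifo_commit() on devices without
 * SVGA_FIFO_CAP_RESERVE: copy the bounce buffer one 32-bit word at a
 * time, advancing SVGA_FIFO_NEXT_CMD after every word.
 */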
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
__le32 __iomem *fifo_mem,
uint32_t next_cmd,
uint32_t max, uint32_t min, uint32_t bytes)
{
uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
fifo_state->dynamic_buffer : fifo_state->static_buffer;
while (bytes > 0) {
iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
next_cmd += sizeof(uint32_t);
if (unlikely(next_cmd == max))
next_cmd = min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
mb();
bytes -= sizeof(uint32_t);
}
}
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
fifo_state->reserved_size = 0;
if (fifo_state->using_bounce_buffer) {
if (reserveable)
vmw_fifo_res_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
else
vmw_fifo_slow_copy(fifo_state, fifo_mem,
next_cmd, max, min, bytes);
if (fifo_state->dynamic_buffer) {
vfree(fifo_state->dynamic_buffer);
fifo_state->dynamic_buffer = NULL;
}
}
if (fifo_state->using_bounce_buffer || reserveable) {
next_cmd += bytes;
if (next_cmd >= max)
next_cmd -= max - min;
mb();
iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
}
if (reserveable)
iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
mb();
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
up_write(&fifo_state->rwsem);
}
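/*
 * Illustrative sketch only, not part of this commit: a typical caller
 * emits a command by reserving space, filling it in and committing.
 * SVGA_CMD_UPDATE and SVGAFifoCmdUpdate are assumed to be available
 * from svga_reg.h.
 */
static inline int vmw_fifo_example_update(struct vmw_private *dev_priv,
					  uint32_t x, uint32_t y,
					  uint32_t width, uint32_t height)
{
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	/* Fill in the command while the space is still reserved. */
	cmd->header = cpu_to_le32(SVGA_CMD_UPDATE);
	cmd->body.x = cpu_to_le32(x);
	cmd->body.y = cpu_to_le32(y);
	cmd->body.width = cpu_to_le32(width);
	cmd->body.height = cpu_to_le32(height);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	return 0;
}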
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *sequence)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
void *fm;
int ret = 0;
uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);
fm = vmw_fifo_reserve(dev_priv, bytes);
if (unlikely(fm == NULL)) {
down_write(&fifo_state->rwsem);
*sequence = dev_priv->fence_seq;
up_write(&fifo_state->rwsem);
ret = -ENOMEM;
(void)vmw_fallback_wait(dev_priv, false, true, *sequence,
false, 3*HZ);
goto out_err;
}
do {
*sequence = dev_priv->fence_seq++;
} while (*sequence == 0);
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
/*
* Don't request hardware to send a fence. The
* waiting code in vmwgfx_irq.c will emulate this.
*/
vmw_fifo_commit(dev_priv, 0);
return 0;
}
*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
cmd_fence = (struct svga_fifo_cmd_fence *)
((unsigned long)fm + sizeof(__le32));
iowrite32(*sequence, &cmd_fence->fence);
fifo_state->last_buffer_add = true;
vmw_fifo_commit(dev_priv, bytes);
fifo_state->last_buffer_add = false;
out_err:
return ret;
}
/**
* Map the first page of the FIFO read-only to user-space.
*/
static int vmw_fifo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
int ret;
unsigned long address = (unsigned long)vmf->virtual_address;
if (address != vma->vm_start)
return VM_FAULT_SIGBUS;
ret = vm_insert_pfn(vma, address, vma->vm_pgoff);
if (likely(ret == -EBUSY || ret == 0))
return VM_FAULT_NOPAGE;
else if (ret == -ENOMEM)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
}
static struct vm_operations_struct vmw_fifo_vm_ops = {
.fault = vmw_fifo_vm_fault,
.open = NULL,
.close = NULL
};
int vmw_fifo_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
struct vmw_private *dev_priv;
file_priv = (struct drm_file *)filp->private_data;
dev_priv = vmw_priv(file_priv->minor->dev);
if (vma->vm_pgoff != (dev_priv->mmio_start >> PAGE_SHIFT) ||
(vma->vm_end - vma->vm_start) != PAGE_SIZE)
return -EINVAL;
vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_SHARED;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_page_prot = ttm_io_prot(TTM_PL_FLAG_UNCACHED,
vma->vm_page_prot);
vma->vm_ops = &vmw_fifo_vm_ops;
return 0;
}
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "drmP.h"
#include "ttm/ttm_bo_driver.h"
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
*/
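/*
 * Descriptor list layout as built below: each descriptor page holds up
 * to desc_per_page {ppn, num_pages} entries, each describing a run of
 * contiguous guest pages. An entry with num_pages == 0 either chains
 * to the next descriptor page through its ppn or, with ppn == 0,
 * terminates the list.
 */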
static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
struct page *pages[],
unsigned long num_pages)
{
struct page *page, *next;
struct svga_guest_mem_descriptor *page_virtual = NULL;
struct svga_guest_mem_descriptor *desc_virtual = NULL;
unsigned int desc_per_page;
unsigned long prev_pfn;
unsigned long pfn;
int ret;
desc_per_page = PAGE_SIZE /
sizeof(struct svga_guest_mem_descriptor) - 1;
while (likely(num_pages != 0)) {
page = alloc_page(__GFP_HIGHMEM);
if (unlikely(page == NULL)) {
ret = -ENOMEM;
goto out_err;
}
list_add_tail(&page->lru, desc_pages);
/*
* Point previous page terminating descriptor to this
* page before unmapping it.
*/
if (likely(page_virtual != NULL)) {
desc_virtual->ppn = page_to_pfn(page);
kunmap_atomic(page_virtual, KM_USER0);
}
page_virtual = kmap_atomic(page, KM_USER0);
desc_virtual = page_virtual - 1;
prev_pfn = ~(0UL);
while (likely(num_pages != 0)) {
pfn = page_to_pfn(*pages);
if (pfn != prev_pfn + 1) {
if (desc_virtual - page_virtual ==
desc_per_page - 1)
break;
(++desc_virtual)->ppn = cpu_to_le32(pfn);
desc_virtual->num_pages = cpu_to_le32(1);
} else {
uint32_t tmp =
le32_to_cpu(desc_virtual->num_pages);
desc_virtual->num_pages = cpu_to_le32(tmp + 1);
}
prev_pfn = pfn;
--num_pages;
++pages;
}
(++desc_virtual)->ppn = cpu_to_le32(0);
desc_virtual->num_pages = cpu_to_le32(0);
}
if (likely(page_virtual != NULL))
kunmap_atomic(page_virtual, KM_USER0);
return 0;
out_err:
list_for_each_entry_safe(page, next, desc_pages, lru) {
list_del_init(&page->lru);
__free_page(page);
}
return ret;
}
static inline void vmw_gmr_free_descriptors(struct list_head *desc_pages)
{
struct page *page, *next;
list_for_each_entry_safe(page, next, desc_pages, lru) {
list_del_init(&page->lru);
__free_page(page);
}
}
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
int gmr_id, struct list_head *desc_pages)
{
struct page *page;
if (unlikely(list_empty(desc_pages)))
return;
page = list_entry(desc_pages->next, struct page, lru);
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, page_to_pfn(page));
mb();
mutex_unlock(&dev_priv->hw_mutex);
}
/**
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
* the number of used descriptors.
*/
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
unsigned long num_pages)
{
unsigned long prev_pfn = ~(0UL);
unsigned long pfn;
unsigned long descriptors = 0;
while (num_pages--) {
pfn = page_to_pfn(*pages++);
if (prev_pfn + 1 != pfn)
++descriptors;
prev_pfn = pfn;
}
return descriptors;
}
int vmw_gmr_bind(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo)
{
struct ttm_tt *ttm = bo->ttm;
unsigned long descriptors;
int ret;
uint32_t id;
struct list_head desc_pages;
if (!(dev_priv->capabilities & SVGA_CAP_GMR))
return -EINVAL;
ret = ttm_tt_populate(ttm);
if (unlikely(ret != 0))
return ret;
descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
return -EINVAL;
INIT_LIST_HEAD(&desc_pages);
ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
ttm->num_pages);
if (unlikely(ret != 0))
return ret;
ret = vmw_gmr_id_alloc(dev_priv, &id);
if (unlikely(ret != 0))
goto out_no_id;
vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
vmw_gmr_free_descriptors(&desc_pages);
vmw_dmabuf_set_gmr(bo, id);
return 0;
out_no_id:
vmw_gmr_free_descriptors(&desc_pages);
return ret;
}
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
{
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
wmb();
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
mb();
mutex_unlock(&dev_priv->hw_mutex);
}
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
param->value = vmw_overlay_num_overlays(dev_priv);
break;
case DRM_VMW_PARAM_NUM_FREE_STREAMS:
param->value = vmw_overlay_num_free_overlays(dev_priv);
break;
case DRM_VMW_PARAM_3D:
param->value = dev_priv->capabilities & SVGA_CAP_3D ? 1 : 0;
break;
case DRM_VMW_PARAM_FIFO_OFFSET:
param->value = dev_priv->mmio_start;
break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
return -EINVAL;
}
return 0;
}
int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct drm_vmw_fifo_debug_arg *arg =
(struct drm_vmw_fifo_debug_arg *)data;
__le32 __user *buffer = (__le32 __user *)
(unsigned long)arg->debug_buffer;
if (unlikely(fifo_state->last_buffer == NULL))
return -EINVAL;
if (arg->debug_buffer_size < fifo_state->last_data_size) {
arg->used_size = arg->debug_buffer_size;
arg->did_not_fit = 1;
} else {
arg->used_size = fifo_state->last_data_size;
arg->did_not_fit = 0;
}
if (copy_to_user(buffer, fifo_state->last_buffer, arg->used_size))
return -EFAULT;
return 0;
}
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
#define VMW_FENCE_WRAP (1 << 24)
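/*
 * Fence sequence numbers are 32-bit and may wrap. A fence is treated
 * as signaled when it is no more than VMW_FENCE_WRAP behind the last
 * sequence read back from the FIFO, which keeps the comparison valid
 * across wrap-around.
 */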
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *)arg;
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
spin_lock(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
spin_unlock(&dev_priv->irq_lock);
if (status & SVGA_IRQFLAG_ANY_FENCE)
wake_up_all(&dev_priv->fence_queue);
if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
wake_up_all(&dev_priv->fifo_queue);
if (likely(status)) {
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t sequence)
{
uint32_t busy;
mutex_lock(&dev_priv->hw_mutex);
busy = vmw_read(dev_priv, SVGA_REG_BUSY);
mutex_unlock(&dev_priv->hw_mutex);
return (busy == 0);
}
bool vmw_fence_signaled(struct vmw_private *dev_priv,
uint32_t sequence)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
dev_priv->last_read_sequence = ioread32(fifo_mem + SVGA_FIFO_FENCE);
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return true;
fifo_state = &dev_priv->fifo;
if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, sequence))
return true;
/**
* Below is to signal stale fences that have wrapped.
* First, block fence submission.
*/
down_read(&fifo_state->rwsem);
/**
* Then check if the sequence is higher than what we've actually
* emitted; if so, the fence is stale and reported as signaled.
*/
ret = ((dev_priv->fence_seq - sequence) > VMW_FENCE_WRAP);
up_read(&fifo_state->rwsem);
return ret;
}
int vmw_fallback_wait(struct vmw_private *dev_priv,
bool lazy,
bool fifo_idle,
uint32_t sequence,
bool interruptible,
unsigned long timeout)
{
struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
uint32_t count = 0;
uint32_t signal_seq;
int ret;
unsigned long end_jiffies = jiffies + timeout;
bool (*wait_condition)(struct vmw_private *, uint32_t);
DEFINE_WAIT(__wait);
wait_condition = (fifo_idle) ? &vmw_fifo_idle :
&vmw_fence_signaled;
/**
* Block command submission while waiting for idle.
*/
if (fifo_idle)
down_read(&fifo_state->rwsem);
signal_seq = dev_priv->fence_seq;
ret = 0;
for (;;) {
prepare_to_wait(&dev_priv->fence_queue, &__wait,
(interruptible) ?
TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (wait_condition(dev_priv, sequence))
break;
if (time_after_eq(jiffies, end_jiffies)) {
DRM_ERROR("SVGA device lockup.\n");
break;
}
if (lazy)
schedule_timeout(1);
else if ((++count & 0x0F) == 0) {
/**
* FIXME: Use schedule_hrtimeout here for
* newer kernels and lower CPU utilization.
*/
__set_current_state(TASK_RUNNING);
schedule();
__set_current_state((interruptible) ?
TASK_INTERRUPTIBLE :
TASK_UNINTERRUPTIBLE);
}
if (interruptible && signal_pending(current)) {
ret = -ERESTART;
break;
}
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle) {
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
}
wake_up_all(&dev_priv->fence_queue);
if (fifo_idle)
up_read(&fifo_state->rwsem);
return ret;
}
int vmw_wait_fence(struct vmw_private *dev_priv,
bool lazy, uint32_t sequence,
bool interruptible, unsigned long timeout)
{
long ret;
unsigned long irq_flags;
struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (likely(dev_priv->last_read_sequence - sequence < VMW_FENCE_WRAP))
return 0;
if (likely(vmw_fence_signaled(dev_priv, sequence)))
return 0;
vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
return vmw_fallback_wait(dev_priv, lazy, true, sequence,
interruptible, timeout);
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return vmw_fallback_wait(dev_priv, lazy, false, sequence,
interruptible, timeout);
mutex_lock(&dev_priv->hw_mutex);
if (atomic_add_return(1, &dev_priv->fence_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_ANY_FENCE,
dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
vmw_write(dev_priv, SVGA_REG_IRQMASK,
vmw_read(dev_priv, SVGA_REG_IRQMASK) |
SVGA_IRQFLAG_ANY_FENCE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
if (interruptible)
ret = wait_event_interruptible_timeout
(dev_priv->fence_queue,
vmw_fence_signaled(dev_priv, sequence),
timeout);
else
ret = wait_event_timeout
(dev_priv->fence_queue,
vmw_fence_signaled(dev_priv, sequence),
timeout);
if (unlikely(ret == -ERESTARTSYS))
ret = -ERESTART;
else if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
mutex_lock(&dev_priv->hw_mutex);
if (atomic_dec_and_test(&dev_priv->fence_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
vmw_write(dev_priv, SVGA_REG_IRQMASK,
vmw_read(dev_priv, SVGA_REG_IRQMASK) &
~SVGA_IRQFLAG_ANY_FENCE);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
mutex_unlock(&dev_priv->hw_mutex);
return ret;
}
void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
spin_lock_init(&dev_priv->irq_lock);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
int vmw_irq_postinstall(struct drm_device *dev)
{
return 0;
}
void vmw_irq_uninstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
#define VMW_FENCE_WAIT_TIMEOUT (3 * HZ)
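/*
 * The wait ioctl stores an end-of-wait cookie in the argument on the
 * first call, so that a wait restarted after a signal still times out
 * roughly VMW_FENCE_WAIT_TIMEOUT after the original call rather than
 * starting a fresh timeout.
 */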
int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_fence_wait_arg *arg =
(struct drm_vmw_fence_wait_arg *)data;
unsigned long timeout;
if (!arg->cookie_valid) {
arg->cookie_valid = 1;
arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
}
timeout = jiffies;
if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
return -EBUSY;
timeout = (unsigned long)arg->kernel_cookie - timeout;
return vmw_wait_fence(vmw_priv(dev), true, arg->sequence, true, timeout);
}
This diff has been collapsed.
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef VMWGFX_KMS_H_
#define VMWGFX_KMS_H_
#include "drmP.h"
#include "vmwgfx_drv.h"
#define vmw_framebuffer_to_vfb(x) \
container_of(x, struct vmw_framebuffer, base)
/**
* Base class for framebuffers
*
* @pin is called whenever a crtc uses this framebuffer
* @unpin is called when no crtc is using it any longer
*/
struct vmw_framebuffer {
struct drm_framebuffer base;
int (*pin)(struct vmw_framebuffer *fb);
int (*unpin)(struct vmw_framebuffer *fb);
};
#define vmw_crtc_to_du(x) \
container_of(x, struct vmw_display_unit, crtc)
/*
* Basic cursor manipulation
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
/**
* Base class display unit.
*
* Since the SVGA hw doesn't have a concept of a crtc, encoder or connector,
* the display unit is all of them at the same time. This is true for both
* legacy multimon and screen objects.
*/
struct vmw_display_unit {
struct drm_crtc crtc;
struct drm_encoder encoder;
struct drm_connector connector;
struct vmw_surface *cursor_surface;
struct vmw_dma_buffer *cursor_dmabuf;
size_t cursor_age;
int cursor_x;
int cursor_y;
int hotspot_x;
int hotspot_y;
unsigned unit;
};
/*
* Shared display unit functions - vmwgfx_kms.c
*/
void vmw_display_unit_cleanup(struct vmw_display_unit *du);
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height);
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
/*
* Legacy display unit functions - vmwgfx_ldu.c
*/
int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
#endif
This diff has been collapsed.
This diff has been collapsed.
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/**
* This file contains virtual hardware defines for kernel space.
*/
#ifndef _VMWGFX_REG_H_
#define _VMWGFX_REG_H_
#include <linux/types.h>
#define VMWGFX_INDEX_PORT 0x0
#define VMWGFX_VALUE_PORT 0x1
#define VMWGFX_IRQSTATUS_PORT 0x8
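/*
 * Guest memory region (GMR) descriptor as consumed by the device.
 * See vmwgfx_gmr.c for how descriptor pages are chained and
 * terminated.
 */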
struct svga_guest_mem_descriptor {
__le32 ppn;
__le32 num_pages;
};
struct svga_fifo_cmd_fence {
__le32 fence;
};
#define SVGA_SYNC_GENERIC 1
#define SVGA_SYNC_FIFOFULL 2
#include "svga_types.h"
#include "svga3d_reg.h"
#endif
This diff has been collapsed.
/**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "drmP.h"
#include "vmwgfx_drv.h"
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
struct vmw_private *dev_priv;
if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
if (vmw_fifo_mmap(filp, vma) == 0)
return 0;
return drm_mmap(filp, vma);
}
file_priv = (struct drm_file *)filp->private_data;
dev_priv = vmw_priv(file_priv->minor->dev);
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
{
DRM_INFO("global init.\n");
return ttm_mem_global_init(ref->object);
}
static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
struct ttm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->mem_global_ref;
global_ref->global_type = TTM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &vmw_ttm_mem_global_init;
global_ref->release = &vmw_ttm_mem_global_release;
ret = ttm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting.\n");
return ret;
}
dev_priv->bo_global_ref.mem_glob =
dev_priv->mem_global_ref.object;
global_ref = &dev_priv->bo_global_ref.ref;
global_ref->global_type = TTM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
ret = ttm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM buffer objects.\n");
goto out_no_bo;
}
return 0;
out_no_bo:
ttm_global_item_unref(&dev_priv->mem_global_ref);
return ret;
}
void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
ttm_global_item_unref(&dev_priv->mem_global_ref);
}
This diff has been collapsed.
This diff has been collapsed.
This diff has been collapsed.