Commit a2c0a97b authored by Jesse Barnes, committed by Dave Airlie

drm: GEM mmap support

Add core support for mapping of GEM objects.  Drivers should provide a
vm_operations_struct if they want to support page faulting of objects.
The code for handling GEM object offsets was taken from TTM, which was
written by Thomas Hellström.
Signed-off-by: Jesse Barnes <jbarnes@virtuousgeek.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Parent a9587470
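As a rough illustration of how a driver would hook into this (not part of the commit; the example_* names are hypothetical), the driver points its file_operations.mmap at drm_gem_mmap() and supplies a vm_operations_struct through the new gem_vm_ops field. The stub fault handler below only signals SIGBUS; a real driver would bind the object and insert the faulting pages there.

#include "drmP.h"	/* DRM core header of this era */

/* Hypothetical fault handler: a real driver would migrate/bind the object
 * (e.g. into the GTT) and insert the faulting PFN instead of failing. */
static int example_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	(void)obj;		/* object lookup works; page insertion omitted */
	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct example_gem_vm_ops = {
	.fault = example_gem_fault,
};

static struct drm_driver example_driver = {
	.driver_features = DRIVER_GEM,
	.gem_vm_ops = &example_gem_vm_ops,	/* consumed by drm_gem_mmap() */
	.fops = {
		.owner = THIS_MODULE,
		.open = drm_open,
		.release = drm_release,
		.mmap = drm_gem_mmap,	/* falls back to drm_mmap() for legacy maps */
	},
};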
@@ -261,6 +261,9 @@ static int drm_addmap_core(struct drm_device * dev, unsigned int offset,
 		}
 		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
+		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
 		break;
 	}
 	case _DRM_SCATTER_GATHER:
@@ -429,6 +432,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
...
@@ -209,6 +209,7 @@ int drm_lastclose(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		drm_dma_takedown(dev);
+	dev->dev_mapping = NULL;
 	mutex_unlock(&dev->struct_mutex);
 	DRM_DEBUG("lastclose completed\n");
@@ -273,6 +274,8 @@ EXPORT_SYMBOL(drm_init);
  */
 static void drm_cleanup(struct drm_device * dev)
 {
+	struct drm_driver *driver = dev->driver;
+
 	DRM_DEBUG("\n");
 	if (!dev) {
@@ -304,6 +307,9 @@ static void drm_cleanup(struct drm_device * dev)
 	drm_ht_remove(&dev->map_hash);
 	drm_ctxbitmap_cleanup(dev);
+	if (driver->driver_features & DRIVER_GEM)
+		drm_gem_destroy(dev);
+
 	drm_put_minor(&dev->primary);
 	if (drm_put_dev(dev))
 		DRM_ERROR("Cannot unload module\n");
...
@@ -133,11 +133,21 @@ int drm_open(struct inode *inode, struct file *filp)
 		spin_lock(&dev->count_lock);
 		if (!dev->open_count++) {
 			spin_unlock(&dev->count_lock);
-			return drm_setup(dev);
+			retcode = drm_setup(dev);
+			goto out;
 		}
 		spin_unlock(&dev->count_lock);
 	}
+out:
+	mutex_lock(&dev->struct_mutex);
+	if (dev->dev_mapping == NULL)
+		dev->dev_mapping = inode->i_mapping;
+	else if (dev->dev_mapping != inode->i_mapping)
+		WARN(1, "dev->dev_mapping not inode mapping (%p expected %p)\n",
+		     dev->dev_mapping, inode->i_mapping);
+	mutex_unlock(&dev->struct_mutex);
+
 	return retcode;
 }
 EXPORT_SYMBOL(drm_open);
...
@@ -64,6 +64,13 @@
  * up at a later date, and as our interface with shmfs for memory allocation.
  */
 
+/*
+ * We make up offsets for buffer objects so we can recognize them at
+ * mmap time.
+ */
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+
 /**
  * Initialize the GEM device fields
  */
@@ -71,6 +78,8 @@
 int
 drm_gem_init(struct drm_device *dev)
 {
+	struct drm_gem_mm *mm;
+
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 	atomic_set(&dev->object_count, 0);
@@ -79,9 +88,41 @@ drm_gem_init(struct drm_device *dev)
 	atomic_set(&dev->pin_memory, 0);
 	atomic_set(&dev->gtt_count, 0);
 	atomic_set(&dev->gtt_memory, 0);
+
+	mm = drm_calloc(1, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	if (!mm) {
+		DRM_ERROR("out of memory\n");
+		return -ENOMEM;
+	}
+
+	dev->mm_private = mm;
+
+	if (drm_ht_create(&mm->offset_hash, 19)) {
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		return -ENOMEM;
+	}
+
+	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
+			DRM_FILE_PAGE_OFFSET_SIZE)) {
+		drm_ht_remove(&mm->offset_hash);
+		drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+		return -ENOMEM;
+	}
+
 	return 0;
 }
+
+void
+drm_gem_destroy(struct drm_device *dev)
+{
+	struct drm_gem_mm *mm = dev->mm_private;
+
+	drm_mm_takedown(&mm->offset_manager);
+	drm_ht_remove(&mm->offset_hash);
+	drm_free(mm, sizeof(struct drm_gem_mm), DRM_MEM_MM);
+	dev->mm_private = NULL;
+}
+
 /**
  * Allocate a GEM object of the specified size with shmfs backing store
  */
@@ -419,3 +460,71 @@ drm_gem_object_handle_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_handle_free);
+
+/**
+ * drm_gem_mmap - memory map routine for GEM objects
+ * @filp: DRM file pointer
+ * @vma: VMA for the area to be mapped
+ *
+ * If a driver supports GEM object mapping, mmap calls on the DRM file
+ * descriptor will end up here.
+ *
+ * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * contain the fake offset we created when the GTT map ioctl was called on
+ * the object), we set up the driver fault handler so that any accesses
+ * to the object can be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring.
+ */
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map *map = NULL;
+	struct drm_gem_object *obj;
+	struct drm_hash_item *hash;
+	unsigned long prot;
+	int ret = 0;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+		mutex_unlock(&dev->struct_mutex);
+		return drm_mmap(filp, vma);
+	}
+
+	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
+	if (!map ||
+	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
+	/* Check for valid size. */
+	if (map->size < vma->vm_end - vma->vm_start) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	obj = map->handle;
+	if (!obj->dev->driver->gem_vm_ops) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
+	vma->vm_ops = obj->dev->driver->gem_vm_ops;
+	vma->vm_private_data = map->handle;
+	/* FIXME: use pgprot_writecombine when available */
+	prot = pgprot_val(vma->vm_page_prot);
+	prot |= _PAGE_CACHE_WC;
+	vma->vm_page_prot = __pgprot(prot);
+
+	vma->vm_file = filp;	/* Needed for drm_vm_open() */
+	drm_vm_open_locked(vma);
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_mmap);
@@ -127,6 +127,7 @@ int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_insert_item);
 
 /*
  * Just insert an item and return any "bits" bit key that hasn't been
@@ -188,6 +189,7 @@ int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
 	ht->fill--;
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_remove_item);
 
 void drm_ht_remove(struct drm_open_hash *ht)
 {
...
@@ -267,6 +267,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
+	case _DRM_GEM:
+		DRM_ERROR("tried to rmmap GEM object\n");
+		break;
 	}
 	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 }
@@ -399,7 +402,7 @@ static struct vm_operations_struct drm_vm_sg_ops = {
  * Create a new drm_vma_entry structure as the \p vma private data entry and
  * add it to drm_device::vmalist.
  */
-static void drm_vm_open_locked(struct vm_area_struct *vma)
+void drm_vm_open_locked(struct vm_area_struct *vma)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -540,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
  * according to the mapping type and remaps the pages. Finally sets the file
  * pointer and calls vm_open().
 */
-static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
+int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
...
@@ -173,6 +173,7 @@ enum drm_map_type {
 	_DRM_AGP = 3,		  /**< AGP/GART */
 	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
 	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
+	_DRM_GEM = 6,		  /**< GEM object */
 };
 
 /**
...
@@ -528,6 +528,7 @@ struct drm_map_list {
 	struct drm_map *map;			/**< mapping */
 	uint64_t user_token;
 	struct drm_master *master;
+	struct drm_mm_node *file_offset_node;	/**< fake offset */
 };
 
 typedef struct drm_map drm_local_map_t;
@@ -567,6 +568,14 @@ struct drm_ati_pcigart_info {
 	int table_size;
 };
 
+/**
+ * GEM specific mm private for tracking GEM objects
+ */
+struct drm_gem_mm {
+	struct drm_mm offset_manager;		/**< Offset mgmt for buffer objects */
+	struct drm_open_hash offset_hash;	/**< User token hash table for maps */
+};
+
 /**
  * This structure defines the drm_mm memory object, which will be used by the
  * DRM for its buffer objects.
@@ -584,6 +593,9 @@ struct drm_gem_object {
 	/** File representing the shmem storage */
 	struct file *filp;
 
+	/* Mapping info for this object */
+	struct drm_map_list map_list;
+
 	/**
 	 * Size of the object, in bytes.  Immutable over the object's
 	 * lifetime.
@@ -758,6 +770,9 @@ struct drm_driver {
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
 
+	/* Driver private ops for this object */
+	struct vm_operations_struct *gem_vm_ops;
+
 	int major;
 	int minor;
 	int patchlevel;
@@ -910,6 +925,8 @@ struct drm_device {
 	struct drm_sg_mem *sg;	/**< Scatter gather memory */
 	int num_crtcs;		/**< Number of CRTCs on this device */
 	void *dev_private;	/**< device private data */
+	void *mm_private;
+	struct address_space *dev_mapping;
 	struct drm_sigdata sigdata;	/**< For block_all_signals */
 	sigset_t sigmask;
@@ -1026,6 +1043,8 @@ extern int drm_release(struct inode *inode, struct file *filp);
 
 /* Mapping support (drm_vm.h) */
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
+extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
+extern void drm_vm_open_locked(struct vm_area_struct *vma);
 extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
 extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1287,10 +1306,12 @@ extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
 
 /* Graphics Execution Manager library functions (drm_gem.c) */
 int drm_gem_init(struct drm_device *dev);
+void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_free(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 void drm_gem_object_handle_free(struct kref *kref);
+int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 
 static inline void
 drm_gem_object_reference(struct drm_gem_object *obj)
...
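The commit adds the offset manager and hash table but leaves fake-offset allocation to drivers. As a sketch only (not part of this commit; the function name is hypothetical and error unwinding is simplified), a driver could reserve an offset and publish it to drm_gem_mmap() roughly like this; userspace would then mmap() the DRM fd at (map_list.hash.key << PAGE_SHIFT).

/* Hypothetical helper: allocate a fake mmap offset for a GEM object using
 * the drm_gem_mm offset_manager/offset_hash introduced above. */
static int example_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	/* Describe the object as a _DRM_GEM map so drm_gem_mmap() can find it. */
	list->map = drm_calloc(1, sizeof(struct drm_map), DRM_MEM_DRIVER);
	if (!list->map)
		return -ENOMEM;
	list->map->type = _DRM_GEM;
	list->map->size = obj->size;
	list->map->handle = obj;

	/* Reserve a page-aligned range in the fake offset space. */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);
	if (!list->file_offset_node)
		goto err_free_map;
	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node)
		goto err_free_map;

	/* Hash the start page so drm_gem_mmap() can look it up by vm_pgoff. */
	list->hash.key = list->file_offset_node->start;
	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
		drm_mm_put_block(list->file_offset_node);
		goto err_free_map;
	}
	return 0;

err_free_map:
	drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
	list->map = NULL;
	return -ENOMEM;
}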