Commit f7ea4a4b authored by Linus Torvalds

Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (44 commits)
  drm/i915: fix ioremap of a user address for non-root (CVE-2008-3831)
  drm: make CONFIG_DRM depend on CONFIG_SHMEM.
  radeon: fix PCI bus mastering support enables.
  radeon: add RS400 family support.
  drm/radeon: add support for RS740 IGP chipsets.
  i915: GM45 has GM965-style MCH setup.
  i915: Don't run retire work handler while suspended
  i915: Map status page cached for chips with GTT-based HWS location.
  i915: Fix up ring initialization to cover G45 oddities
  i915: Use non-reserved status page index for breadcrumb
  drm: Increment dev_priv->irq_received so i915_gem_interrupts count works.
  drm: kill drm_device->irq
  drm: wbinvd is cache coherent.
  i915: add missing return in error path.
  i915: fixup permissions on gem ioctls.
  drm: Clean up many sparse warnings in i915.
  drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
  drm: G33-class hardware has a newer 965-style MCH (no DCC register).
  drm: Avoid oops in GEM execbuffers with bad arguments.
  DRM: Return -EBADF on bad object in flink, and return current name if it exists.
  ...
......@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
return (void*) vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
struct page *kmap_atomic_to_page(void *ptr)
{
......
......@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
help
Kernel-level support for the Direct Rendering Infrastructure (DRI)
introduced in XFree86 4.0. If you say Y here, you need to select
......@@ -87,6 +87,7 @@ config DRM_MGA
config DRM_SIS
tristate "SiS video cards"
depends on DRM && AGP
depends on FB_SIS || FB_SIS=n
help
Choose this option if you have a SiS 630 or compatible video
chipset. If M is selected the module will be called sis. AGP
......
......@@ -4,8 +4,9 @@
ccflags-y := -Iinclude/drm
drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
......
......@@ -33,6 +33,7 @@
#include "drmP.h"
#include <linux/module.h>
#include <asm/agp.h>
#if __OS_HAS_AGP
......@@ -452,4 +453,53 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
return agp_unbind_memory(handle);
}
#endif /* __OS_HAS_AGP */
/**
* Binds a collection of pages into AGP memory at the given offset, returning
* the AGP memory structure containing them.
*
* No reference is held on the pages during this time -- it is up to the
* caller to handle that.
*/
DRM_AGP_MEM *
drm_agp_bind_pages(struct drm_device *dev,
struct page **pages,
unsigned long num_pages,
uint32_t gtt_offset,
u32 type)
{
DRM_AGP_MEM *mem;
int ret, i;
DRM_DEBUG("\n");
mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
type);
if (mem == NULL) {
DRM_ERROR("Failed to allocate memory for %ld pages\n",
num_pages);
return NULL;
}
for (i = 0; i < num_pages; i++)
mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
mem->page_count = num_pages;
mem->is_flushed = true;
ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
if (ret != 0) {
DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
agp_free_memory(mem);
return NULL;
}
return mem;
}
EXPORT_SYMBOL(drm_agp_bind_pages);
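As a usage sketch (not part of the patch): a driver that manages its own backing pages could wire the new helper up as below. The AGP_USER_MEMORY type and the surrounding bookkeeping are illustrative assumptions; per the comment above, the caller must hold its own references on the pages.
static int example_bind_object(struct drm_device *dev, struct page **pages,
			       unsigned long num_pages, uint32_t gtt_offset)
{
	DRM_AGP_MEM *agp_mem;

	agp_mem = drm_agp_bind_pages(dev, pages, num_pages, gtt_offset,
				     AGP_USER_MEMORY);
	if (agp_mem == NULL)
		return -ENOMEM;
	/* stash agp_mem somewhere for a later drm_unbind_agp()/drm_free_agp() */
	return 0;
}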
void drm_agp_chipset_flush(struct drm_device *dev)
{
agp_flush_chipset(dev->agp->bridge);
}
EXPORT_SYMBOL(drm_agp_chipset_flush);
#endif /* __OS_HAS_AGP */
/**************************************************************************
*
* Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include "drmP.h"
#if defined(CONFIG_X86)
static void
drm_clflush_page(struct page *page)
{
uint8_t *page_virtual;
unsigned int i;
if (unlikely(page == NULL))
return;
page_virtual = kmap_atomic(page, KM_USER0);
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(page_virtual + i);
kunmap_atomic(page_virtual, KM_USER0);
}
#endif
void
drm_clflush_pages(struct page *pages[], unsigned long num_pages)
{
#if defined(CONFIG_X86)
if (cpu_has_clflush) {
unsigned long i;
mb();
for (i = 0; i < num_pages; ++i)
drm_clflush_page(*pages++);
mb();
return;
}
wbinvd();
#endif
}
EXPORT_SYMBOL(drm_clflush_pages);
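To make the flush path concrete, here are the numbers for a typical x86 part (an illustrative example; the real stride comes from CPUID at boot):
/*
 * With PAGE_SIZE == 4096 and boot_cpu_data.x86_clflush_size == 64,
 * drm_clflush_page() issues 4096 / 64 = 64 clflush instructions per
 * page, and the mb() pair in drm_clflush_pages() fences those flushes
 * against surrounding memory accesses. CPUs without clflush fall back
 * to wbinvd(), which writes back and invalidates every cache line in
 * the machine at once.
 */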
......@@ -116,7 +116,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
......
......@@ -246,7 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
priv->filp = filp;
priv->uid = current->euid;
priv->uid = current_euid();
priv->pid = task_pid_nr(current);
priv->minor = idr_find(&drm_minors_idr, minor_id);
priv->ioctl_count = 0;
......@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_open(dev, priv);
if (dev->driver->open) {
ret = dev->driver->open(dev, priv);
if (ret < 0)
......@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
dev->driver->reclaim_buffers(dev, file_priv);
}
if (dev->driver->driver_features & DRIVER_GEM)
drm_gem_release(dev, file_priv);
drm_fasync(-1, filp, 0);
mutex_lock(&dev->ctxlist_mutex);
......
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include "drmP.h"
/** @file drm_gem.c
*
* This file provides some of the base ioctls and library routines for
* the graphics memory manager implemented by each device driver.
*
* Because various devices have different requirements in terms of
* synchronization and migration strategies, implementing that is left up to
* the driver, and all that the general API provides should be generic --
* allocating objects, reading/writing data with the cpu, freeing objects.
* Even there, platform-dependent optimizations for reading/writing data with
* the CPU mean we'll likely hook those out to driver-specific calls. However,
* the DRI2 implementation wants to have at least allocate/mmap be generic.
*
* The goal was to have swap-backed object allocation managed through
* struct file. However, file descriptors as handles to a struct file have
* two major failings:
* - Process limits prevent more than 1024 or so being used at a time by
* default.
* - Inability to allocate high fds will aggravate the X Server's select()
* handling, and likely that of many GL client applications as well.
*
* This led to a plan of using our own integer IDs (called handles, following
* DRM terminology) to mimic fds, and implement the fd syscalls we need as
* ioctls. The objects themselves will still include the struct file so
* that we can transition to fds if the required kernel infrastructure shows
* up at a later date, and as our interface with shmfs for memory allocation.
*/
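A hedged userspace-side sketch of the handle/name model described above, using the three ioctls this file adds (error handling omitted; fd is assumed to be an open, authenticated DRM device node):
#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"

static void example_share_object(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open open_arg = { 0 };
	struct drm_gem_close close_arg = { 0 };

	/* export a global name that any authenticated client may use */
	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);

	/* a second client turns the name back into its own handle */
	open_arg.name = flink.name;
	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);

	/* handles are per-file; closing one drops its reference */
	close_arg.handle = open_arg.handle;
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_arg);
}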
/**
* Initialize the GEM device fields
*/
int
drm_gem_init(struct drm_device *dev)
{
spin_lock_init(&dev->object_name_lock);
idr_init(&dev->object_name_idr);
atomic_set(&dev->object_count, 0);
atomic_set(&dev->object_memory, 0);
atomic_set(&dev->pin_count, 0);
atomic_set(&dev->pin_memory, 0);
atomic_set(&dev->gtt_count, 0);
atomic_set(&dev->gtt_memory, 0);
return 0;
}
/**
* Allocate a GEM object of the specified size with shmfs backing store
*/
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
struct drm_gem_object *obj;
BUG_ON((size & (PAGE_SIZE - 1)) != 0);
obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
return NULL;
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, 0);
if (IS_ERR(obj->filp)) {
kfree(obj);
return NULL;
}
kref_init(&obj->refcount);
kref_init(&obj->handlecount);
obj->size = size;
if (dev->driver->gem_init_object != NULL &&
dev->driver->gem_init_object(obj) != 0) {
fput(obj->filp);
kfree(obj);
return NULL;
}
atomic_inc(&dev->object_count);
atomic_add(obj->size, &dev->object_memory);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_alloc);
/**
* Removes the mapping from handle to filp for this object.
*/
static int
drm_gem_handle_delete(struct drm_file *filp, int handle)
{
struct drm_device *dev;
struct drm_gem_object *obj;
/* This is gross. The idr system doesn't let us try a delete and
* return an error code. It just spews if you fail at deleting.
* So, we have to grab a lock around finding the object and then
* doing the delete on it and dropping the refcount, or the user
* could race us to double-decrement the refcount and cause a
* use-after-free later. Given the frequency of our handle lookups,
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return -EINVAL;
}
dev = obj->dev;
/* Release reference and decrement refcount. */
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
mutex_lock(&dev->struct_mutex);
drm_gem_object_handle_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Create a handle for this object. This adds a handle reference
* to the object, which includes a regular reference count. Callers
* will likely want to dereference the object afterwards.
*/
int
drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
int *handlep)
{
int ret;
/*
* Get the user-visible handle using idr.
*/
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0)
return ret;
drm_gem_object_handle_reference(obj);
return 0;
}
EXPORT_SYMBOL(drm_gem_handle_create);
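The again: loop above is the idiom the older idr API requires: preload outside the spinlock, allocate under it, and retry on -EAGAIN because a concurrent allocator may have consumed the preloaded node in between. Restated generically (a sketch, with the caller's locking elided):
static int example_idr_alloc(struct idr *idr, void *ptr, int *id)
{
	int ret;
again:
	if (idr_pre_get(idr, GFP_KERNEL) == 0)
		return -ENOMEM;
	/* the caller's spinlock is held across the next call in real code */
	ret = idr_get_new_above(idr, ptr, 1, id);
	if (ret == -EAGAIN)
		goto again;
	return ret;
}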
/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
int handle)
{
struct drm_gem_object *obj;
spin_lock(&filp->table_lock);
/* Check if we currently have a reference on the object */
obj = idr_find(&filp->object_idr, handle);
if (obj == NULL) {
spin_unlock(&filp->table_lock);
return NULL;
}
drm_gem_object_reference(obj);
spin_unlock(&filp->table_lock);
return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
* Releases the handle to an mm object.
*/
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_close *args = data;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
ret = drm_gem_handle_delete(file_priv, args->handle);
return ret;
}
/**
* Create a global name for an object, returning the name.
*
* Note that the name does not hold a reference; when the object
* is freed, the name goes away.
*/
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_flink *args = data;
struct drm_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EBADF;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
return -ENOMEM;
spin_lock(&dev->object_name_lock);
if (obj->name) {
args->name = obj->name;
spin_unlock(&dev->object_name_lock);
return 0;
}
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
spin_unlock(&dev->object_name_lock);
if (ret == -EAGAIN)
goto again;
if (ret != 0) {
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/*
* Leave the reference from the lookup around as the
* name table now holds one
*/
args->name = (uint64_t) obj->name;
return 0;
}
/**
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_gem_open *args = data;
struct drm_gem_object *obj;
int ret;
int handle;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
drm_gem_object_reference(obj);
spin_unlock(&dev->object_name_lock);
if (!obj)
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
mutex_lock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
if (ret)
return ret;
args->handle = handle;
args->size = obj->size;
return 0;
}
/**
* Called at device open time, sets up the structure for handling refcounting
* of mm objects.
*/
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
idr_init(&file_private->object_idr);
spin_lock_init(&file_private->table_lock);
}
/**
* Called at device close to release the file's
* handle references on objects.
*/
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
drm_gem_object_handle_unreference(obj);
return 0;
}
/**
* Called at close time when the filp is going away.
*
* Releases any remaining references on objects by this filp.
*/
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
mutex_unlock(&dev->struct_mutex);
}
/**
* Called after the last reference to the object has been lost.
*
* Frees the object
*/
void
drm_gem_object_free(struct kref *kref)
{
struct drm_gem_object *obj = (struct drm_gem_object *) kref;
struct drm_device *dev = obj->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
fput(obj->filp);
atomic_dec(&dev->object_count);
atomic_sub(obj->size, &dev->object_memory);
kfree(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
* called before drm_gem_object_free or we'll be touching
* freed memory
*/
void
drm_gem_object_handle_free(struct kref *kref)
{
struct drm_gem_object *obj = container_of(kref,
struct drm_gem_object,
handlecount);
struct drm_device *dev = obj->dev;
/* Remove any name for this object */
spin_lock(&dev->object_name_lock);
if (obj->name) {
idr_remove(&dev->object_name_idr, obj->name);
spin_unlock(&dev->object_name_lock);
/*
* The object name held a reference to this object, drop
* that now.
*/
drm_gem_object_unreference(obj);
} else
spin_unlock(&dev->object_name_lock);
}
EXPORT_SYMBOL(drm_gem_object_handle_free);
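To summarize the object lifetime rules this file implements (a reading of the code above, not text from the patch):
/*
 * obj->refcount    - one reference per userspace handle, one per flink
 *                    name-table entry, plus any in-kernel users; when it
 *                    reaches zero, drm_gem_object_free() runs.
 * obj->handlecount - one reference per userspace handle; when the last
 *                    handle is closed, drm_gem_object_handle_free()
 *                    removes the flink name and drops the reference the
 *                    name table held.
 *
 * Once the last handle goes away the name is removed too, so the object
 * then survives only while some other regular reference remains.
 */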
......@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
p->irq = dev->irq;
p->irq = dev->pdev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
p->irq);
......@@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
static void vblank_disable_fn(unsigned long arg)
{
struct drm_device *dev = (struct drm_device *)arg;
unsigned long irqflags;
int i;
if (!dev->vblank_disable_allowed)
return;
for (i = 0; i < dev->num_crtcs; i++) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
dev->vblank_enabled[i]) {
DRM_DEBUG("disabling vblank on crtc %d\n", i);
dev->last_vblank[i] =
dev->driver->get_vblank_counter(dev, i);
dev->driver->disable_vblank(dev, i);
dev->vblank_enabled[i] = 0;
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
}
static void drm_vblank_cleanup(struct drm_device *dev)
{
/* Bail if the driver didn't call drm_vblank_init() */
if (dev->num_crtcs == 0)
return;
del_timer(&dev->vblank_disable_timer);
vblank_disable_fn((unsigned long)dev);
drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
dev->num_crtcs, DRM_MEM_DRIVER);
drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
DRM_MEM_DRIVER);
drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
dev->num_crtcs, DRM_MEM_DRIVER);
dev->num_crtcs = 0;
}
int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
int i, ret = -ENOMEM;
setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
(unsigned long)dev);
spin_lock_init(&dev->vbl_lock);
atomic_set(&dev->vbl_signal_pending, 0);
dev->num_crtcs = num_crtcs;
dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vbl_queue)
goto err;
dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vbl_sigs)
goto err;
dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->_vblank_count)
goto err;
dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
DRM_MEM_DRIVER);
if (!dev->vblank_refcount)
goto err;
dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_enabled)
goto err;
dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
if (!dev->last_vblank)
goto err;
dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
DRM_MEM_DRIVER);
if (!dev->vblank_inmodeset)
goto err;
/* Zero per-crtc vblank stuff */
for (i = 0; i < num_crtcs; i++) {
init_waitqueue_head(&dev->vbl_queue[i]);
INIT_LIST_HEAD(&dev->vbl_sigs[i]);
atomic_set(&dev->_vblank_count[i], 0);
atomic_set(&dev->vblank_refcount[i], 0);
}
dev->vblank_disable_allowed = 0;
return 0;
err:
drm_vblank_cleanup(dev);
return ret;
}
EXPORT_SYMBOL(drm_vblank_init);
/**
* Install IRQ handler.
*
* \param dev DRM device.
* \param irq IRQ number.
*
* Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
* Initializes the IRQ related data. Installs the handler, calling the driver
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
static int drm_irq_install(struct drm_device * dev)
int drm_irq_install(struct drm_device *dev)
{
int ret;
int ret = 0;
unsigned long sh_flags = 0;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
if (dev->irq == 0)
if (dev->pdev->irq == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
......@@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("irq=%d\n", dev->irq);
if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
init_waitqueue_head(&dev->vbl_queue);
spin_lock_init(&dev->vbl_lock);
INIT_LIST_HEAD(&dev->vbl_sigs);
INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
......@@ -127,8 +228,9 @@ static int drm_irq_install(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
sh_flags = IRQF_SHARED;
ret = request_irq(dev->irq, dev->driver->irq_handler,
ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
sh_flags, dev->devname, dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
......@@ -137,10 +239,16 @@ static int drm_irq_install(struct drm_device * dev)
}
/* After installing handler */
dev->driver->irq_postinstall(dev);
ret = dev->driver->irq_postinstall(dev);
if (ret < 0) {
mutex_lock(&dev->struct_mutex);
dev->irq_enabled = 0;
mutex_unlock(&dev->struct_mutex);
}
return 0;
return ret;
}
EXPORT_SYMBOL(drm_irq_install);
/**
* Uninstall the IRQ handler.
......@@ -164,17 +272,18 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
DRM_DEBUG("irq=%d\n", dev->irq);
DRM_DEBUG("irq=%d\n", dev->pdev->irq);
dev->driver->irq_uninstall(dev);
free_irq(dev->irq, dev);
free_irq(dev->pdev->irq, dev);
drm_vblank_cleanup(dev);
dev->locked_tasklet_func = NULL;
return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
/**
......@@ -201,7 +310,7 @@ int drm_control(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
ctl->irq != dev->irq)
ctl->irq != dev->pdev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
......@@ -213,6 +322,174 @@ int drm_control(struct drm_device *dev, void *data,
}
}
/**
* drm_vblank_count - retrieve "cooked" vblank counter value
* @dev: DRM device
* @crtc: which counter to retrieve
*
* Fetches the "cooked" vblank count value that represents the number of
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*/
u32 drm_vblank_count(struct drm_device *dev, int crtc)
{
return atomic_read(&dev->_vblank_count[crtc]);
}
EXPORT_SYMBOL(drm_vblank_count);
/**
* drm_update_vblank_count - update the master vblank counter
* @dev: DRM device
* @crtc: counter to update
*
* Call back into the driver to update the appropriate vblank counter
* (specified by @crtc). Deal with wraparound, if it occurred, and
* update the last read value so we can deal with wraparound on the next
* call if necessary.
*
* Only necessary when going from off->on, to account for frames we
* didn't get an interrupt for.
*
* Note: caller must hold dev->vbl_lock since this reads & writes
* device vblank fields.
*/
static void drm_update_vblank_count(struct drm_device *dev, int crtc)
{
u32 cur_vblank, diff;
/*
* Interrupts were disabled prior to this call, so deal with counter
* wrap if needed.
* NOTE! It's possible we lost a full dev->max_vblank_count events
* here if the register is small or we had vblank interrupts off for
* a long time.
*/
cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
diff = cur_vblank - dev->last_vblank[crtc];
if (cur_vblank < dev->last_vblank[crtc]) {
diff += dev->max_vblank_count;
DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
crtc, dev->last_vblank[crtc], cur_vblank, diff);
}
DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
crtc, diff);
atomic_add(diff, &dev->_vblank_count[crtc]);
}
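A worked example of the wrap correction, assuming a 24-bit hardware counter whose driver reports the counter period, max_vblank_count == 0x1000000 (values chosen for round numbers):
/*
 * last_vblank[crtc] = 0xfffff0, cur_vblank = 0x000010
 * diff = 0x000010 - 0xfffff0 = 0xff000020   (u32 underflow)
 * cur_vblank < last_vblank, so diff += 0x1000000
 * diff = 0x00000020 = 32 vblanks - 16 counts up to and through the
 * wrap plus 16 after it, which is what really elapsed.
 */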
/**
* drm_vblank_get - get a reference count on vblank events
* @dev: DRM device
* @crtc: which CRTC to own
*
* Acquire a reference count on vblank events to avoid having them disabled
* while in use.
*
* RETURNS
* Zero on success, nonzero on failure.
*/
int drm_vblank_get(struct drm_device *dev, int crtc)
{
unsigned long irqflags;
int ret = 0;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
/* Going from 0->1 means we have to enable interrupts again */
if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
!dev->vblank_enabled[crtc]) {
ret = dev->driver->enable_vblank(dev, crtc);
DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
if (ret)
atomic_dec(&dev->vblank_refcount[crtc]);
else {
dev->vblank_enabled[crtc] = 1;
drm_update_vblank_count(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return ret;
}
EXPORT_SYMBOL(drm_vblank_get);
/**
* drm_vblank_put - give up ownership of vblank events
* @dev: DRM device
* @crtc: which counter to give up
*
* Release ownership of a given vblank counter, turning off interrupts
* if possible.
*/
void drm_vblank_put(struct drm_device *dev, int crtc)
{
/* Last user schedules interrupt disable */
if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
}
EXPORT_SYMBOL(drm_vblank_put);
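For the get/put machinery to work, a driver supplies the three new hooks and sizes the per-CRTC arrays at load time. A minimal wiring sketch, assuming driver callbacks with these names exist:
static struct drm_driver example_driver = {
	/* ... existing fields ... */
	.get_vblank_counter = example_get_vblank_counter,
	.enable_vblank = example_enable_vblank,
	.disable_vblank = example_disable_vblank,
};

static int example_load(struct drm_device *dev, unsigned long flags)
{
	/* one counter, refcount and wait queue per CRTC */
	return drm_vblank_init(dev, 2);
}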
/**
* drm_modeset_ctl - handle vblank event counter changes across mode switch
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
* Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
* ioctls around modesetting so that any lost vblank events are accounted for.
*
* Generally the counter will reset across mode sets. If interrupts are
* enabled around this call, we don't have to do anything since the counter
* will have already been incremented.
*/
int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
unsigned long irqflags;
int crtc, ret = 0;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)
goto out;
crtc = modeset->crtc;
if (crtc >= dev->num_crtcs) {
ret = -EINVAL;
goto out;
}
/*
* To avoid all the problems that might happen if interrupts
* were enabled/disabled around or between these calls, we just
* have the kernel take a reference on the CRTC (just once though
* to avoid corrupting the count if multiple, mismatched calls occur),
* so that interrupts remain enabled in the interim.
*/
switch (modeset->cmd) {
case _DRM_PRE_MODESET:
if (!dev->vblank_inmodeset[crtc]) {
dev->vblank_inmodeset[crtc] = 1;
drm_vblank_get(dev, crtc);
}
break;
case _DRM_POST_MODESET:
if (dev->vblank_inmodeset[crtc]) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = 1;
dev->vblank_inmodeset[crtc] = 0;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
drm_vblank_put(dev, crtc);
}
break;
default:
ret = -EINVAL;
break;
}
out:
return ret;
}
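From userspace the bracket looks like this (a hedged sketch; fd is an open DRM file descriptor and the actual mode programming is driver-specific):
static void example_set_mode(int fd)
{
	struct drm_modeset_ctl ms = { .crtc = 0, .cmd = _DRM_PRE_MODESET };

	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ms);
	/* ... program the new mode ... */
	ms.cmd = _DRM_POST_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ms);
}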
/**
* Wait for VBLANK.
*
......@@ -232,14 +509,14 @@ int drm_control(struct drm_device *dev, void *data,
*
* If a signal is not requested, then calls vblank_wait().
*/
int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
struct timeval now;
int ret = 0;
unsigned int flags, seq;
unsigned int flags, seq, crtc;
if ((!dev->irq) || (!dev->irq_enabled))
if ((!dev->pdev->irq) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type &
......@@ -251,13 +528,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
if (crtc >= dev->num_crtcs)
return -EINVAL;
seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
: &dev->vbl_received);
ret = drm_vblank_get(dev, crtc);
if (ret) {
DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
return ret;
}
seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
......@@ -266,7 +547,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
case _DRM_VBLANK_ABSOLUTE:
break;
default:
return -EINVAL;
ret = -EINVAL;
goto done;
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
......@@ -276,8 +558,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
......@@ -298,22 +579,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
}
if (dev->vbl_pending >= 100) {
if (atomic_read(&dev->vbl_signal_pending) >= 100) {
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return -EBUSY;
ret = -EBUSY;
goto done;
}
dev->vbl_pending++;
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (!
(vbl_sig =
drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
return -ENOMEM;
vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
DRM_MEM_DRIVER);
if (!vbl_sig) {
ret = -ENOMEM;
goto done;
}
ret = drm_vblank_get(dev, crtc);
if (ret) {
drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
DRM_MEM_DRIVER);
return ret;
}
memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
atomic_inc(&dev->vbl_signal_pending);
vbl_sig->sequence = vblwait->request.sequence;
vbl_sig->info.si_signo = vblwait->request.signal;
......@@ -327,20 +615,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
vblwait->reply.sequence = seq;
} else {
if (flags & _DRM_VBLANK_SECONDARY) {
if (dev->driver->vblank_wait2)
ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
} else if (dev->driver->vblank_wait)
ret =
dev->driver->vblank_wait(dev,
&vblwait->request.sequence);
do_gettimeofday(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
vblwait->request.sequence, crtc);
DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
((drm_vblank_count(dev, crtc)
- vblwait->request.sequence) <= (1 << 23)));
if (ret != -EINTR) {
struct timeval now;
do_gettimeofday(&now);
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
vblwait->reply.sequence = drm_vblank_count(dev, crtc);
DRM_DEBUG("returning %d to client\n",
vblwait->reply.sequence);
} else {
DRM_DEBUG("vblank wait interrupted by signal\n");
}
}
done:
drm_vblank_put(dev, crtc);
return ret;
}
......@@ -348,44 +645,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
* Send the VBLANK signals.
*
* \param dev DRM device.
* \param crtc CRTC where the vblank event occurred
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*/
void drm_vbl_send_signals(struct drm_device * dev)
static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
{
struct drm_vbl_sig *vbl_sig, *tmp;
struct list_head *vbl_sigs;
unsigned int vbl_seq;
unsigned long flags;
int i;
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
struct drm_vbl_sig *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
vbl_sigs = &dev->vbl_sigs[crtc];
vbl_seq = drm_vblank_count(dev, crtc);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
list_del(&vbl_sig->head);
dev->vbl_pending--;
}
}
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
atomic_dec(&dev->vbl_signal_pending);
drm_vblank_put(dev, crtc);
}
}
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
EXPORT_SYMBOL(drm_vbl_send_signals);
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
* @crtc: where this event occurred
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
*/
void drm_handle_vblank(struct drm_device *dev, int crtc)
{
atomic_inc(&dev->_vblank_count[crtc]);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
drm_vbl_send_signals(dev, crtc);
}
EXPORT_SYMBOL(drm_handle_vblank);
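A hedged sketch of the intended call site: the driver's interrupt handler decodes which pipe fired and forwards one event per CRTC. The status register and bit names below are invented for illustration:
irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	u32 status = example_read_irq_status(dev);	/* hypothetical */

	if (status & EXAMPLE_PIPE_A_VBLANK)		/* hypothetical */
		drm_handle_vblank(dev, 0);
	if (status & EXAMPLE_PIPE_B_VBLANK)		/* hypothetical */
		drm_handle_vblank(dev, 1);

	return IRQ_HANDLED;
}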
/**
* Tasklet wrapper function.
......
......@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
{
return drm_agp_free_memory(handle) ? 0 : -EINVAL;
}
EXPORT_SYMBOL(drm_free_agp);
/** Wrapper around agp_bind_memory() */
int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
......@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
{
return drm_agp_unbind_memory(handle);
}
EXPORT_SYMBOL(drm_unbind_agp);
#else /* __OS_HAS_AGP */
static inline void *agp_remap(unsigned long offset, unsigned long size,
......
......@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
return child;
}
EXPORT_SYMBOL(drm_mm_get_block);
/*
* Put a block. Merge with the previous and / or next block if they are free.
......@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
drm_free(cur, sizeof(*cur), DRM_MEM_MM);
}
}
EXPORT_SYMBOL(drm_mm_put_block);
struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
......@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
return (head->next->next == head);
}
EXPORT_SYMBOL(drm_mm_search_free);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
......@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
return drm_mm_create_tail_node(mm, start, size);
}
EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
......
......@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
......@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
static struct drm_proc_list {
const char *name; /**< file name */
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
u32 driver_features; /**< Required driver features for this entry */
} drm_proc_list[] = {
{"name", drm_name_info},
{"mem", drm_mem_info},
{"vm", drm_vm_info},
{"clients", drm_clients_info},
{"queues", drm_queues_info},
{"bufs", drm_bufs_info},
{"name", drm_name_info, 0},
{"mem", drm_mem_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
#endif
......@@ -90,8 +97,9 @@ static struct drm_proc_list {
int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
int i, j;
int i, j, ret;
char name[64];
sprintf(name, "%d", minor_id);
......@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
u32 features = drm_proc_list[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, drm_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
minor->dev_root);
remove_proc_entry(name, root);
minor->dev_root = NULL;
return -1;
ret = -1;
goto fail;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = minor;
}
if (dev->driver->proc_init) {
ret = dev->driver->proc_init(minor);
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize "
"/proc/dri.\n");
goto fail;
}
}
return 0;
fail:
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[j].name,
minor->dev_root);
remove_proc_entry(name, root);
minor->dev_root = NULL;
return ret;
}
/**
......@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
*/
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
int i;
char name[64];
if (!root || !minor->dev_root)
return 0;
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(minor);
for (i = 0; i < DRM_PROC_ENTRIES; i++)
remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
sprintf(name, "%d", minor->index);
......@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
return ret;
}
struct drm_gem_name_info_data {
int len;
char *buf;
int eof;
};
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct drm_gem_name_info_data *nid = data;
DRM_INFO("name %d size %d\n", obj->name, obj->size);
if (nid->eof)
return 0;
nid->len += sprintf(&nid->buf[nid->len],
"%6d%9d%8d%9d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->refcount.refcount));
if (nid->len > DRM_PROC_LIMIT) {
nid->eof = 1;
return 0;
}
return 0;
}
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
struct drm_gem_name_info_data nid;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
nid.len = sprintf(buf, " name size handles refcount\n");
nid.buf = buf;
nid.eof = 0;
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
*start = &buf[offset];
*eof = 0;
if (nid.len > request + offset)
return request;
*eof = 1;
return nid.len - offset;
}
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
......
......@@ -107,7 +107,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
dev->irq = pdev->irq;
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
......@@ -152,6 +151,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
goto error_out_unreg;
}
if (driver->driver_features & DRIVER_GEM) {
retcode = drm_gem_init(dev);
if (retcode) {
DRM_ERROR("Cannot initialize graphics execution "
"manager (GEM)\n");
goto error_out_unreg;
}
}
return 0;
error_out_unreg:
......@@ -317,6 +325,7 @@ int drm_put_dev(struct drm_device * dev)
int drm_put_minor(struct drm_minor **minor_p)
{
struct drm_minor *minor = *minor_p;
DRM_DEBUG("release secondary minor %d\n", minor->index);
if (minor->type == DRM_MINOR_LEGACY)
......
......@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
err_out_files:
if (i > 0)
for (j = 0; j < i; j++)
device_remove_file(&minor->kdev, &device_attrs[i]);
device_remove_file(&minor->kdev, &device_attrs[j]);
device_unregister(&minor->kdev);
err_out:
......
......@@ -3,7 +3,12 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
i915_gem_tiling.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
......
......@@ -40,40 +40,96 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
u32 acthd;
u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
int i;
for (i = 0; i < 10000; i++) {
ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
for (i = 0; i < 100000; i++) {
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
acthd = I915_READ(acthd_reg);
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->space >= n)
return 0;
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (ring->head != last_head)
i = 0;
if (acthd != last_acthd)
i = 0;
last_head = ring->head;
last_acthd = acthd;
msleep_interruptible(10);
}
return -EBUSY;
}
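Worked numbers for the space computation above (sizes illustrative):
/*
 * For a 128 KiB ring (ring->Size == 0x20000) with head == 0x100 and
 * tail == 0x1ff00: space = 0x100 - (0x1ff00 + 8) is negative, so
 * ring->Size is added back, giving 0x1f8 bytes free. Reserving the
 * extra 8 bytes keeps tail from ever catching head exactly, which
 * would look identical to an empty ring.
 */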
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
}
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
/* Need to rewrite hardware status page */
I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->Size;
if (ring->head == ring->tail)
if (ring->head == ring->tail && dev_priv->sarea_priv)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
......@@ -84,28 +140,19 @@ static int i915_dma_cleanup(struct drm_device * dev)
* may not have been called from userspace and after dev_private
* is freed, it's too late.
*/
if (dev->irq)
if (dev->irq_enabled)
drm_irq_uninstall(dev);
if (dev_priv->ring.virtual_start) {
drm_core_ioremapfree(&dev_priv->ring.map, dev);
dev_priv->ring.virtual_start = 0;
dev_priv->ring.map.handle = 0;
dev_priv->ring.virtual_start = NULL;
dev_priv->ring.map.handle = NULL;
dev_priv->ring.map.size = 0;
}
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
/* Need to rewrite hardware status page */
I915_WRITE(0x02080, 0x1ffff000);
}
if (dev_priv->status_gfx_addr) {
dev_priv->status_gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
I915_WRITE(0x2080, 0x1ffff000);
}
/* Clear the HWS virtual address at teardown */
if (I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
return 0;
}
......@@ -121,34 +168,34 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio_map) {
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
dev_priv->sarea_priv = (drm_i915_sarea_t *)
((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
dev_priv->ring.Start = init->ring_start;
dev_priv->ring.End = init->ring_end;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
if (init->ring_size != 0) {
if (dev_priv->ring.ring_obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
dev_priv->ring.Size = init->ring_size;
dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
drm_core_ioremap(&dev_priv->ring.map, dev);
dev_priv->ring.map.offset = init->ring_start;
dev_priv->ring.map.size = init->ring_size;
dev_priv->ring.map.type = 0;
dev_priv->ring.map.flags = 0;
dev_priv->ring.map.mtrr = 0;
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
drm_core_ioremap(&dev_priv->ring.map, dev);
if (dev_priv->ring.map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
......@@ -159,34 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
/* We are using separate values as placeholders for mechanisms for
* private backbuffer/depthbuffer usage.
*/
dev_priv->use_mi_batchbuffer_start = 0;
if (IS_I965G(dev)) /* 965 doesn't support older method */
dev_priv->use_mi_batchbuffer_start = 1;
/* Allow hardware batchbuffers unless told otherwise.
*/
dev_priv->allow_batchbuffer = 1;
/* Program Hardware Status Page */
if (!I915_NEED_GFX_HWS(dev)) {
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
if (!dev_priv->status_page_dmah) {
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->dma_status_page);
}
DRM_DEBUG("Enabled hardware status page\n");
return 0;
}
......@@ -201,11 +224,6 @@ static int i915_dma_resume(struct drm_device * dev)
return -EINVAL;
}
if (!dev_priv->mmio_map) {
DRM_ERROR("can not find mmio map!\n");
return -EINVAL;
}
if (dev_priv->ring.map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
......@@ -220,9 +238,9 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
if (dev_priv->status_gfx_addr != 0)
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
else
I915_WRITE(0x02080, dev_priv->dma_status_page);
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
DRM_DEBUG("Enabled hardware status page\n");
return 0;
......@@ -367,9 +385,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
return 0;
}
static int i915_emit_box(struct drm_device * dev,
struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect __user *boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
......@@ -415,14 +434,15 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
dev_priv->counter = 0;
if (dev_priv->sarea_priv)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
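A note on what the ring dwords above do (a reading of the code, not patch text):
/*
 * MI_STORE_DWORD_INDEX makes the GPU store dev_priv->counter into
 * dword 5 of the hardware status page (5 << MI_STORE_DWORD_INDEX_SHIFT
 * is the byte offset 20 that the old CMD_STORE_DWORD_IDX code wrote as
 * a literal). The CPU reads the same slot back - see the hw_status[5]
 * accesses in the batchbuffer/cmdbuffer ioctls below - to learn how
 * far the ring has actually executed.
 */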
......@@ -486,7 +506,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
return ret;
}
if (dev_priv->use_mi_batchbuffer_start) {
if (!IS_I830(dev) && !IS_845G(dev)) {
BEGIN_LP_RING(2);
if (IS_I965G(dev)) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
......@@ -516,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv->sarea_priv)
return -EINVAL;
DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
__func__,
dev_priv->current_page,
......@@ -524,7 +547,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
BEGIN_LP_RING(2);
OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
ADVANCE_LP_RING();
......@@ -549,8 +572,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
BEGIN_LP_RING(4);
OUT_RING(CMD_STORE_DWORD_IDX);
OUT_RING(20);
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(0);
ADVANCE_LP_RING();
......@@ -570,9 +593,15 @@ static int i915_quiescent(struct drm_device * dev)
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
LOCK_TEST_WITH_RETURN(dev, file_priv);
int ret;
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_quiescent(dev);
mutex_unlock(&dev->struct_mutex);
return i915_quiescent(dev);
return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
......@@ -593,16 +622,19 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch);
mutex_unlock(&dev->struct_mutex);
sarea_priv->last_dispatch = (int)hw_status[5];
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
return ret;
}
......@@ -619,7 +651,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf->cliprects,
......@@ -629,24 +661,33 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -EFAULT;
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
}
sarea_priv->last_dispatch = (int)hw_status[5];
if (sarea_priv)
sarea_priv->last_dispatch = (int)hw_status[5];
return 0;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
int ret;
DRM_DEBUG("%s\n", __func__);
LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_flip(dev);
mutex_unlock(&dev->struct_mutex);
return i915_dispatch_flip(dev);
return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
......@@ -663,7 +704,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0;
value = dev->pdev->irq ? 1 : 0;
break;
case I915_PARAM_ALLOW_BATCHBUFFER:
value = dev_priv->allow_batchbuffer ? 1 : 0;
......@@ -671,6 +712,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_LAST_DISPATCH:
value = READ_BREADCRUMB(dev_priv);
break;
case I915_PARAM_CHIPSET_ID:
value = dev->pci_device;
break;
case I915_PARAM_HAS_GEM:
value = 1;
break;
default:
DRM_ERROR("Unknown parameter %d\n", param->param);
return -EINVAL;
......@@ -697,8 +744,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
if (!IS_I965G(dev))
dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
dev_priv->tex_lru_log_granularity = param->value;
......@@ -749,8 +794,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
dev_priv->hw_status_page = dev_priv->hws_map.handle;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
I915_WRITE(0x02080, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
dev_priv->status_gfx_addr);
DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
return 0;
......@@ -776,14 +821,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
memset(dev_priv, 0, sizeof(drm_i915_private_t));
dev->dev_private = (void *)dev_priv;
dev_priv->dev = dev;
/* Add register map (needed for suspend/resume) */
base = drm_get_resource_start(dev, mmio_bar);
size = drm_get_resource_len(dev, mmio_bar);
ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
_DRM_KERNEL | _DRM_DRIVER,
&dev_priv->mmio_map);
dev_priv->regs = ioremap(base, size);
i915_gem_load(dev);
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret != 0)
return ret;
}
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
* correctly in testing on 945G.
* This may be a side effect of MSI having been made available for PEG
* and the registers being closely associated.
*/
if (!IS_I945G(dev) && !IS_I945GM(dev))
if (pci_enable_msi(dev->pdev))
DRM_ERROR("failed to enable MSI\n");
intel_opregion_init(dev);
spin_lock_init(&dev_priv->user_irq_lock);
return ret;
}
......@@ -791,8 +860,15 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->mmio_map)
drm_rmmap(dev, dev_priv->mmio_map);
if (dev->pdev->msi_enabled)
pci_disable_msi(dev->pdev);
i915_free_hws(dev);
if (dev_priv->regs != NULL)
iounmap(dev_priv->regs);
intel_opregion_free(dev);
drm_free(dev->dev_private, sizeof(drm_i915_private_t),
DRM_MEM_DRIVER);
......@@ -800,6 +876,25 @@ int i915_driver_unload(struct drm_device *dev)
return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv;
DRM_DEBUG("\n");
i915_file_priv = (struct drm_i915_file_private *)
drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
if (!i915_file_priv)
return -ENOMEM;
file_priv->driver_priv = i915_file_priv;
i915_file_priv->mm.last_gem_seqno = 0;
i915_file_priv->mm.last_gem_throttle_seqno = 0;
return 0;
}
void i915_driver_lastclose(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
......@@ -807,6 +902,8 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv)
return;
i915_gem_lastclose(dev);
if (dev_priv->agp_heap)
i915_mem_takedown(&(dev_priv->agp_heap));
......@@ -819,6 +916,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}
struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
......@@ -836,7 +940,23 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
......
......@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
i915_PCI_IDS
};
enum pipe {
PIPE_A = 0,
PIPE_B,
};
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (pipe == PIPE_A)
return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
else
return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (pipe == PIPE_A)
array = dev_priv->save_palette_a;
else
array = dev_priv->save_palette_b;
for(i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
{
outb(reg, index_port);
return inb(data_port);
}
static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
return inb(VGA_AR_DATA_READ);
}
static void i915_write_ar(u16 st01, u8 reg, u8 val, u16 palette_enable)
{
inb(st01);
outb(palette_enable | reg, VGA_AR_INDEX);
outb(val, VGA_AR_DATA_WRITE);
}
static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
{
outb(reg, index_port);
outb(val, data_port);
}
static void i915_save_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* VGA color palette registers */
dev_priv->saveDACMASK = inb(VGA_DACMASK);
/* DACCRX automatically increments during read */
outb(0, VGA_DACRX);
/* Read 3 bytes of color data from each index */
for (i = 0; i < 256 * 3; i++)
dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
/* MSR bits */
dev_priv->saveMSR = inb(VGA_MSR_READ);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* CRT controller regs */
i915_write_indexed(cr_index, cr_data, 0x11,
i915_read_indexed(cr_index, cr_data, 0x11) &
(~0x80));
for (i = 0; i <= 0x24; i++)
dev_priv->saveCR[i] =
i915_read_indexed(cr_index, cr_data, i);
/* Make sure we don't turn off CR group 0 writes */
dev_priv->saveCR[0x11] &= ~0x80;
/* Attribute controller registers */
inb(st01);
dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
for (i = 0; i <= 0x14; i++)
dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
inb(st01);
outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
inb(st01);
/* Graphics controller registers */
for (i = 0; i < 9; i++)
dev_priv->saveGR[i] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
dev_priv->saveGR[0x10] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
dev_priv->saveGR[0x11] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
dev_priv->saveGR[0x18] =
i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
/* Sequencer registers */
for (i = 0; i < 8; i++)
dev_priv->saveSR[i] =
i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
}
static void i915_restore_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
u16 cr_index, cr_data, st01;
/* MSR bits */
outb(dev_priv->saveMSR, VGA_MSR_WRITE);
if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
cr_index = VGA_CR_INDEX_CGA;
cr_data = VGA_CR_DATA_CGA;
st01 = VGA_ST01_CGA;
} else {
cr_index = VGA_CR_INDEX_MDA;
cr_data = VGA_CR_DATA_MDA;
st01 = VGA_ST01_MDA;
}
/* Sequencer registers, don't write SR07 */
for (i = 0; i < 7; i++)
i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
dev_priv->saveSR[i]);
/* CRT controller regs */
/* Enable CR group 0 writes */
i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
for (i = 0; i <= 0x24; i++)
i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
/* Graphics controller regs */
for (i = 0; i < 9; i++)
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
dev_priv->saveGR[i]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
dev_priv->saveGR[0x10]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
dev_priv->saveGR[0x11]);
i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
dev_priv->saveGR[0x18]);
/* Attribute controller registers */
inb(st01);
for (i = 0; i <= 0x14; i++)
i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
inb(st01); /* switch back to index mode */
outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
inb(st01);
/* VGA color palette registers */
outb(dev_priv->saveDACMASK, VGA_DACMASK);
/* DACWX automatically increments during write */
outb(0, VGA_DACWX);
/* Write 3 bytes of color data to each index */
for (i = 0; i < 256 * 3; i++)
outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
}
static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (!dev || !dev_priv) {
printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
......@@ -254,122 +52,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
return 0;
pci_save_state(dev->pdev);
pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
/* Display arbitration control */
dev_priv->saveDSPARB = I915_READ(DSPARB);
/* Pipe & plane A info */
dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
dev_priv->saveFPA0 = I915_READ(FPA0);
dev_priv->saveFPA1 = I915_READ(FPA1);
dev_priv->saveDPLL_A = I915_READ(DPLL_A);
if (IS_I965G(dev))
dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
dev_priv->saveDSPABASE = I915_READ(DSPABASE);
if (IS_I965G(dev)) {
dev_priv->saveDSPASURF = I915_READ(DSPASURF);
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
dev_priv->saveFPB0 = I915_READ(FPB0);
dev_priv->saveFPB1 = I915_READ(FPB1);
dev_priv->saveDPLL_B = I915_READ(DPLL_B);
if (IS_I965G(dev))
dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
/* CRT state */
dev_priv->saveADPA = I915_READ(ADPA);
i915_save_state(dev);
/* LVDS state */
dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
if (IS_I965G(dev))
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
dev_priv->saveLVDS = I915_READ(LVDS);
if (!IS_I830(dev) && !IS_845G(dev))
dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
/* FIXME: save TV & SDVO state */
/* FBC state */
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
/* Interrupt state */
dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
/* VGA state */
dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
/* Clock gating state */
dev_priv->saveD_STATE = I915_READ(D_STATE);
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
/* Memory Arbitration state */
dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
/* Scratch space */
for (i = 0; i < 16; i++) {
dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
}
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
i915_save_vga(dev);
intel_opregion_free(dev);
if (state.event == PM_EVENT_SUSPEND) {
/* Shut down the device */
......@@ -382,155 +68,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
static int i915_resume(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
if (pci_enable_device(dev->pdev))
return -1;
pci_set_master(dev->pdev);
pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
I915_WRITE(DSPARB, dev_priv->saveDSPARB);
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPA0, dev_priv->saveFPA0);
I915_WRITE(FPA1, dev_priv->saveFPA1);
/* Actually enable it */
I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
I915_WRITE(DSPABASE, I915_READ(DSPABASE));
/* Pipe & plane B info */
if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
~DPLL_VCO_ENABLE);
udelay(150);
}
I915_WRITE(FPB0, dev_priv->saveFPB0);
I915_WRITE(FPB1, dev_priv->saveFPB1);
/* Actually enable it */
I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
udelay(150);
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
udelay(150);
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
/* Restore plane info */
I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
if (IS_I965G(dev)) {
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
/* CRT state */
I915_WRITE(ADPA, dev_priv->saveADPA);
/* LVDS state */
if (IS_I965G(dev))
I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
if (IS_MOBILE(dev) && !IS_I830(dev))
I915_WRITE(LVDS, dev_priv->saveLVDS);
if (!IS_I830(dev) && !IS_845G(dev))
I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
/* FIXME: restore TV & SDVO state */
/* FBC info */
I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
/* VGA state */
I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
udelay(150);
/* Clock gating state */
I915_WRITE (D_STATE, dev_priv->saveD_STATE);
I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
/* Memory arbitration state */
I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
for (i = 0; i < 16; i++) {
I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
}
for (i = 0; i < 3; i++)
I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
i915_restore_state(dev);
i915_restore_vga(dev);
intel_opregion_init(dev);
return 0;
}
......@@ -541,17 +87,19 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
DRIVER_IRQ_VBL2,
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
.load = i915_driver_load,
.unload = i915_driver_unload,
.open = i915_driver_open,
.lastclose = i915_driver_lastclose,
.preclose = i915_driver_preclose,
.postclose = i915_driver_postclose,
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
.vblank_wait = i915_driver_vblank_wait,
.vblank_wait2 = i915_driver_vblank_wait2,
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
......@@ -559,6 +107,10 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.get_map_ofs = drm_core_get_map_ofs,
.get_reg_ofs = drm_core_get_reg_ofs,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.ioctls = i915_ioctls,
.fops = {
.owner = THIS_MODULE,
......
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if WATCH_INACTIVE
void
i915_verify_inactive(struct drm_device *dev, char *file, int line)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
obj = obj_priv->obj;
if (obj_priv->pin_count || obj_priv->active ||
(obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
I915_GEM_DOMAIN_GTT)))
DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n",
obj,
obj_priv->pin_count, obj_priv->active,
obj->write_domain, file, line);
}
}
#endif /* WATCH_INACTIVE */
#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
static void
i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
uint32_t bias, uint32_t mark)
{
uint32_t *mem = kmap_atomic(page, KM_USER0);
int i;
for (i = start; i < end; i += 4)
DRM_INFO("%08x: %08x%s\n",
(int) (bias + i), mem[i / 4],
(bias + i == mark) ? " ********" : "");
kunmap_atomic(mem, KM_USER0);
/* give syslog time to catch up */
msleep(1);
}
void
i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark)
{
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
page_len = len - page * PAGE_SIZE;
if (page_len > PAGE_SIZE)
page_len = PAGE_SIZE;
for (chunk = 0; chunk < page_len; chunk += 128) {
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
i915_gem_dump_page(obj_priv->page_list[page],
chunk, chunk + chunk_len,
obj_priv->gtt_offset +
page * PAGE_SIZE,
mark);
}
}
}
#endif
#if WATCH_LRU
void
i915_dump_lru(struct drm_device *dev, const char *where)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
DRM_INFO("active list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("flushing list %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
DRM_INFO("inactive %s {\n", where);
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
DRM_INFO(" %p: %08x\n", obj_priv,
obj_priv->last_rendering_seqno);
}
DRM_INFO("}\n");
}
#endif
#if WATCH_COHERENCY
void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
{
struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
__func__, obj, obj_priv->gtt_offset, handle,
obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
obj->size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
}
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
goto out;
}
for (i = 0; i < PAGE_SIZE / 4; i++) {
uint32_t cpuval = backing_map[i];
uint32_t gttval = readl(gtt_mapping +
page * 1024 + i);
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
DRM_INFO("...\n");
goto out;
}
}
}
kunmap_atomic(backing_map, KM_USER0);
backing_map = NULL;
}
out:
if (backing_map != NULL)
kunmap_atomic(backing_map, KM_USER0);
iounmap(gtt_mapping);
/* give syslog time to catch up */
msleep(1);
/* Directly flush the object, since we just loaded values with the CPU
* from the backing pages and we don't want to disturb the cache
* management that we're trying to observe.
*/
i915_gem_clflush_object(obj);
}
#endif
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
static int i915_gem_active_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Active:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n",
obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
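These handlers use the kernel's legacy read_proc interface: each call fills `buf`, tracks `len`, and signals completion through `*start` and `*eof`. The DRM_PROC_PRINT helper that keeps them terse is not shown in this hunk; a minimal sketch of the contract it must implement (an assumption, not the exact definition from the tree):
#define DRM_PROC_LIMIT (PAGE_SIZE - 80)
/* Append formatted text to the proc page; once the single page the
 * proc core hands us is nearly full, mark EOF and return what we have
 * so the buffer is never overrun.
 */
#define DRM_PROC_PRINT(fmt, arg...)					\
	do {								\
		len += sprintf(&buf[len], fmt, ##arg);			\
		if (len > DRM_PROC_LIMIT) {				\
			*eof = 1;					\
			return len - offset;				\
		}							\
	} while (0)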
static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Flushing:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Inactive:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_request_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Request:\n");
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d %08x\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies),
gem_request->flush_domains);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_interrupt_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Interrupt enable: %08x\n",
I915_READ(IER));
DRM_PROC_PRINT("Interrupt identity: %08x\n",
I915_READ(IIR));
DRM_PROC_PRINT("Interrupt mask: %08x\n",
I915_READ(IMR));
DRM_PROC_PRINT("Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
DRM_PROC_PRINT("Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
DRM_PROC_PRINT("Current sequence: %d\n",
i915_get_gem_seqno(dev));
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static struct drm_proc_list {
/** file name */
const char *name;
/** proc callback*/
int (*f) (char *, char **, off_t, int, int *, void *);
} i915_gem_proc_list[] = {
{"i915_gem_active", i915_gem_active_info},
{"i915_gem_flushing", i915_gem_flushing_info},
{"i915_gem_inactive", i915_gem_inactive_info},
{"i915_gem_request", i915_gem_request_info},
{"i915_gem_seqno", i915_gem_seqno_info},
{"i915_gem_interrupt", i915_interrupt_info},
};
#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
int i915_gem_proc_init(struct drm_minor *minor)
{
struct proc_dir_entry *ent;
int i, j;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
ent = create_proc_entry(i915_gem_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/.../%s\n",
i915_gem_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(i915_gem_proc_list[j].name,
minor->dev_root);
return -1;
}
ent->read_proc = i915_gem_proc_list[i].f;
ent->data = minor;
}
return 0;
}
void i915_gem_proc_cleanup(struct drm_minor *minor)
{
int i;
if (!minor->dev_root)
return;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
}
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
/** @file i915_gem_tiling.c
*
* Support for managing tiling state of buffer objects.
*
* The idea behind tiling is to increase cache hit rates by rearranging
* pixel data so that a group of pixel accesses are in the same cacheline.
* Performance improvement from doing this on the back/depth buffer are on
* the order of 30%.
*
* Intel architectures make this somewhat more complicated, though, by
* adjustments made to addressing of data when the memory is in interleaved
* mode (matched pairs of DIMMS) to improve memory bandwidth.
* For interleaved memory, the CPU sends every sequential 64 bytes
* to an alternate memory channel so it can get the bandwidth from both.
*
* The GPU also rearranges its accesses for increased bandwidth to interleaved
* memory, and it matches what the CPU does for non-tiled. However, when tiled
* it does it a little differently, since one walks addresses not just in the
* X direction but also Y. So, along with alternating channels when bit
* 6 of the address flips, it also alternates when other bits flip -- Bits 9
* (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
* are common to both the 915 and 965-class hardware.
*
* The CPU also sometimes XORs in higher bits as well, to improve
* bandwidth doing strided access like we do so frequently in graphics. This
* is called "Channel XOR Randomization" in the MCH documentation. The result
* is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
* decode.
*
* All of this bit 6 XORing has an effect on our memory management,
* as we need to make sure that the 3d driver can correctly address object
* contents.
*
* If we don't have interleaved memory, all tiling is safe and no swizzling is
* required.
*
* When bit 17 is XORed in, we simply refuse to tile at all. Bit
* 17 is not just a page offset, so as we page an object out and back in,
* individual pages in it will have different bit 17 addresses, resulting in
* each 64 bytes being swapped with its neighbor!
*
* Otherwise, if interleaved, we have to tell the 3d driver what the address
* swizzling it needs to do is, since it's writing with the CPU to the pages
* (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
* pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
* required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
* to match what the GPU expects.
*/
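To make the addressing consequence concrete: the fixup that userspace (or a kernel copy path) must apply is just an XOR of the selected high bits into bit 6 of each linear offset. A minimal sketch, assuming the I915_BIT_6_SWIZZLE_* values this file reports; the helper itself is illustrative and not part of this patch:
static unsigned long
i915_swizzle_addr(unsigned long addr, uint32_t swizzle_mode)
{
	unsigned long bit6;
	switch (swizzle_mode) {
	case I915_BIT_6_SWIZZLE_9:
		bit6 = (addr >> 9) & 1;
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		bit6 = ((addr >> 9) ^ (addr >> 10)) & 1;
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		bit6 = ((addr >> 9) ^ (addr >> 11)) & 1;
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		bit6 = ((addr >> 9) ^ (addr >> 10) ^ (addr >> 11)) & 1;
		break;
	default: /* NONE, or UNKNOWN: nothing we can safely do */
		return addr;
	}
	/* Flip bit 6 whenever the XOR of the chosen higher bits is set. */
	return addr ^ (bit6 << 6);
}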
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
*/
void
i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
if (!IS_I9XX(dev)) {
/* As far as we know, the 865 doesn't have these bit 6
* swizzling issues.
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
IS_GM45(dev)) {
uint32_t dcc;
/* On 915-945 and GM965, channel interleave by the CPU is
* determined by DCC. The CPU will alternate based on bit 6
* in interleaved mode, and the GPU will then also alternate
* on bit 6, 9, and 10 for X, but the CPU may also optionally
* alternate based on bit 17 (XOR not disabled and XOR
* bit == 17).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (IS_I915G(dev) || IS_I915GM(dev) ||
dcc & DCC_CHANNEL_XOR_DISABLE) {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if (IS_I965GM(dev) || IS_GM45(dev)) {
/* GM965 only does bit 11-based channel
* randomization
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 or perhaps other swizzling */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
break;
}
if (dcc == 0xffffffff) {
DRM_ERROR("Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
* (interleaving) on as much memory as it can, and the GPU
* will additionally sometimes enable different bit 6
* swizzling for tiled objects from the CPU.
*
* Here's what I found on the G965:
*  slot fill            memory size     swizzling
*  0A    0B    1A    1B    1-ch    2-ch
*  512   0     0     0     512     0       O
*  512   0     512   0     16      1008    X
*  512   0     0     512   16      1008    X
*  0     512   0     512   16      1008    X
*  1024  1024  1024  0     2048    1024    O
*
* We could probably detect this based on either the DRB
* matching, which was the case for the swizzling required in
* the table above, or from the 1-ch value being less than
* the minimum size of a rank.
*/
if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else {
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
}
}
dev_priv->mm.bit_6_swizzle_x = swizzle_x;
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
/**
* Sets the tiling mode of an object, returning the required swizzling of
* bit 6 of addresses in the object.
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
if (args->tiling_mode == I915_TILING_NONE) {
obj_priv->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
} else {
if (args->tiling_mode == I915_TILING_X)
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
else
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
/* If we can't handle the swizzling, make it untiled. */
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
args->tiling_mode = I915_TILING_NONE;
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
}
}
obj_priv->tiling_mode = args->tiling_mode;
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}
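For reference, a hypothetical userspace caller of the ioctl above; the struct and the DRM_IOCTL_I915_GEM_SET_TILING request are assumed to match what this series adds to i915_drm.h, while fd, the handle, and the helper itself are placeholders:
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"
/* Ask for X tiling on a GEM object and report the bit 6 swizzle that
 * CPU access must then apply; returns 0 or a negative errno.
 */
static int set_x_tiling(int fd, uint32_t handle, uint32_t *swizzle)
{
	struct drm_i915_gem_set_tiling set;
	memset(&set, 0, sizeof(set));
	set.handle = handle;
	set.tiling_mode = I915_TILING_X;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set))
		return -errno;
	/* The kernel may have downgraded to I915_TILING_NONE if it
	 * could not describe the swizzling (see the code above).
	 */
	*swizzle = set.swizzle_mode;
	return 0;
}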
/**
* Returns the current tiling mode and required bit 6 swizzling for the object.
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
return -EINVAL;
obj_priv = obj->driver_private;
mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj_priv->tiling_mode;
switch (obj_priv->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
case I915_TILING_Y:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
break;
case I915_TILING_NONE:
args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
break;
default:
DRM_ERROR("unknown tiling mode\n");
}
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference(obj);
return 0;
}
......@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
.vblank_wait = mga_driver_vblank_wait,
.get_vblank_counter = mga_get_vblank_counter,
.enable_vblank = mga_enable_vblank,
.disable_vblank = mga_disable_vblank,
.irq_preinstall = mga_driver_irq_preinstall,
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
......@@ -64,20 +65,20 @@ static struct drm_driver driver = {
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = mga_compat_ioctl,
.compat_ioctl = mga_compat_ioctl,
#endif
},
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
......
......@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
u32 clear_cmd;
u32 maccess;
atomic_t vbl_received; /**< Number of vblanks received. */
wait_queue_head_t fence_queue;
atomic_t last_fence_retired;
u32 next_fence_to_post;
......@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
extern void mga_driver_irq_preinstall(struct drm_device * dev);
extern void mga_driver_irq_postinstall(struct drm_device * dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
......
/* mga_irq.c -- IRQ handling for MGA -*- linux-c -*-
*
*/
/*
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
*
* The Weather Channel (TM) funded Tungsten Graphics to develop the
......@@ -35,6 +36,18 @@
#include "mga_drm.h"
#include "mga_drv.h"
u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
{
const drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
if (crtc != 0)
return 0;
return atomic_read(&dev_priv->vbl_received);
}
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
......@@ -47,9 +60,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* VBLANK interrupt */
if (status & MGA_VLINEPEN) {
MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
atomic_inc(&dev->vbl_received);
DRM_WAKEUP(&dev->vbl_queue);
drm_vbl_send_signals(dev);
atomic_inc(&dev_priv->vbl_received);
drm_handle_vblank(dev, 0);
handled = 1;
}
......@@ -58,6 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
const u32 prim_end = MGA_READ(MGA_PRIMEND);
MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
/* In addition to clearing the interrupt-pending bit, we
......@@ -72,28 +85,39 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
handled = 1;
}
if (handled) {
if (handled)
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
int mga_enable_vblank(struct drm_device *dev, int crtc)
{
unsigned int cur_vblank;
int ret = 0;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
/* Assume that the user has missed the current sequence number
* by about a day rather than she wants to wait for years
* using vertical blanks...
*/
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
(((cur_vblank = atomic_read(&dev->vbl_received))
- *sequence) <= (1 << 23)));
if (crtc != 0) {
DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
crtc);
return 0;
}
*sequence = cur_vblank;
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
return 0;
}
return ret;
void mga_disable_vblank(struct drm_device *dev, int crtc)
{
if (crtc != 0) {
DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
crtc);
}
/* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have
* a nice hardware counter that tracks the number of refreshes when
* the interrupt is disabled, and the kernel doesn't know the refresh
* rate to calculate an estimate.
*/
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
......@@ -125,14 +149,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
void mga_driver_irq_postinstall(struct drm_device * dev)
int mga_driver_irq_postinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int ret;
ret = drm_vblank_init(dev, 1);
if (ret)
return ret;
DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
/* Turn on vertical blank interrupt and soft trap interrupt. */
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
/* Turn on soft trap interrupt. Vertical blank interrupts are enabled
* in mga_enable_vblank.
*/
MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
return 0;
}
void mga_driver_irq_uninstall(struct drm_device * dev)
......
......@@ -1022,7 +1022,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
switch (param->param) {
case MGA_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;
......
......@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_IRQ_VBL,
DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
.vblank_wait = r128_driver_vblank_wait,
.get_vblank_counter = r128_get_vblank_counter,
.enable_vblank = r128_enable_vblank,
.disable_vblank = r128_disable_vblank,
.irq_preinstall = r128_driver_irq_preinstall,
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
......@@ -59,21 +60,20 @@ static struct drm_driver driver = {
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
.fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.ioctl = drm_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
#ifdef CONFIG_COMPAT
.compat_ioctl = r128_compat_ioctl,
.compat_ioctl = r128_compat_ioctl,
#endif
},
.pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.name = DRIVER_NAME,
.id_table = pciidlist,
},
.name = DRIVER_NAME,
......@@ -87,6 +87,7 @@ static struct drm_driver driver = {
static int __init r128_init(void)
{
driver.num_ioctls = r128_max_ioctl;
return drm_init(&driver);
}
......
......@@ -29,7 +29,7 @@
* Rickard E. (Rik) Faith <faith@valinux.com>
* Kevin E. Martin <martin@valinux.com>
* Gareth Hughes <gareth@valinux.com>
* Michel Dänzer <daenzerm@student.ethz.ch>
* Michel Dänzer <daenzerm@student.ethz.ch>
*/
#ifndef __R128_DRV_H__
......@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
u32 crtc_offset;
u32 crtc_offset_cntl;
atomic_t vbl_received;
u32 color_fmt;
unsigned int front_offset;
unsigned int front_pitch;
......@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
extern int r128_do_cleanup_cce(struct drm_device * dev);
extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
extern void r128_driver_irq_preinstall(struct drm_device * dev);
extern void r128_driver_irq_postinstall(struct drm_device * dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
extern void r128_driver_irq_uninstall(struct drm_device * dev);
extern void r128_driver_lastclose(struct drm_device * dev);
extern void r128_driver_preclose(struct drm_device * dev,
......
......@@ -1629,7 +1629,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
switch (param->param) {
case R128_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
default:
return -EINVAL;
......
......@@ -2997,7 +2997,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
value = GET_SCRATCH(2);
break;
case RADEON_PARAM_IRQ_NR:
value = dev->irq;
value = drm_dev_to_irq(dev);
break;
case RADEON_PARAM_GART_BASE:
value = dev_priv->gart_vm_start;
......