Commit f698f1f7 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: fix for non-coherent DMA PowerPC
  drm: radeon: fix sparse integer as NULL pointer warnings in radeon_mem.c
  drm/i915: fix oops on agp=off
  drm/r300: fix bug in r300 userspace hardware wait emission
@@ -168,6 +168,12 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
 		}
 	}
+	if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
+		dma_sync_single_for_device(&dev->pdev->dev,
+					   bus_address,
+					   max_pages * sizeof(u32),
+					   PCI_DMA_TODEVICE);
 	ret = 1;
 #if defined(__i386__) || defined(__x86_64__)
......
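For context on the PCIGART change above: on a non-cache-coherent machine the CPU's writes to the GART table can sit in the cache until explicitly flushed, so ownership of the table has to be handed to the device after the CPU finishes filling it in. A minimal sketch of that pattern follows (illustrative only, not part of this diff; the pdev/table/table_size names are made up):

	/* CPU-owned phase: map the table for device reads and fill in the entries. */
	dma_addr_t bus_addr = pci_map_single(pdev, table, table_size,
					     PCI_DMA_TODEVICE);
	/* ... CPU writes the GART page-table entries into 'table' ... */

	/* Hand ownership to the device: flush the CPU cache lines covering
	 * the table so the GPU sees the entries the CPU just wrote. */
	dma_sync_single_for_device(&pdev->dev, bus_addr, table_size,
				   PCI_DMA_TODEVICE);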
@@ -36,6 +36,15 @@
 #define DEBUG_SCATTER 0
+static inline void *drm_vmalloc_dma(unsigned long size)
+{
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE);
+#else
+	return vmalloc_32(size);
+#endif
+}
 void drm_sg_cleanup(struct drm_sg_mem * entry)
 {
 	struct page *page;
@@ -104,7 +113,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	}
 	memset((void *)entry->busaddr, 0, pages * sizeof(*entry->busaddr));
-	entry->virtual = vmalloc_32(pages << PAGE_SHIFT);
+	entry->virtual = drm_vmalloc_dma(pages << PAGE_SHIFT);
 	if (!entry->virtual) {
 		drm_free(entry->busaddr,
			 entry->pages * sizeof(*entry->busaddr), DRM_MEM_PAGES);
......
@@ -54,13 +54,24 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 	if (map_type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 				    vma->vm_start))
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
 #endif
 	return tmp;
 }
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	tmp |= _PAGE_NO_CACHE;
+#endif
+	return tmp;
+}
@@ -603,9 +614,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
@@ -624,6 +632,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 	/* fall through to _DRM_SHM */
 	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
@@ -631,6 +640,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		/* Don't let this area swap. Change when
 		   DRM_KERNEL advisory is supported. */
 		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
......
@@ -804,6 +804,9 @@ void i915_driver_lastclose(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (!dev_priv)
+		return;
 	if (dev_priv->agp_heap)
 		i915_mem_takedown(&(dev_priv->agp_heap));
......
@@ -729,6 +729,47 @@ static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
 	buf->used = 0;
 }
+static void r300_cmd_wait(drm_radeon_private_t * dev_priv,
+			  drm_r300_cmd_header_t header)
+{
+	u32 wait_until;
+	RING_LOCALS;
+	if (!header.wait.flags)
+		return;
+	wait_until = 0;
+	switch(header.wait.flags) {
+	case R300_WAIT_2D:
+		wait_until = RADEON_WAIT_2D_IDLE;
+		break;
+	case R300_WAIT_3D:
+		wait_until = RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_3D:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_3D_IDLE;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	case R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN:
+		wait_until = RADEON_WAIT_2D_IDLE|RADEON_WAIT_2D_IDLECLEAN;
+		wait_until |= RADEON_WAIT_3D_IDLE|RADEON_WAIT_3D_IDLECLEAN;
+		break;
+	default:
+		return;
+	}
+	BEGIN_RING(2);
+	OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
+	OUT_RING(wait_until);
+	ADVANCE_RING();
+}
 static int r300_scratch(drm_radeon_private_t *dev_priv,
			drm_radeon_kcmd_buffer_t *cmdbuf,
			drm_r300_cmd_header_t header)
@@ -909,19 +950,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
 			break;
 		case R300_CMD_WAIT:
-			/* simple enough, we can do it here */
 			DRM_DEBUG("R300_CMD_WAIT\n");
-			if (header.wait.flags == 0)
-				break;	/* nothing to do */
-			{
-				RING_LOCALS;
-				BEGIN_RING(2);
-				OUT_RING(CP_PACKET0(RADEON_WAIT_UNTIL, 0));
-				OUT_RING((header.wait.flags & 0xf) << 14);
-				ADVANCE_RING();
-			}
+			r300_cmd_wait(dev_priv, header);
 			break;
 		case R300_CMD_SCRATCH:
......
@@ -225,8 +225,20 @@ typedef union {
 #define R300_CMD_WAIT		7
 #	define R300_WAIT_2D		0x1
 #	define R300_WAIT_3D		0x2
+/* these two defines are DOING IT WRONG - however
+ * we have userspace which relies on using these.
+ * The wait interface is backwards compat; new
+ * code should use the NEW_WAIT defines below.
+ * THESE ARE NOT BIT FIELDS.
+ */
 #	define R300_WAIT_2D_CLEAN	0x3
 #	define R300_WAIT_3D_CLEAN	0x4
+#	define R300_NEW_WAIT_2D_3D	0x3
+#	define R300_NEW_WAIT_2D_2D_CLEAN	0x4
+#	define R300_NEW_WAIT_3D_3D_CLEAN	0x6
+#	define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN	0x8
 #define R300_CMD_SCRATCH		8
 typedef union {
......
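The R300_NEW_WAIT_* values above deliberately avoid reusing the legacy R300_WAIT_2D_CLEAN/R300_WAIT_3D_CLEAN encodings as bit fields, which is why the r300_cmd_wait() helper earlier in this series switches on the whole flags value instead of masking individual bits. A rough sketch of how userspace might emit the new-style wait (illustrative only, not part of this commit; the wait.cmd_type/wait.flags field names are assumed to match the drm_r300_cmd_header_t union used by r300_cmd_wait()):

	drm_r300_cmd_header_t header;

	memset(&header, 0, sizeof(header));
	header.wait.cmd_type = R300_CMD_WAIT;
	/* Ask for both engines idle with their caches flushed; the kernel's
	 * r300_cmd_wait() translates this value into RADEON_WAIT_* bits for
	 * the WAIT_UNTIL register. */
	header.wait.flags = R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN;
	/* 'header' is then packed into the command stream handed to the
	 * radeon CP command-buffer ioctl. */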
@@ -88,7 +88,7 @@ static struct mem_block *alloc_block(struct mem_block *heap, int size,
 	list_for_each(p, heap) {
 		int start = (p->start + mask) & ~mask;
-		if (p->file_priv == 0 && start + size <= p->start + p->size)
+		if (p->file_priv == NULL && start + size <= p->start + p->size)
 			return split_block(p, start, size, file_priv);
 	}
@@ -113,7 +113,7 @@ static void free_block(struct mem_block *p)
 	/* Assumes a single contiguous range. Needs a special file_priv in
	 * 'heap' to stop it being subsumed.
	 */
-	if (p->next->file_priv == 0) {
+	if (p->next->file_priv == NULL) {
 		struct mem_block *q = p->next;
 		p->size += q->size;
 		p->next = q->next;
@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
 		drm_free(q, sizeof(*q), DRM_MEM_BUFS);
 	}
-	if (p->prev->file_priv == 0) {
+	if (p->prev->file_priv == NULL) {
 		struct mem_block *q = p->prev;
 		q->size += p->size;
 		q->next = p->next;
@@ -174,7 +174,7 @@ void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
	 * 'heap' to stop it being subsumed.
	 */
 	list_for_each(p, heap) {
-		while (p->file_priv == 0 && p->next->file_priv == 0) {
+		while (p->file_priv == NULL && p->next->file_priv == NULL) {
 			struct mem_block *q = p->next;
 			p->size += q->size;
 			p->next = q->next;
......