diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 868f574363ad8c77b779782bf1103d3d9839e19c..bbadf1c041426ca73e9fd1bf0e749c0050dbde92 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1160,6 +1160,8 @@ int i915_driver_unload(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
 
+		i915_gem_free_all_phys_object(dev);
+
 		mutex_lock(&dev->struct_mutex);
 		i915_gem_cleanup_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 563de18063fdca4063349d32778bd772f5679c10..e1351825200712078b8fe2bf65d238d57435e7b5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -72,6 +72,18 @@ enum pipe {
 #define WATCH_INACTIVE	0
 #define WATCH_PWRITE	0
 
+#define I915_GEM_PHYS_CURSOR_0 1
+#define I915_GEM_PHYS_CURSOR_1 2
+#define I915_GEM_PHYS_OVERLAY_REGS 3
+#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
+
+struct drm_i915_gem_phys_object {
+	int id;
+	struct page **page_list;
+	drm_dma_handle_t *handle;
+	struct drm_gem_object *cur_obj;
+};
+
 typedef struct _drm_i915_ring_buffer {
 	int tail_mask;
 	unsigned long Size;
@@ -358,6 +370,9 @@ typedef struct drm_i915_private {
 		uint32_t bit_6_swizzle_x;
 		/** Bit 6 swizzling required for Y tiling */
 		uint32_t bit_6_swizzle_y;
+
+		/* storage for physical objects */
+		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 	} mm;
 } drm_i915_private_t;
 
@@ -436,6 +451,9 @@ struct drm_i915_gem_object {
 	/** User space pin count and filp owning the pin */
 	uint32_t user_pin_count;
 	struct drm_file *pin_filp;
+
+	/** for phy allocated objects */
+	struct drm_i915_gem_phys_object *phys_obj;
 };
 
 /**
@@ -598,6 +616,11 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
+int i915_gem_attach_phys_object(struct drm_device *dev,
+				struct drm_gem_object *obj, int id);
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_gem_object *obj);
+void i915_gem_free_all_phys_object(struct drm_device *dev);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1384d6686555b4418d5e3be9a1dbf3fc41f4438f..96316fd4723322f7ac277e3747abdc806ab3a98e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,9 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev);
+static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+				struct drm_i915_gem_pwrite *args,
+				struct drm_file *file_priv);
 
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end)
@@ -386,8 +389,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj_priv->tiling_mode == I915_TILING_NONE &&
-	    dev->gtt_total != 0)
+	if (obj_priv->phys_obj)
+		ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
+	else if (obj_priv->tiling_mode == I915_TILING_NONE &&
+		 dev->gtt_total != 0)
 		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
 	else
 		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
@@ -2858,6 +2863,9 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	while (obj_priv->pin_count > 0)
 		i915_gem_object_unpin(obj);
 
+	if (obj_priv->phys_obj)
+		i915_gem_detach_phys_object(dev, obj);
+
 	i915_gem_object_unbind(obj);
 
 	list = &obj->map_list;
@@ -3293,3 +3301,180 @@ i915_gem_load(struct drm_device *dev)
 
 	i915_gem_detect_bit_6_swizzle(dev);
 }
+
+/*
+ * Create a physically contiguous memory object for this object
+ * e.g. for cursor + overlay regs
+ */
+int i915_gem_init_phys_object(struct drm_device *dev,
+			      int id, int size)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+	int ret;
+
+	if (dev_priv->mm.phys_objs[id - 1] || !size)
+		return 0;
+
+	phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	if (!phys_obj)
+		return -ENOMEM;
+
+	phys_obj->id = id;
+
+	phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
+	if (!phys_obj->handle) {
+		ret = -ENOMEM;
+		goto kfree_obj;
+	}
+#ifdef CONFIG_X86
+	set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+
+	dev_priv->mm.phys_objs[id - 1] = phys_obj;
+
+	return 0;
+kfree_obj:
+	drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
+	return ret;
+}
+
+void i915_gem_free_phys_object(struct drm_device *dev, int id)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_phys_object *phys_obj;
+
+	if (!dev_priv->mm.phys_objs[id - 1])
+		return;
+
+	phys_obj = dev_priv->mm.phys_objs[id - 1];
+	if (phys_obj->cur_obj) {
+		i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
+	}
+
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
+#endif
+	drm_pci_free(dev, phys_obj->handle);
+	kfree(phys_obj);
+	dev_priv->mm.phys_objs[id - 1] = NULL;
+}
+
+void i915_gem_free_all_phys_object(struct drm_device *dev)
+{
+	int i;
+
+	for (i = 0; i < I915_MAX_PHYS_OBJECT; i++)
+		i915_gem_free_phys_object(dev, i + 1);
+}
+
+void i915_gem_detach_phys_object(struct drm_device *dev,
+				 struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv;
+	int i;
+	int ret;
+	int page_count;
+
+	obj_priv = obj->driver_private;
+	if (!obj_priv->phys_obj)
+		return;
+
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret)
+		goto out;
+
+	page_count = obj->size / PAGE_SIZE;
+
+	for (i = 0; i < page_count; i++) {
+		char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(dst, KM_USER0);
+	}
+	drm_clflush_pages(obj_priv->page_list, page_count);
+	drm_agp_chipset_flush(dev);
+out:
+	obj_priv->phys_obj->cur_obj = NULL;
+	obj_priv->phys_obj = NULL;
+}
+
+int
+i915_gem_attach_phys_object(struct drm_device *dev,
+			    struct drm_gem_object *obj, int id)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+	int page_count;
+	int i;
+
+	if (id > I915_MAX_PHYS_OBJECT)
+		return -EINVAL;
+
+	obj_priv = obj->driver_private;
+
+	if (obj_priv->phys_obj) {
+		if (obj_priv->phys_obj->id == id)
+			return 0;
+		i915_gem_detach_phys_object(dev, obj);
+	}
+
+
+	/* create a new object */
+	if (!dev_priv->mm.phys_objs[id - 1]) {
+		ret = i915_gem_init_phys_object(dev, id,
+						obj->size);
+		if (ret) {
+			DRM_ERROR("failed to init phys object %d size: %d\n", id, obj->size);
+			goto out;
+		}
+	}
+
+	/* bind to the object */
+	obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
+	obj_priv->phys_obj->cur_obj = obj;
+
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret) {
+		DRM_ERROR("failed to get page list\n");
+		goto out;
+	}
+
+	page_count = obj->size / PAGE_SIZE;
+
+	for (i = 0; i < page_count; i++) {
+		char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
+		char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+
+		memcpy(dst, src, PAGE_SIZE);
+		kunmap_atomic(src, KM_USER0);
+	}
+
+	return 0;
+out:
+	return ret;
+}
+
+static int
+i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		     struct drm_i915_gem_pwrite *args,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	void *obj_addr;
+	int ret;
+	char __user *user_data;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+
+	DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
+	ret = copy_from_user(obj_addr, user_data, args->size);
+	if (ret)
+		return -EFAULT;
+
+	drm_agp_chipset_flush(dev);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4372acff5a016327d336e452e76be2f41bd27a97..114a7a1a8740096217b62d5fc108210c3cc8f44d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1020,17 +1020,23 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 		return -ENOMEM;
 	}
 
-	if (dev_priv->cursor_needs_physical) {
-		addr = dev->agp->base + obj_priv->gtt_offset;
-	} else {
+	/* we only need to pin inside GTT if cursor is non-phy */
+	if (!dev_priv->cursor_needs_physical) {
+		ret = i915_gem_object_pin(bo, PAGE_SIZE);
+		if (ret) {
+			DRM_ERROR("failed to pin cursor bo\n");
+			drm_gem_object_unreference(bo);
+			return ret;
+		}
 		addr = obj_priv->gtt_offset;
-	}
-
-	ret = i915_gem_object_pin(bo, PAGE_SIZE);
-	if (ret) {
-		DRM_ERROR("failed to pin cursor bo\n");
-		drm_gem_object_unreference(bo);
-		return ret;
+	} else {
+		ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+		if (ret) {
+			DRM_ERROR("failed to attach phys object\n");
+			drm_gem_object_unreference(bo);
+			return ret;
+		}
+		addr = obj_priv->phys_obj->handle->busaddr;
 	}
 
 	temp = 0;
@@ -1043,7 +1049,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 	I915_WRITE(base, addr);
 
 	if (intel_crtc->cursor_bo) {
-		i915_gem_object_unpin(intel_crtc->cursor_bo);
+		if (dev_priv->cursor_needs_physical) {
+			if (intel_crtc->cursor_bo != bo)
+				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
+		} else
+			i915_gem_object_unpin(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(intel_crtc->cursor_bo);
 	}
 
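
Usage note (not part of the patch): the hunks above wire the new helpers into the cursor path only; I915_GEM_PHYS_OVERLAY_REGS is defined but has no user here. The sketch below shows how a later consumer could bind a GEM object to that slot, assuming the helpers behave as implemented in i915_gem.c above. The function name example_bind_overlay_regs and the regs_bo/bus_addr_out parameters are illustrative, not part of the i915 code.

/* Sketch only; mirrors the cursor handling in intel_crtc_cursor_set(). */
static int example_bind_overlay_regs(struct drm_device *dev,
				     struct drm_gem_object *regs_bo,
				     uint32_t *bus_addr_out)
{
	struct drm_i915_gem_object *obj_priv = regs_bo->driver_private;
	int ret;

	/* Copy the object's current contents into the contiguous PCI
	 * allocation for the overlay-regs slot and record the binding;
	 * the slot memory is created on first use by
	 * i915_gem_init_phys_object(). */
	ret = i915_gem_attach_phys_object(dev, regs_bo,
					  I915_GEM_PHYS_OVERLAY_REGS);
	if (ret)
		return ret;

	/* The hardware is given the bus address of the contiguous buffer;
	 * the CPU can update it through handle->vaddr (mapped WC above) or
	 * via pwrite, which now routes to i915_gem_phys_pwrite(). */
	*bus_addr_out = obj_priv->phys_obj->handle->busaddr;
	return 0;
}

On teardown such a user would call i915_gem_detach_phys_object(dev, regs_bo), which copies the contiguous memory back into the object's shmem pages, exactly as the cursor path does when a cursor buffer is replaced.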