Commit 05394f39 authored by Chris Wilson

drm/i915: Use drm_i915_gem_object as the preferred type

A glorified s/obj_priv/obj/ with a net reduction of over 100 lines and
many characters!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent 185cbcb3
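For orientation before the diff: struct drm_i915_gem_object embeds its struct drm_gem_object as a field named base (visible in the i915_drv.h hunk below), so the driver can pass its own type everywhere, convert once at the userspace boundary, and hand &obj->base to the DRM core. A minimal sketch of that pattern, assuming the container_of-based to_intel_bo() upcast from i915_drv.h; the two helper functions are illustrative, not part of the patch:

struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded DRM-core object, first member */
	unsigned int pin_count;		/* driver-private state (illustrative subset) */
	/* ... */
};

/* Upcast from the embedded core object back to the driver type. */
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/* Before: helpers took the core type and re-derived the driver object. */
static void pin_old(struct drm_gem_object *gem)
{
	struct drm_i915_gem_object *obj_priv = to_intel_bo(gem);

	obj_priv->pin_count++;
}

/* After: helpers take the driver type; the core object is obj->base. */
static void pin_new(struct drm_i915_gem_object *obj)
{
	obj->pin_count++;
}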
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
 	return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-	if (obj_priv->user_pin_count > 0)
+	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj_priv->pin_count > 0)
+	else if (obj->pin_count > 0)
 		return "p";
 	else
 		return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-	switch (obj_priv->tiling_mode) {
+	switch (obj->tiling_mode) {
 	default:
 	case I915_TILING_NONE: return " ";
 	case I915_TILING_X: return "X";
@@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count, ret;
 
@@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, mm_list) {
+	list_for_each_entry(obj, head, mm_list) {
 		seq_printf(m, " ");
-		describe_obj(m, obj_priv);
+		describe_obj(m, obj);
 		seq_printf(m, "\n");
-		total_obj_size += obj_priv->base.size;
-		total_gtt_size += obj_priv->gtt_space->size;
+		total_obj_size += obj->base.size;
+		total_gtt_size += obj->gtt_space->size;
 		count++;
 	}
 	mutex_unlock(&dev->struct_mutex);
@@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
 		seq_printf(m, "%d prepares\n", work->pending);
 
 		if (work->old_fb_obj) {
-			struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-			if(obj_priv)
-				seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+			struct drm_i915_gem_object *obj = work->old_fb_obj;
+			if (obj)
+				seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 		}
 		if (work->pending_flip_obj) {
-			struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-			if(obj_priv)
-				seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+			struct drm_i915_gem_object *obj = work->pending_flip_obj;
+			if (obj)
+				seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
 		}
 	}
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -421,17 +421,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
 	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
 		seq_printf(m, "Fenced object[%2d] = ", i);
 		if (obj == NULL)
 			seq_printf(m, "unused");
 		else
-			describe_obj(m, to_intel_bo(obj));
+			describe_obj(m, obj);
 		seq_printf(m, "\n");
 	}
-	mutex_unlock(&dev->struct_mutex);
 
+	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
@@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
 static void i915_dump_object(struct seq_file *m,
 			     struct io_mapping *mapping,
-			     struct drm_i915_gem_object *obj_priv)
+			     struct drm_i915_gem_object *obj)
 {
 	int page, page_count, i;
 
-	page_count = obj_priv->base.size / PAGE_SIZE;
+	page_count = obj->base.size / PAGE_SIZE;
 	for (page = 0; page < page_count; page++) {
 		u32 *mem = io_mapping_map_wc(mapping,
-					     obj_priv->gtt_offset + page * PAGE_SIZE);
+					     obj->gtt_offset + page * PAGE_SIZE);
 		for (i = 0; i < PAGE_SIZE; i += 4)
 			seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
 		io_mapping_unmap(mem);
@@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-		obj = &obj_priv->base;
-		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-			seq_printf(m, "--- gtt_offset = 0x%08x\n",
-				   obj_priv->gtt_offset);
-			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+		if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+			seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+			i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
 		}
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
@@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	if (!ring->gem_object) {
+	if (!ring->obj) {
 		seq_printf(m, "No ringbuffer setup\n");
 	} else {
 		u8 *virt = ring->virtual_start;
@@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 		   fb->base.height,
 		   fb->base.depth,
 		   fb->base.bits_per_pixel);
-	describe_obj(m, to_intel_bo(fb->obj));
+	describe_obj(m, fb->obj);
 	seq_printf(m, "\n");
 
 	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 			   fb->base.height,
 			   fb->base.depth,
 			   fb->base.bits_per_pixel);
-		describe_obj(m, to_intel_bo(fb->obj));
+		describe_obj(m, fb->obj);
 		seq_printf(m, "\n");
 	}
 
...
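The debugfs hunks above show the shape of the cleanup: list walkers now iterate struct drm_i915_gem_object directly and reach DRM-core fields through ->base instead of upcasting inside every loop body. A trimmed, illustrative version of the accounting loop from i915_gem_object_list_info (not a standalone buildable excerpt):

struct drm_i915_gem_object *obj;
size_t total_obj_size = 0, total_gtt_size = 0;
int count = 0;

list_for_each_entry(obj, head, mm_list) {
	describe_obj(m, obj);			/* helper now takes the i915 type */
	total_obj_size += obj->base.size;	/* DRM-core field, via the embedded base */
	total_gtt_size += obj->gtt_space->size;	/* driver-private field, direct access */
	count++;
}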
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	}
 
 	if (init->ring_size != 0) {
-		if (dev_priv->render_ring.gem_object != NULL) {
+		if (dev_priv->render_ring.obj != NULL) {
 			i915_dma_cleanup(dev);
 			DRM_ERROR("Client tried to initialize ringbuffer in "
 				  "GEM mode\n");
...
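The ring-buffer code follows the same theme: the backing-object pointer in struct intel_ring_buffer becomes the driver type and is shortened from gem_object to obj, which is why the check above and i915_ringbuffer_data now test ring->obj. A sketch of the field change, assuming the struct layout of this era (the real struct has many more members):

struct intel_ring_buffer {
	/* was: struct drm_gem_object *gem_object; */
	struct drm_i915_gem_object *obj;	/* backing storage for the ring */
	/* ... */
};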
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,7 +32,6 @@
 #include "i915_reg.h"
 #include "intel_bios.h"
-#include "i915_trace.h"
 #include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
@@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object {
 	int id;
 	struct page **page_list;
 	drm_dma_handle_t *handle;
-	struct drm_gem_object *cur_obj;
+	struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -125,7 +124,7 @@ struct drm_i915_master_private {
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct list_head lru_list;
 	bool gpu;
 };
@@ -280,9 +279,9 @@ typedef struct drm_i915_private {
 	uint32_t counter;
 	unsigned int seqno_gfx_addr;
 	drm_local_map_t hws_map;
-	struct drm_gem_object *seqno_obj;
-	struct drm_gem_object *pwrctx;
-	struct drm_gem_object *renderctx;
+	struct drm_i915_gem_object *seqno_obj;
+	struct drm_i915_gem_object *pwrctx;
+	struct drm_i915_gem_object *renderctx;
 
 	struct resource mch_res;
@@ -690,14 +689,14 @@ typedef struct drm_i915_private {
 	u8 fmax;
 	u8 fstart;
 
 	u64 last_count1;
 	unsigned long last_time1;
 	u64 last_count2;
 	struct timespec last_time2;
 	unsigned long gfx_power;
 	int c_m;
 	int r_t;
 	u8 corr;
 	spinlock_t *mchdev_lock;
 
 	enum no_fbc_reason no_fbc_reason;
@@ -711,7 +710,6 @@ typedef struct drm_i915_private {
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
@@ -918,7 +916,7 @@ enum intel_chip_family {
 #define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
@@ -947,6 +945,8 @@ enum intel_chip_family {
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
@@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-					      size_t size);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+						  size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+int i915_gem_object_pin(struct drm_i915_gem_object *obj,
+			uint32_t alignment,
 			bool map_and_fenceable);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 /**
@@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
 				  bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
 				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
 			       uint32_t read_domains,
 			       uint32_t write_domain);
 int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
@@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev,
 			 bool interruptible,
 			 struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 					 bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_gem_object *obj,
+				struct drm_i915_gem_object *obj,
 				int id,
 				int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_gem_object *obj);
+				 struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_bind_object(struct drm_gem_object *obj);
-void i915_gem_gtt_unbind_object(struct drm_gem_object *obj);
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
 int i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+				     int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
@@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m,
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
-	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
+	if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \
 	    == NULL)							\
-		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+		LOCK_TEST_WITH_RETURN(dev, file);			\
 } while (0)
...
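With the header converted, the i915_gem.c hunks below repeat one idiom: look up the handle, upcast once at the boundary with to_intel_bo() (safe even for a NULL lookup result, because base is the object's first member and the pointer adjustment is zero), and hand &obj->base back to the DRM core for reference counting. An illustrative skeleton of that ioctl shape; the function name and handle parameter are hypothetical, and locking is trimmed:

static int example_ioctl(struct drm_device *dev,
			 struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	/* Upcast once at the userspace boundary. */
	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (obj == NULL)
		return -ENOENT;

	/* ... operate on driver fields (obj->tiling_mode, obj->gtt_offset)
	 * and on core fields through the base (obj->base.size) ...
	 */

	drm_gem_object_unreference(&obj->base);	/* core refcount lives in base */
	return 0;
}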
...@@ -41,29 +41,30 @@ struct change_domains { ...@@ -41,29 +41,30 @@ struct change_domains {
uint32_t flush_rings; uint32_t flush_rings;
}; };
static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv); static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj);
static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv); static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj);
static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
bool pipelined); bool pipelined);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
int write); int write);
static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t offset,
uint64_t size); uint64_t size);
static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible); bool interruptible);
static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment, unsigned alignment,
bool map_and_fenceable); bool map_and_fenceable);
static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, static int i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv); struct drm_file *file);
static void i915_gem_free_object_tail(struct drm_gem_object *obj); static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
static int i915_gem_inactive_shrink(struct shrinker *shrinker, static int i915_gem_inactive_shrink(struct shrinker *shrinker,
int nr_to_scan, int nr_to_scan,
...@@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev) ...@@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
} }
static inline bool static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{ {
return obj_priv->gtt_space && return obj->gtt_space && !obj->active && obj->pin_count == 0;
!obj_priv->active &&
obj_priv->pin_count == 0;
} }
int i915_gem_do_init(struct drm_device *dev, int i915_gem_do_init(struct drm_device *dev,
...@@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev, ...@@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev,
int int
i915_gem_init_ioctl(struct drm_device *dev, void *data, i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_init *args = data; struct drm_i915_gem_init *args = data;
int ret; int ret;
...@@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data, ...@@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
int int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data; struct drm_i915_gem_get_aperture *args = data;
...@@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, ...@@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_create_ioctl(struct drm_device *dev, void *data, i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_create *args = data; struct drm_i915_gem_create *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
int ret; int ret;
u32 handle; u32 handle;
...@@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data, ...@@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
if (obj == NULL) if (obj == NULL)
return -ENOMEM; return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle); ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) { if (ret) {
drm_gem_object_release(obj); drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev->dev_private, obj->size); i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj); kfree(obj);
return ret; return ret;
} }
/* drop reference from allocate - handle holds it now */ /* drop reference from allocate - handle holds it now */
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj); trace_i915_gem_object_create(obj);
args->handle = handle; args->handle = handle;
return 0; return 0;
} }
static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj) static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{ {
drm_i915_private_t *dev_priv = obj->dev->dev_private; drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj_priv->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
} }
static inline void static inline void
...@@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page, ...@@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page,
* fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
*/ */
static int static int
i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_shmem_pread_fast(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args, struct drm_i915_gem_pread *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain; ssize_t remain;
loff_t offset; loff_t offset;
char __user *user_data; char __user *user_data;
...@@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr; user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size; remain = args->size;
obj_priv = to_intel_bo(obj);
offset = args->offset; offset = args->offset;
while (remain > 0) { while (remain > 0) {
...@@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
* and not take page faults. * and not take page faults.
*/ */
static int static int
i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_shmem_pread_slow(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args, struct drm_i915_gem_pread *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct page **user_pages; struct page **user_pages;
ssize_t remain; ssize_t remain;
...@@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset; offset = args->offset;
while (remain > 0) { while (remain > 0) {
...@@ -575,11 +571,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -575,11 +571,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
*/ */
int int
i915_gem_pread_ioctl(struct drm_device *dev, void *data, i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_pread *args = data; struct drm_i915_gem_pread *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret = 0; int ret = 0;
if (args->size == 0) if (args->size == 0)
...@@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ...@@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
/* Bounds check source. */ /* Bounds check source. */
if (args->offset > obj->size || args->size > obj->size - args->offset) { if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, ...@@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
ret = -EFAULT; ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj)) if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
if (ret == -EFAULT) if (ret == -EFAULT)
ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
...@@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping, ...@@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping,
* user into the GTT, uncached. * user into the GTT, uncached.
*/ */
static int static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_gtt_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain; ssize_t remain;
loff_t offset, page_base; loff_t offset, page_base;
...@@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr; user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size; remain = args->size;
obj_priv = to_intel_bo(obj); offset = obj->gtt_offset + args->offset;
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) { while (remain > 0) {
/* Operation in this page /* Operation in this page
...@@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
*/ */
static int static int
i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_gtt_pwrite_slow(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain; ssize_t remain;
loff_t gtt_page_base, offset; loff_t gtt_page_base, offset;
...@@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if (ret) if (ret)
goto out_unpin_pages; goto out_unpin_pages;
obj_priv = to_intel_bo(obj); offset = obj->gtt_offset + args->offset;
offset = obj_priv->gtt_offset + args->offset;
while (remain > 0) { while (remain > 0) {
/* Operation in this page /* Operation in this page
...@@ -827,12 +820,12 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -827,12 +820,12 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
* copy_from_user into the kmapped pages backing the object. * copy_from_user into the kmapped pages backing the object.
*/ */
static int static int
i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_shmem_pwrite_fast(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
ssize_t remain; ssize_t remain;
loff_t offset; loff_t offset;
char __user *user_data; char __user *user_data;
...@@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr; user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size; remain = args->size;
obj_priv = to_intel_bo(obj);
offset = args->offset; offset = args->offset;
obj_priv->dirty = 1; obj->dirty = 1;
while (remain > 0) { while (remain > 0) {
struct page *page; struct page *page;
...@@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* struct_mutex is held. * struct_mutex is held.
*/ */
static int static int
i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_shmem_pwrite_slow(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct mm_struct *mm = current->mm; struct mm_struct *mm = current->mm;
struct page **user_pages; struct page **user_pages;
ssize_t remain; ssize_t remain;
...@@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, ...@@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
obj_priv = to_intel_bo(obj);
offset = args->offset; offset = args->offset;
obj_priv->dirty = 1; obj->dirty = 1;
while (remain > 0) { while (remain > 0) {
struct page *page; struct page *page;
...@@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file) struct drm_file *file)
{ {
struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_pwrite *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
if (args->size == 0) if (args->size == 0)
...@@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
/* Bounds check destination. */ /* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) { if (args->offset > obj->base.size ||
args->size > obj->base.size - args->offset) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU * pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client. * perspective, requiring manual detiling by the client.
*/ */
if (obj_priv->phys_obj) if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file); ret = i915_gem_phys_pwrite(dev, obj, args, file);
else if (obj_priv->tiling_mode == I915_TILING_NONE && else if (obj->tiling_mode == I915_TILING_NONE &&
obj_priv->gtt_space && obj->gtt_space &&
obj->write_domain != I915_GEM_DOMAIN_CPU) { obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
ret = i915_gem_object_pin(obj, 0, true); ret = i915_gem_object_pin(obj, 0, true);
if (ret) if (ret)
goto out; goto out;
...@@ -1092,7 +1082,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1092,7 +1082,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
} }
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
...@@ -1104,12 +1094,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, ...@@ -1104,12 +1094,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data; struct drm_i915_gem_set_domain *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
uint32_t read_domains = args->read_domains; uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain; uint32_t write_domain = args->write_domain;
int ret; int ret;
...@@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
intel_mark_busy(dev, obj); intel_mark_busy(dev, obj);
...@@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* Update the LRU on the fence for the CPU access that's /* Update the LRU on the fence for the CPU access that's
* about to occur. * about to occur.
*/ */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_fence_reg *reg = struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg]; &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list); &dev_priv->mm.fence_list);
} }
...@@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
} }
/* Maintain LRU order of "inactive" objects */ /* Maintain LRU order of "inactive" objects */
if (ret == 0 && i915_gem_object_is_inactive(obj_priv)) if (ret == 0 && i915_gem_object_is_inactive(obj))
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
...@@ -1181,10 +1169,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ...@@ -1181,10 +1169,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_sw_finish *args = data; struct drm_i915_gem_sw_finish *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
int ret = 0; int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM)) if (!(dev->driver->driver_features & DRIVER_GEM))
...@@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ...@@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
/* Pinned buffers may be scanout, so flush the cache */ /* Pinned buffers may be scanout, so flush the cache */
if (to_intel_bo(obj)->pin_count) if (obj->pin_count)
i915_gem_object_flush_cpu_write_domain(obj); i915_gem_object_flush_cpu_write_domain(obj);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
...@@ -1219,7 +1207,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, ...@@ -1219,7 +1207,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data, i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data; struct drm_i915_gem_mmap *args = data;
...@@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ...@@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM)) if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV; return -ENODEV;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL) if (obj == NULL)
return -ENOENT; return -ENOENT;
...@@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, ...@@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/ */
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{ {
struct drm_gem_object *obj = vma->vm_private_data; struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset; pgoff_t page_offset;
unsigned long pfn; unsigned long pfn;
int ret = 0; int ret = 0;
...@@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */ /* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable); BUG_ON(obj->pin_count && !obj->pin_mappable);
if (obj_priv->gtt_space) { if (obj->gtt_space) {
if (!obj_priv->map_and_fenceable) { if (!obj->map_and_fenceable) {
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
goto unlock; goto unlock;
} }
} }
if (!obj_priv->gtt_space) { if (!obj->gtt_space) {
ret = i915_gem_object_bind_to_gtt(obj, 0, true); ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret) if (ret)
goto unlock; goto unlock;
...@@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
if (ret) if (ret)
goto unlock; goto unlock;
if (!obj_priv->fault_mappable) { if (!obj->fault_mappable) {
obj_priv->fault_mappable = true; obj->fault_mappable = true;
i915_gem_info_update_mappable(dev_priv, obj_priv, true); i915_gem_info_update_mappable(dev_priv, obj, true);
} }
/* Need a new fence register? */ /* Need a new fence register? */
if (obj_priv->tiling_mode != I915_TILING_NONE) { if (obj->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj, true); ret = i915_gem_object_get_fence_reg(obj, true);
if (ret) if (ret)
goto unlock; goto unlock;
} }
if (i915_gem_object_is_inactive(obj_priv)) if (i915_gem_object_is_inactive(obj))
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset; page_offset;
/* Finally, remap it using the new GTT offset */ /* Finally, remap it using the new GTT offset */
...@@ -1356,36 +1343,39 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ...@@ -1356,36 +1343,39 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
* This routine allocates and attaches a fake offset for @obj. * This routine allocates and attaches a fake offset for @obj.
*/ */
static int static int
i915_gem_create_mmap_offset(struct drm_gem_object *obj) i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private; struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list; struct drm_map_list *list;
struct drm_local_map *map; struct drm_local_map *map;
int ret = 0; int ret = 0;
/* Set the object up for mmap'ing */ /* Set the object up for mmap'ing */
list = &obj->map_list; list = &obj->base.map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map) if (!list->map)
return -ENOMEM; return -ENOMEM;
map = list->map; map = list->map;
map->type = _DRM_GEM; map->type = _DRM_GEM;
map->size = obj->size; map->size = obj->base.size;
map->handle = obj; map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */ /* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager, list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
obj->size / PAGE_SIZE, 0, 0); obj->base.size / PAGE_SIZE,
0, 0);
if (!list->file_offset_node) { if (!list->file_offset_node) {
DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); DRM_ERROR("failed to allocate offset for bo %d\n",
obj->base.name);
ret = -ENOSPC; ret = -ENOSPC;
goto out_free_list; goto out_free_list;
} }
list->file_offset_node = drm_mm_get_block(list->file_offset_node, list->file_offset_node = drm_mm_get_block(list->file_offset_node,
obj->size / PAGE_SIZE, 0); obj->base.size / PAGE_SIZE,
0);
if (!list->file_offset_node) { if (!list->file_offset_node) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_list; goto out_free_list;
...@@ -1424,29 +1414,28 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj) ...@@ -1424,29 +1414,28 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
* fixup by i915_gem_fault(). * fixup by i915_gem_fault().
*/ */
void void
i915_gem_release_mmap(struct drm_gem_object *obj) i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (unlikely(obj->map_list.map && dev->dev_mapping)) if (unlikely(obj->base.map_list.map && dev->dev_mapping))
unmap_mapping_range(dev->dev_mapping, unmap_mapping_range(dev->dev_mapping,
(loff_t)obj->map_list.hash.key<<PAGE_SHIFT, (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
obj->size, 1); obj->base.size, 1);
if (obj_priv->fault_mappable) { if (obj->fault_mappable) {
obj_priv->fault_mappable = false; obj->fault_mappable = false;
i915_gem_info_update_mappable(dev_priv, obj_priv, false); i915_gem_info_update_mappable(dev_priv, obj, false);
} }
} }
static void static void
i915_gem_free_mmap_offset(struct drm_gem_object *obj) i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private; struct drm_gem_mm *mm = dev->mm_private;
struct drm_map_list *list = &obj->map_list; struct drm_map_list *list = &obj->base.map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash); drm_ht_remove_item(&mm->offset_hash, &list->hash);
drm_mm_put_block(list->file_offset_node); drm_mm_put_block(list->file_offset_node);
...@@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj) ...@@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* potential fence register mapping. * potential fence register mapping.
*/ */
static uint32_t static uint32_t
i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj_priv->base.dev; struct drm_device *dev = obj->base.dev;
/* /*
* Minimum alignment is 4k (GTT page size), but might be greater * Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object. * if a fence register is needed for the object.
*/ */
if (INTEL_INFO(dev)->gen >= 4 || if (INTEL_INFO(dev)->gen >= 4 ||
obj_priv->tiling_mode == I915_TILING_NONE) obj->tiling_mode == I915_TILING_NONE)
return 4096; return 4096;
/* /*
* Previous chips need to be aligned to the size of the smallest * Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object. * fence register that can contain the object.
*/ */
return i915_gem_get_gtt_size(obj_priv); return i915_gem_get_gtt_size(obj);
} }
/** /**
...@@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv) ...@@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
* unfenced tiled surface requirements. * unfenced tiled surface requirements.
*/ */
static uint32_t static uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj_priv->base.dev; struct drm_device *dev = obj->base.dev;
int tile_height; int tile_height;
/* /*
* Minimum alignment is 4k (GTT page size) for sane hw. * Minimum alignment is 4k (GTT page size) for sane hw.
*/ */
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) || if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
obj_priv->tiling_mode == I915_TILING_NONE) obj->tiling_mode == I915_TILING_NONE)
return 4096; return 4096;
/* /*
...@@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv) ...@@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
* placed in a fenced gtt region). * placed in a fenced gtt region).
*/ */
if (IS_GEN2(dev) || if (IS_GEN2(dev) ||
(obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_height = 32; tile_height = 32;
else else
tile_height = 8; tile_height = 8;
return tile_height * obj_priv->stride * 2; return tile_height * obj->stride * 2;
} }
static uint32_t static uint32_t
i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj_priv->base.dev; struct drm_device *dev = obj->base.dev;
uint32_t size; uint32_t size;
/* /*
...@@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) ...@@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
* if a fence register is needed for the object. * if a fence register is needed for the object.
*/ */
if (INTEL_INFO(dev)->gen >= 4) if (INTEL_INFO(dev)->gen >= 4)
return obj_priv->base.size; return obj->base.size;
/* /*
* Previous chips need to be aligned to the size of the smallest * Previous chips need to be aligned to the size of the smallest
...@@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) ...@@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
else else
size = 512*1024; size = 512*1024;
while (size < obj_priv->base.size) while (size < obj->base.size)
size <<= 1; size <<= 1;
return size; return size;
...@@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) ...@@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device * @dev: DRM device
* @data: GTT mapping ioctl data * @data: GTT mapping ioctl data
* @file_priv: GEM object info * @file: GEM object info
* *
* Simply returns the fake offset to userspace so it can mmap it. * Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things * The mmap call will end up in drm_gem_mmap(), which will set things
...@@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv) ...@@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
*/ */
int int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data; struct drm_i915_gem_mmap_gtt *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
if (!(dev->driver->driver_features & DRIVER_GEM)) if (!(dev->driver->driver_features & DRIVER_GEM))
...@@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, ...@@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
if (obj->size > dev_priv->mm.gtt_mappable_end) { if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
ret = -E2BIG; ret = -E2BIG;
goto unlock; goto unlock;
} }
if (obj_priv->madv != I915_MADV_WILLNEED) { if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n"); DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
if (!obj->map_list.map) { if (!obj->base.map_list.map) {
ret = i915_gem_create_mmap_offset(obj); ret = i915_gem_create_mmap_offset(obj);
if (ret) if (ret)
goto out; goto out;
} }
args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT; args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
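For context, a hedged userspace sketch of the round trip this ioctl enables; the helper name and error handling are invented, but DRM_IOCTL_I915_GEM_MMAP_GTT and struct drm_i915_gem_mmap_gtt are the real uAPI:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    static void *bo_map_gtt(int fd, uint32_t handle, size_t size)
    {
        struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
            return NULL;
        /* arg.offset is the fake offset returned above; the real mapping
         * is set up by drm_gem_mmap() when it comes back through mmap(). */
        return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, arg.offset);
    }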
static int static int
i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
gfp_t gfpmask) gfp_t gfpmask)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page_count, i; int page_count, i;
struct address_space *mapping; struct address_space *mapping;
struct inode *inode; struct inode *inode;
...@@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, ...@@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
/* Get the list of pages out of our struct file. They'll be pinned /* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them. * at this point until we release them.
*/ */
page_count = obj->size / PAGE_SIZE; page_count = obj->base.size / PAGE_SIZE;
BUG_ON(obj_priv->pages != NULL); BUG_ON(obj->pages != NULL);
obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *)); obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
if (obj_priv->pages == NULL) if (obj->pages == NULL)
return -ENOMEM; return -ENOMEM;
inode = obj->filp->f_path.dentry->d_inode; inode = obj->base.filp->f_path.dentry->d_inode;
mapping = inode->i_mapping; mapping = inode->i_mapping;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
page = read_cache_page_gfp(mapping, i, page = read_cache_page_gfp(mapping, i,
...@@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj, ...@@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
if (IS_ERR(page)) if (IS_ERR(page))
goto err_pages; goto err_pages;
obj_priv->pages[i] = page; obj->pages[i] = page;
} }
if (obj_priv->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_do_bit_17_swizzle(obj); i915_gem_object_do_bit_17_swizzle(obj);
return 0; return 0;
err_pages: err_pages:
while (i--) while (i--)
page_cache_release(obj_priv->pages[i]); page_cache_release(obj->pages[i]);
drm_free_large(obj_priv->pages); drm_free_large(obj->pages);
obj_priv->pages = NULL; obj->pages = NULL;
return PTR_ERR(page); return PTR_ERR(page);
} }
static void static void
i915_gem_object_put_pages_gtt(struct drm_gem_object *obj) i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->base.size / PAGE_SIZE;
int page_count = obj->size / PAGE_SIZE;
int i; int i;
BUG_ON(obj_priv->madv == __I915_MADV_PURGED); BUG_ON(obj->madv == __I915_MADV_PURGED);
if (obj_priv->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj); i915_gem_object_save_bit_17_swizzle(obj);
if (obj_priv->madv == I915_MADV_DONTNEED) if (obj->madv == I915_MADV_DONTNEED)
obj_priv->dirty = 0; obj->dirty = 0;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
if (obj_priv->dirty) if (obj->dirty)
set_page_dirty(obj_priv->pages[i]); set_page_dirty(obj->pages[i]);
if (obj_priv->madv == I915_MADV_WILLNEED) if (obj->madv == I915_MADV_WILLNEED)
mark_page_accessed(obj_priv->pages[i]); mark_page_accessed(obj->pages[i]);
page_cache_release(obj_priv->pages[i]); page_cache_release(obj->pages[i]);
} }
obj_priv->dirty = 0; obj->dirty = 0;
drm_free_large(obj_priv->pages); drm_free_large(obj->pages);
obj_priv->pages = NULL; obj->pages = NULL;
} }
static uint32_t static uint32_t
...@@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev, ...@@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev,
} }
static void static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t seqno = i915_gem_next_request_seqno(dev, ring); uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL); BUG_ON(ring == NULL);
obj_priv->ring = ring; obj->ring = ring;
/* Add a reference if we're newly entering the active list. */ /* Add a reference if we're newly entering the active list. */
if (!obj_priv->active) { if (!obj->active) {
drm_gem_object_reference(obj); drm_gem_object_reference(&obj->base);
obj_priv->active = 1; obj->active = 1;
} }
/* Move from whatever list we were on to the tail of execution. */ /* Move from whatever list we were on to the tail of execution. */
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
list_move_tail(&obj_priv->ring_list, &ring->active_list); list_move_tail(&obj->ring_list, &ring->active_list);
obj_priv->last_rendering_seqno = seqno; obj->last_rendering_seqno = seqno;
} }
static void static void
i915_gem_object_move_to_flushing(struct drm_gem_object *obj) i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
BUG_ON(!obj_priv->active); BUG_ON(!obj->active);
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
list_del_init(&obj_priv->ring_list); list_del_init(&obj->ring_list);
obj_priv->last_rendering_seqno = 0; obj->last_rendering_seqno = 0;
} }
/* Immediately discard the backing storage */ /* Immediately discard the backing storage */
static void static void
i915_gem_object_truncate(struct drm_gem_object *obj) i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode; struct inode *inode;
/* Our goal here is to return as much of the memory as /* Our goal here is to return as much of the memory as
...@@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj) ...@@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
* backing pages, *now*. Here we mirror the actions taken * backing pages, *now*. Here we mirror the actions taken
* by shmem_delete_inode() to release the backing store. * by shmem_delete_inode() to release the backing store.
*/ */
inode = obj->filp->f_path.dentry->d_inode; inode = obj->base.filp->f_path.dentry->d_inode;
truncate_inode_pages(inode->i_mapping, 0); truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range) if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1); inode->i_op->truncate_range(inode, 0, (loff_t)-1);
obj_priv->madv = __I915_MADV_PURGED; obj->madv = __I915_MADV_PURGED;
} }
static inline int static inline int
i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{ {
return obj_priv->madv == I915_MADV_DONTNEED; return obj->madv == I915_MADV_DONTNEED;
} }
static void static void
i915_gem_object_move_to_inactive(struct drm_gem_object *obj) i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count != 0) if (obj->pin_count != 0)
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list); list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
else else
list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_del_init(&obj_priv->ring_list); list_del_init(&obj->ring_list);
BUG_ON(!list_empty(&obj_priv->gpu_write_list)); BUG_ON(!list_empty(&obj->gpu_write_list));
obj_priv->last_rendering_seqno = 0; obj->last_rendering_seqno = 0;
obj_priv->ring = NULL; obj->ring = NULL;
if (obj_priv->active) { if (obj->active) {
obj_priv->active = 0; obj->active = 0;
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
} }
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
} }
...@@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev, ...@@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv, *next; struct drm_i915_gem_object *obj, *next;
list_for_each_entry_safe(obj_priv, next, list_for_each_entry_safe(obj, next,
&ring->gpu_write_list, &ring->gpu_write_list,
gpu_write_list) { gpu_write_list) {
struct drm_gem_object *obj = &obj_priv->base; if (obj->base.write_domain & flush_domains) {
uint32_t old_write_domain = obj->base.write_domain;
if (obj->write_domain & flush_domains) { obj->base.write_domain = 0;
uint32_t old_write_domain = obj->write_domain; list_del_init(&obj->gpu_write_list);
obj->write_domain = 0;
list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, ring); i915_gem_object_move_to_active(obj, ring);
/* update the fence lru list */ /* update the fence lru list */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { if (obj->fence_reg != I915_FENCE_REG_NONE) {
struct drm_i915_fence_reg *reg = struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg]; &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, list_move_tail(&reg->lru_list,
&dev_priv->mm.fence_list); &dev_priv->mm.fence_list);
} }
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
obj->read_domains, obj->base.read_domains,
old_write_domain); old_write_domain);
} }
} }
...@@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, ...@@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
} }
while (!list_empty(&ring->active_list)) { while (!list_empty(&ring->active_list)) {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
obj_priv = list_first_entry(&ring->active_list, obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object, struct drm_i915_gem_object,
ring_list); ring_list);
obj_priv->base.write_domain = 0; obj->base.write_domain = 0;
list_del_init(&obj_priv->gpu_write_list); list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(&obj_priv->base); i915_gem_object_move_to_inactive(obj);
} }
} }
void i915_gem_reset(struct drm_device *dev) void i915_gem_reset(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
int i; int i;
i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
...@@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev) ...@@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev)
* lost bo to the inactive list. * lost bo to the inactive list.
*/ */
while (!list_empty(&dev_priv->mm.flushing_list)) { while (!list_empty(&dev_priv->mm.flushing_list)) {
obj_priv = list_first_entry(&dev_priv->mm.flushing_list, obj = list_first_entry(&dev_priv->mm.flushing_list,
struct drm_i915_gem_object, struct drm_i915_gem_object,
mm_list); mm_list);
obj_priv->base.write_domain = 0; obj->base.write_domain = 0;
list_del_init(&obj_priv->gpu_write_list); list_del_init(&obj->gpu_write_list);
i915_gem_object_move_to_inactive(&obj_priv->base); i915_gem_object_move_to_inactive(obj);
} }
/* Move everything out of the GPU domains to ensure we do any /* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse. * necessary invalidation upon reuse.
*/ */
list_for_each_entry(obj_priv, list_for_each_entry(obj,
&dev_priv->mm.inactive_list, &dev_priv->mm.inactive_list,
mm_list) mm_list)
{ {
obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
} }
/* The fence registers are invalidated so clear them out */ /* The fence registers are invalidated so clear them out */
...@@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev, ...@@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate. * by the ringbuffer to the flushing/inactive lists as appropriate.
*/ */
while (!list_empty(&ring->active_list)) { while (!list_empty(&ring->active_list)) {
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj_priv = list_first_entry(&ring->active_list, obj = list_first_entry(&ring->active_list,
struct drm_i915_gem_object, struct drm_i915_gem_object,
ring_list); ring_list);
if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
break; break;
obj = &obj_priv->base; if (obj->base.write_domain != 0)
if (obj->write_domain != 0)
i915_gem_object_move_to_flushing(obj); i915_gem_object_move_to_flushing(obj);
else else
i915_gem_object_move_to_inactive(obj); i915_gem_object_move_to_inactive(obj);
...@@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev) ...@@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
if (!list_empty(&dev_priv->mm.deferred_free_list)) { if (!list_empty(&dev_priv->mm.deferred_free_list)) {
struct drm_i915_gem_object *obj_priv, *tmp; struct drm_i915_gem_object *obj, *next;
/* We must be careful that during unbind() we do not /* We must be careful that during unbind() we do not
* accidentally recurse infinitely into retire requests. * accidentally recurse infinitely into retire requests.
* Currently: * Currently:
* retire -> free -> unbind -> wait -> retire_ring * retire -> free -> unbind -> wait -> retire_ring
*/ */
list_for_each_entry_safe(obj_priv, tmp, list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list, &dev_priv->mm.deferred_free_list,
mm_list) mm_list)
i915_gem_free_object_tail(&obj_priv->base); i915_gem_free_object_tail(obj);
} }
i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
...@@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno, ...@@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
static void static void
i915_gem_flush_ring(struct drm_device *dev, i915_gem_flush_ring(struct drm_device *dev,
struct drm_file *file_priv,
struct intel_ring_buffer *ring, struct intel_ring_buffer *ring,
uint32_t invalidate_domains, uint32_t invalidate_domains,
uint32_t flush_domains) uint32_t flush_domains)
...@@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev, ...@@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev,
static void static void
i915_gem_flush(struct drm_device *dev, i915_gem_flush(struct drm_device *dev,
struct drm_file *file_priv,
uint32_t invalidate_domains, uint32_t invalidate_domains,
uint32_t flush_domains, uint32_t flush_domains,
uint32_t flush_rings) uint32_t flush_rings)
...@@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev, ...@@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev,
if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) { if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
if (flush_rings & RING_RENDER) if (flush_rings & RING_RENDER)
i915_gem_flush_ring(dev, file_priv, i915_gem_flush_ring(dev, &dev_priv->render_ring,
&dev_priv->render_ring,
invalidate_domains, flush_domains); invalidate_domains, flush_domains);
if (flush_rings & RING_BSD) if (flush_rings & RING_BSD)
i915_gem_flush_ring(dev, file_priv, i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
&dev_priv->bsd_ring,
invalidate_domains, flush_domains); invalidate_domains, flush_domains);
if (flush_rings & RING_BLT) if (flush_rings & RING_BLT)
i915_gem_flush_ring(dev, file_priv, i915_gem_flush_ring(dev, &dev_priv->blt_ring,
&dev_priv->blt_ring,
invalidate_domains, flush_domains); invalidate_domains, flush_domains);
} }
} }
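A sketch of the expected caller, assuming a struct change_domains accumulator like the one filled in by i915_gem_object_set_to_gpu_domain() later in this file; the variable names are illustrative:

    struct change_domains cd = { 0, };

    /* ... accumulate per-object domain transitions into &cd ... */
    if (cd.invalidate_domains | cd.flush_domains)
        i915_gem_flush(dev,
                       cd.invalidate_domains,
                       cd.flush_domains,
                       cd.flush_rings);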
...@@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev, ...@@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev,
* safe to unbind from the GTT or access from the CPU. * safe to unbind from the GTT or access from the CPU.
*/ */
static int static int
i915_gem_object_wait_rendering(struct drm_gem_object *obj, i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible) bool interruptible)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret; int ret;
/* This function only exists to support waiting for existing rendering, /* This function only exists to support waiting for existing rendering,
* not for emitting required flushes. * not for emitting required flushes.
*/ */
BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for /* If there is rendering queued on the buffer being evicted, wait for
* it. * it.
*/ */
if (obj_priv->active) { if (obj->active) {
ret = i915_do_wait_request(dev, ret = i915_do_wait_request(dev,
obj_priv->last_rendering_seqno, obj->last_rendering_seqno,
interruptible, interruptible,
obj_priv->ring); obj->ring);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj, ...@@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
* Unbinds an object from the GTT aperture. * Unbinds an object from the GTT aperture.
*/ */
int int
i915_gem_object_unbind(struct drm_gem_object *obj) i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0; int ret = 0;
if (obj_priv->gtt_space == NULL) if (obj->gtt_space == NULL)
return 0; return 0;
if (obj_priv->pin_count != 0) { if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n"); DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL; return -EINVAL;
} }
...@@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj) ...@@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/ */
if (ret) { if (ret) {
i915_gem_clflush_object(obj); i915_gem_clflush_object(obj);
obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
} }
/* release the fence reg _after_ flushing */ /* release the fence reg _after_ flushing */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) if (obj->fence_reg != I915_FENCE_REG_NONE)
i915_gem_clear_fence_reg(obj); i915_gem_clear_fence_reg(obj);
i915_gem_gtt_unbind_object(obj); i915_gem_gtt_unbind_object(obj);
i915_gem_object_put_pages_gtt(obj); i915_gem_object_put_pages_gtt(obj);
i915_gem_info_remove_gtt(dev_priv, obj_priv); i915_gem_info_remove_gtt(dev_priv, obj);
list_del_init(&obj_priv->mm_list); list_del_init(&obj->mm_list);
/* Avoid an unnecessary call to unbind on rebind. */ /* Avoid an unnecessary call to unbind on rebind. */
obj_priv->map_and_fenceable = true; obj->map_and_fenceable = true;
drm_mm_put_block(obj_priv->gtt_space); drm_mm_put_block(obj->gtt_space);
obj_priv->gtt_space = NULL; obj->gtt_space = NULL;
obj_priv->gtt_offset = 0; obj->gtt_offset = 0;
if (i915_gem_object_is_purgeable(obj_priv)) if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj); i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj); trace_i915_gem_object_unbind(obj);
...@@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev, ...@@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev,
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0; return 0;
i915_gem_flush_ring(dev, NULL, ring, i915_gem_flush_ring(dev, ring,
I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
return i915_wait_request(dev, return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring), i915_gem_next_request_seqno(dev, ring),
...@@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev) ...@@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev)
return 0; return 0;
} }
static void sandybridge_write_fence_reg(struct drm_gem_object *obj) static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size = obj->gtt_space->size;
u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj->fence_reg;
int regnum = obj_priv->fence_reg;
uint64_t val; uint64_t val;
val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32; 0xfffff000) << 32;
val |= obj_priv->gtt_offset & 0xfffff000; val |= obj->gtt_offset & 0xfffff000;
val |= (uint64_t)((obj_priv->stride / 128) - 1) << val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT; SANDYBRIDGE_FENCE_PITCH_SHIFT;
if (obj_priv->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID; val |= I965_FENCE_REG_VALID;
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
} }
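The fence value packs the end address into the upper dword and the start, pitch, and tiling into the lower one. A worked encoding under assumed values (offset 1 MiB, region 512 KiB, stride 2048 bytes, X-tiled), using the same register macros:

    uint64_t val;

    val  = (uint64_t)((0x00100000 + 0x00080000 - 4096) & 0xfffff000) << 32;
    val |= 0x00100000 & 0xfffff000;          /* start of the fenced range */
    val |= (uint64_t)(2048 / 128 - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT;
    val |= I965_FENCE_REG_VALID;             /* X-tiled, so no Y bit */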
static void i965_write_fence_reg(struct drm_gem_object *obj) static void i965_write_fence_reg(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size = obj->gtt_space->size;
u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj->fence_reg;
int regnum = obj_priv->fence_reg;
uint64_t val; uint64_t val;
val = (uint64_t)((obj_priv->gtt_offset + size - 4096) & val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32; 0xfffff000) << 32;
val |= obj_priv->gtt_offset & 0xfffff000; val |= obj->gtt_offset & 0xfffff000;
val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
if (obj_priv->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT; val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID; val |= I965_FENCE_REG_VALID;
I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
} }
static void i915_write_fence_reg(struct drm_gem_object *obj) static void i915_write_fence_reg(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size = obj->gtt_space->size;
u32 size = i915_gem_get_gtt_size(obj_priv);
uint32_t fence_reg, val, pitch_val; uint32_t fence_reg, val, pitch_val;
int tile_width; int tile_width;
if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (size - 1))) { (obj->gtt_offset & (size - 1))) {
WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n", WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
__func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size, __func__, obj->gtt_offset, obj->map_and_fenceable, size,
obj_priv->gtt_space->start, obj_priv->gtt_space->size); obj->gtt_space->start, obj->gtt_space->size);
return; return;
} }
if (obj_priv->tiling_mode == I915_TILING_Y && if (obj->tiling_mode == I915_TILING_Y &&
HAS_128_BYTE_Y_TILING(dev)) HAS_128_BYTE_Y_TILING(dev))
tile_width = 128; tile_width = 128;
else else
tile_width = 512; tile_width = 512;
/* Note: pitch better be a power of two tile widths */ /* Note: pitch better be a power of two tile widths */
pitch_val = obj_priv->stride / tile_width; pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1; pitch_val = ffs(pitch_val) - 1;
if (obj_priv->tiling_mode == I915_TILING_Y && if (obj->tiling_mode == I915_TILING_Y &&
HAS_128_BYTE_Y_TILING(dev)) HAS_128_BYTE_Y_TILING(dev))
WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
else else
WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL); WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
val = obj_priv->gtt_offset; val = obj->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= 1 << I830_FENCE_TILING_Y_SHIFT;
val |= I915_FENCE_SIZE_BITS(size); val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT; val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID; val |= I830_FENCE_REG_VALID;
fence_reg = obj_priv->fence_reg; fence_reg = obj->fence_reg;
if (fence_reg < 8) if (fence_reg < 8)
fence_reg = FENCE_REG_830_0 + fence_reg * 4; fence_reg = FENCE_REG_830_0 + fence_reg * 4;
else else
...@@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj) ...@@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj)
I915_WRITE(fence_reg, val); I915_WRITE(fence_reg, val);
} }
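pitch_val ends up holding log2 of the stride measured in tile widths, since the register encodes pitch as a power-of-two exponent. Worked through with an assumed 4096-byte X-tiled stride and 512-byte tiles:

    u32 pitch_val = 4096 / 512;      /* 8 tile widths */

    pitch_val = ffs(pitch_val) - 1;  /* ffs(8) == 4, so pitch_val == 3 */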
static void i830_write_fence_reg(struct drm_gem_object *obj) static void i830_write_fence_reg(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); u32 size = obj->gtt_space->size;
u32 size = i915_gem_get_gtt_size(obj_priv); int regnum = obj->fence_reg;
int regnum = obj_priv->fence_reg;
uint32_t val; uint32_t val;
uint32_t pitch_val; uint32_t pitch_val;
uint32_t fence_size_bits; uint32_t fence_size_bits;
if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
(obj_priv->gtt_offset & (obj->size - 1))) { (obj->gtt_offset & (obj->base.size - 1))) {
WARN(1, "%s: object 0x%08x not 512K or size aligned\n", WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
__func__, obj_priv->gtt_offset); __func__, obj->gtt_offset);
return; return;
} }
pitch_val = obj_priv->stride / 128; pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1; pitch_val = ffs(pitch_val) - 1;
WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL); WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
val = obj_priv->gtt_offset; val = obj->gtt_offset;
if (obj_priv->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT; val |= 1 << I830_FENCE_TILING_Y_SHIFT;
fence_size_bits = I830_FENCE_SIZE_BITS(size); fence_size_bits = I830_FENCE_SIZE_BITS(size);
WARN_ON(fence_size_bits & ~0x00000f00); WARN_ON(fence_size_bits & ~0x00000f00);
...@@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev, ...@@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev,
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_fence_reg *reg; struct drm_i915_fence_reg *reg;
struct drm_i915_gem_object *obj_priv = NULL; struct drm_i915_gem_object *obj = NULL;
int i, avail, ret; int i, avail, ret;
/* First try to find a free reg */ /* First try to find a free reg */
...@@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev, ...@@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev,
if (!reg->obj) if (!reg->obj)
return i; return i;
obj_priv = to_intel_bo(reg->obj); if (!reg->obj->pin_count)
if (!obj_priv->pin_count) avail++;
avail++;
} }
if (avail == 0) if (avail == 0)
...@@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev, ...@@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev,
avail = I915_FENCE_REG_NONE; avail = I915_FENCE_REG_NONE;
list_for_each_entry(reg, &dev_priv->mm.fence_list, list_for_each_entry(reg, &dev_priv->mm.fence_list,
lru_list) { lru_list) {
obj_priv = to_intel_bo(reg->obj); obj = reg->obj;
if (obj_priv->pin_count) if (obj->pin_count)
continue; continue;
/* found one! */ /* found one! */
avail = obj_priv->fence_reg; avail = obj->fence_reg;
break; break;
} }
...@@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev, ...@@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev,
* might drop that one, causing a use-after-free in it. So hold a * might drop that one, causing a use-after-free in it. So hold a
* private reference to obj like the other callers of put_fence_reg * private reference to obj like the other callers of put_fence_reg
* (set_tiling ioctl) do. */ * (set_tiling ioctl) do. */
drm_gem_object_reference(&obj_priv->base); drm_gem_object_reference(&obj->base);
ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible); ret = i915_gem_object_put_fence_reg(obj, interruptible);
drm_gem_object_unreference(&obj_priv->base); drm_gem_object_unreference(&obj->base);
if (ret != 0) if (ret != 0)
return ret; return ret;
...@@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev, ...@@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format. * and tiling format.
*/ */
int int
i915_gem_object_get_fence_reg(struct drm_gem_object *obj, i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
bool interruptible) bool interruptible)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg = NULL; struct drm_i915_fence_reg *reg = NULL;
int ret; int ret;
/* Just update our place in the LRU if our fence is getting used. */ /* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { if (obj->fence_reg != I915_FENCE_REG_NONE) {
reg = &dev_priv->fence_regs[obj_priv->fence_reg]; reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
return 0; return 0;
} }
switch (obj_priv->tiling_mode) { switch (obj->tiling_mode) {
case I915_TILING_NONE: case I915_TILING_NONE:
WARN(1, "allocating a fence for non-tiled object?\n"); WARN(1, "allocating a fence for non-tiled object?\n");
break; break;
case I915_TILING_X: case I915_TILING_X:
if (!obj_priv->stride) if (!obj->stride)
return -EINVAL; return -EINVAL;
WARN((obj_priv->stride & (512 - 1)), WARN((obj->stride & (512 - 1)),
"object 0x%08x is X tiled but has non-512B pitch\n", "object 0x%08x is X tiled but has non-512B pitch\n",
obj_priv->gtt_offset); obj->gtt_offset);
break; break;
case I915_TILING_Y: case I915_TILING_Y:
if (!obj_priv->stride) if (!obj->stride)
return -EINVAL; return -EINVAL;
WARN((obj_priv->stride & (128 - 1)), WARN((obj->stride & (128 - 1)),
"object 0x%08x is Y tiled but has non-128B pitch\n", "object 0x%08x is Y tiled but has non-128B pitch\n",
obj_priv->gtt_offset); obj->gtt_offset);
break; break;
} }
...@@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, ...@@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
if (ret < 0) if (ret < 0)
return ret; return ret;
obj_priv->fence_reg = ret; obj->fence_reg = ret;
reg = &dev_priv->fence_regs[obj_priv->fence_reg]; reg = &dev_priv->fence_regs[obj->fence_reg];
list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
reg->obj = obj; reg->obj = obj;
...@@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, ...@@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
} }
trace_i915_gem_object_get_fence(obj, trace_i915_gem_object_get_fence(obj,
obj_priv->fence_reg, obj->fence_reg,
obj_priv->tiling_mode); obj->tiling_mode);
return 0; return 0;
} }
...@@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj, ...@@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
* @obj: object to clear * @obj: object to clear
* *
* Zeroes out the fence register itself and clears out the associated * Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv. * data structures in dev_priv and obj.
*/ */
static void static void
i915_gem_clear_fence_reg(struct drm_gem_object *obj) i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg];
struct drm_i915_fence_reg *reg =
&dev_priv->fence_regs[obj_priv->fence_reg];
uint32_t fence_reg; uint32_t fence_reg;
switch (INTEL_INFO(dev)->gen) { switch (INTEL_INFO(dev)->gen) {
case 6: case 6:
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
(obj_priv->fence_reg * 8), 0); (obj->fence_reg * 8), 0);
break; break;
case 5: case 5:
case 4: case 4:
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0);
break; break;
case 3: case 3:
if (obj_priv->fence_reg >= 8) if (obj->fence_reg >= 8)
fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
else else
case 2: case 2:
fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
I915_WRITE(fence_reg, 0); I915_WRITE(fence_reg, 0);
break; break;
} }
reg->obj = NULL; reg->obj = NULL;
obj_priv->fence_reg = I915_FENCE_REG_NONE; obj->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list); list_del_init(&reg->lru_list);
} }
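The gen-3 arm above shares its I915_WRITE() with gen 2 through an unusual "else case 2:" fall-through. An equivalent, more conventional rendering of the same two arms (illustrative only, not the committed code):

    case 3:
        if (obj->fence_reg >= 8)
            fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
        else
            fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
        I915_WRITE(fence_reg, 0);
        break;
    case 2:
        I915_WRITE(FENCE_REG_830_0 + obj->fence_reg * 4, 0);
        break;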
...@@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj) ...@@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
* @interruptible: whether the wait upon the fence is interruptible * @interruptible: whether the wait upon the fence is interruptible
* *
* Zeroes out the fence register itself and clears out the associated * Zeroes out the fence register itself and clears out the associated
* data structures in dev_priv and obj_priv. * data structures in dev_priv and obj.
*/ */
int int
i915_gem_object_put_fence_reg(struct drm_gem_object *obj, i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
bool interruptible) bool interruptible)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_i915_fence_reg *reg; struct drm_i915_fence_reg *reg;
if (obj_priv->fence_reg == I915_FENCE_REG_NONE) if (obj->fence_reg == I915_FENCE_REG_NONE)
return 0; return 0;
/* If we've changed tiling, GTT-mappings of the object /* If we've changed tiling, GTT-mappings of the object
...@@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, ...@@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
* therefore we must wait for any outstanding access to complete * therefore we must wait for any outstanding access to complete
* before clearing the fence. * before clearing the fence.
*/ */
reg = &dev_priv->fence_regs[obj_priv->fence_reg]; reg = &dev_priv->fence_regs[obj->fence_reg];
if (reg->gpu) { if (reg->gpu) {
int ret; int ret;
...@@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj, ...@@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
* Finds free space in the GTT aperture and binds the object there. * Finds free space in the GTT aperture and binds the object there.
*/ */
static int static int
i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
unsigned alignment, unsigned alignment,
bool map_and_fenceable) bool map_and_fenceable)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space; struct drm_mm_node *free_space;
gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
u32 size, fence_size, fence_alignment, unfenced_alignment; u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable; bool mappable, fenceable;
int ret; int ret;
if (obj_priv->madv != I915_MADV_WILLNEED) { if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n"); DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL; return -EINVAL;
} }
fence_size = i915_gem_get_gtt_size(obj_priv); fence_size = i915_gem_get_gtt_size(obj);
fence_alignment = i915_gem_get_gtt_alignment(obj_priv); fence_alignment = i915_gem_get_gtt_alignment(obj);
unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv); unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
if (alignment == 0) if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment : alignment = map_and_fenceable ? fence_alignment :
...@@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
return -EINVAL; return -EINVAL;
} }
size = map_and_fenceable ? fence_size : obj->size; size = map_and_fenceable ? fence_size : obj->base.size;
/* If the object is bigger than the entire aperture, reject it early /* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space. * before evicting everything in a vain attempt to find space.
*/ */
if (obj->size > if (obj->base.size >
(map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n"); DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG; return -E2BIG;
...@@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
if (free_space != NULL) { if (free_space != NULL) {
if (map_and_fenceable) if (map_and_fenceable)
obj_priv->gtt_space = obj->gtt_space =
drm_mm_get_block_range_generic(free_space, drm_mm_get_block_range_generic(free_space,
size, alignment, 0, size, alignment, 0,
dev_priv->mm.gtt_mappable_end, dev_priv->mm.gtt_mappable_end,
0); 0);
else else
obj_priv->gtt_space = obj->gtt_space =
drm_mm_get_block(free_space, size, alignment); drm_mm_get_block(free_space, size, alignment);
} }
if (obj_priv->gtt_space == NULL) { if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble /* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory. * fitting our object in, we're out of memory.
*/ */
...@@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
ret = i915_gem_object_get_pages_gtt(obj, gfpmask); ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) { if (ret) {
drm_mm_put_block(obj_priv->gtt_space); drm_mm_put_block(obj->gtt_space);
obj_priv->gtt_space = NULL; obj->gtt_space = NULL;
if (ret == -ENOMEM) { if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */ /* first try to clear up some space from the GTT */
...@@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
ret = i915_gem_gtt_bind_object(obj); ret = i915_gem_gtt_bind_object(obj);
if (ret) { if (ret) {
i915_gem_object_put_pages_gtt(obj); i915_gem_object_put_pages_gtt(obj);
drm_mm_put_block(obj_priv->gtt_space); drm_mm_put_block(obj->gtt_space);
obj_priv->gtt_space = NULL; obj->gtt_space = NULL;
ret = i915_gem_evict_something(dev, size, ret = i915_gem_evict_something(dev, size,
alignment, map_and_fenceable); alignment, map_and_fenceable);
...@@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, ...@@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
goto search_free; goto search_free;
} }
obj_priv->gtt_offset = obj_priv->gtt_space->start; obj->gtt_offset = obj->gtt_space->start;
/* keep track of the bound object by adding it to the inactive list */ /* keep track of the bound object by adding it to the inactive list */
list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
i915_gem_info_add_gtt(dev_priv, obj_priv); i915_gem_info_add_gtt(dev_priv, obj);
/* Assert that the object is not currently in any GPU domain. As it /* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in * wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache * a GPU cache
*/ */
BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable); trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
fenceable = fenceable =
obj_priv->gtt_space->size == fence_size && obj->gtt_space->size == fence_size &&
(obj_priv->gtt_space->start & (fence_alignment - 1)) == 0; (obj->gtt_space->start & (fence_alignment - 1)) == 0;
mappable = mappable =
obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end; obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj_priv->map_and_fenceable = mappable && fenceable; obj->map_and_fenceable = mappable && fenceable;
return 0; return 0;
} }
void void
i915_gem_clflush_object(struct drm_gem_object *obj) i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
/* If we don't have a page list set up, then we're not pinned /* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen * to GPU, and we can ignore the cache flush because it'll happen
* again at bind time. * again at bind time.
*/ */
if (obj_priv->pages == NULL) if (obj->pages == NULL)
return; return;
trace_i915_gem_object_clflush(obj); trace_i915_gem_object_clflush(obj);
drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
} }
/** Flushes any GPU write domain for the object if it's dirty. */ /** Flushes any GPU write domain for the object if it's dirty. */
static int static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
bool pipelined) bool pipelined)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
return 0; return 0;
/* Queue the GPU write cache flushing we need. */ /* Queue the GPU write cache flushing we need. */
i915_gem_flush_ring(dev, NULL, i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
to_intel_bo(obj)->ring, BUG_ON(obj->base.write_domain);
0, obj->write_domain);
BUG_ON(obj->write_domain);
if (pipelined) if (pipelined)
return 0; return 0;
...@@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, ...@@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
/** Flushes the GTT write domain for the object if it's dirty. */ /** Flushes the GTT write domain for the object if it's dirty. */
static void static void
i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{ {
uint32_t old_write_domain; uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_GTT) if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return; return;
/* No actual flushing is required for the GTT write domain. Writes /* No actual flushing is required for the GTT write domain. Writes
...@@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) ...@@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
*/ */
i915_gem_release_mmap(obj); i915_gem_release_mmap(obj);
old_write_domain = obj->write_domain; old_write_domain = obj->base.write_domain;
obj->write_domain = 0; obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
obj->read_domains, obj->base.read_domains,
old_write_domain); old_write_domain);
} }
/** Flushes the CPU write domain for the object if it's dirty. */ /** Flushes the CPU write domain for the object if it's dirty. */
static void static void
i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{ {
uint32_t old_write_domain; uint32_t old_write_domain;
if (obj->write_domain != I915_GEM_DOMAIN_CPU) if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return; return;
i915_gem_clflush_object(obj); i915_gem_clflush_object(obj);
intel_gtt_chipset_flush(); intel_gtt_chipset_flush();
old_write_domain = obj->write_domain; old_write_domain = obj->base.write_domain;
obj->write_domain = 0; obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
obj->read_domains, obj->base.read_domains,
old_write_domain); old_write_domain);
} }
...@@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) ...@@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur. * flushes to occur.
*/ */
int int
i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains; uint32_t old_write_domain, old_read_domains;
int ret; int ret;
/* Not valid to be called on unbound objects. */ /* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL) if (obj->gtt_space == NULL)
return -EINVAL; return -EINVAL;
ret = i915_gem_object_flush_gpu_write_domain(obj, false); ret = i915_gem_object_flush_gpu_write_domain(obj, false);
...@@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) ...@@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
return ret; return ret;
} }
old_write_domain = obj->write_domain; old_write_domain = obj->base.write_domain;
old_read_domains = obj->read_domains; old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update /* It should now be out of any other write domains, and we can update
* the domain values for our changes. * the domain values for our changes.
*/ */
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
obj->read_domains |= I915_GEM_DOMAIN_GTT; obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) { if (write) {
obj->read_domains = I915_GEM_DOMAIN_GTT; obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->write_domain = I915_GEM_DOMAIN_GTT; obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj_priv->dirty = 1; obj->dirty = 1;
} }
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
...@@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) ...@@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in modesetting process we're not supposed to be interrupted. * wait, as in modesetting process we're not supposed to be interrupted.
*/ */
int int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
bool pipelined) bool pipelined)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains; uint32_t old_read_domains;
int ret; int ret;
/* Not valid to be called on unbound objects. */ /* Not valid to be called on unbound objects. */
if (obj_priv->gtt_space == NULL) if (obj->gtt_space == NULL)
return -EINVAL; return -EINVAL;
ret = i915_gem_object_flush_gpu_write_domain(obj, true); ret = i915_gem_object_flush_gpu_write_domain(obj, true);
...@@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, ...@@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
i915_gem_object_flush_cpu_write_domain(obj); i915_gem_object_flush_cpu_write_domain(obj);
old_read_domains = obj->read_domains; old_read_domains = obj->base.read_domains;
obj->read_domains |= I915_GEM_DOMAIN_GTT; obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
old_read_domains, old_read_domains,
obj->write_domain); obj->base.write_domain);
return 0; return 0;
} }
...@@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, ...@@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0; return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
i915_gem_flush_ring(obj->base.dev, NULL, obj->ring, i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain); 0, obj->base.write_domain);
return i915_gem_object_wait_rendering(&obj->base, interruptible); return i915_gem_object_wait_rendering(obj, interruptible);
} }
/** /**
...@@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, ...@@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
* flushes to occur. * flushes to occur.
*/ */
static int static int
i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write)
{ {
uint32_t old_write_domain, old_read_domains; uint32_t old_write_domain, old_read_domains;
int ret; int ret;
...@@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) ...@@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
return ret; return ret;
} }
old_write_domain = obj->write_domain; old_write_domain = obj->base.write_domain;
old_read_domains = obj->read_domains; old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */ /* Flush the CPU cache if it's still invalid. */
if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj); i915_gem_clflush_object(obj);
obj->read_domains |= I915_GEM_DOMAIN_CPU; obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
} }
/* It should now be out of any other write domains, and we can update /* It should now be out of any other write domains, and we can update
* the domain values for our changes. * the domain values for our changes.
*/ */
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will /* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use. * need to be invalidated at next use.
*/ */
if (write) { if (write) {
obj->read_domains = I915_GEM_DOMAIN_CPU; obj->base.read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU; obj->base.write_domain = I915_GEM_DOMAIN_CPU;
} }
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
...@@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) ...@@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
* drm_agp_chipset_flush * drm_agp_chipset_flush
*/ */
static void static void
i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring, struct intel_ring_buffer *ring,
struct change_domains *cd) struct change_domains *cd)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); uint32_t invalidate_domains = 0, flush_domains = 0;
uint32_t invalidate_domains = 0;
uint32_t flush_domains = 0;
/* /*
* If the object isn't moving to a new write domain, * If the object isn't moving to a new write domain,
* let the object stay in multiple read domains * let the object stay in multiple read domains
*/ */
if (obj->pending_write_domain == 0) if (obj->base.pending_write_domain == 0)
obj->pending_read_domains |= obj->read_domains; obj->base.pending_read_domains |= obj->base.read_domains;
/* /*
* Flush the current write domain if * Flush the current write domain if
...@@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, ...@@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* any read domains which differ from the old * any read domains which differ from the old
* write domain * write domain
*/ */
if (obj->write_domain && if (obj->base.write_domain &&
(obj->write_domain != obj->pending_read_domains || (obj->base.write_domain != obj->base.pending_read_domains ||
obj_priv->ring != ring)) { obj->ring != ring)) {
flush_domains |= obj->write_domain; flush_domains |= obj->base.write_domain;
invalidate_domains |= invalidate_domains |=
obj->pending_read_domains & ~obj->write_domain; obj->base.pending_read_domains & ~obj->base.write_domain;
} }
/* /*
* Invalidate any read caches which may have * Invalidate any read caches which may have
* stale data. That is, any new read domains. * stale data. That is, any new read domains.
*/ */
invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj); i915_gem_clflush_object(obj);
...@@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, ...@@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* write_domains). So if we have a current write domain that we * write_domains). So if we have a current write domain that we
* aren't changing, set pending_write_domain to that. * aren't changing, set pending_write_domain to that.
*/ */
if (flush_domains == 0 && obj->pending_write_domain == 0) if (flush_domains == 0 && obj->base.pending_write_domain == 0)
obj->pending_write_domain = obj->write_domain; obj->base.pending_write_domain = obj->base.write_domain;
cd->invalidate_domains |= invalidate_domains; cd->invalidate_domains |= invalidate_domains;
cd->flush_domains |= flush_domains; cd->flush_domains |= flush_domains;
if (flush_domains & I915_GEM_GPU_DOMAINS) if (flush_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= obj_priv->ring->id; cd->flush_rings |= obj->ring->id;
if (invalidate_domains & I915_GEM_GPU_DOMAINS) if (invalidate_domains & I915_GEM_GPU_DOMAINS)
cd->flush_rings |= ring->id; cd->flush_rings |= ring->id;
} }
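A concrete instance of the flush/invalidate rule above, as a minimal standalone model (domain bit values assumed to match include/drm/i915_drm.h): an object last written through the GTT that the next batch will read through the sampler must have its GTT write cache flushed and the sampler read cache invalidated.

#include <assert.h>
#include <stdint.h>

#define I915_GEM_DOMAIN_SAMPLER 0x00000004  /* assumed, as in i915_drm.h */
#define I915_GEM_DOMAIN_GTT     0x00000040

int main(void)
{
    uint32_t write_domain = I915_GEM_DOMAIN_GTT;              /* last write went via GTT */
    uint32_t pending_read_domains = I915_GEM_DOMAIN_SAMPLER;  /* next reader */
    uint32_t flush_domains = 0, invalidate_domains = 0;

    /* the same test as in i915_gem_object_set_to_gpu_domain() */
    if (write_domain && write_domain != pending_read_domains) {
        flush_domains |= write_domain;
        invalidate_domains |= pending_read_domains & ~write_domain;
    }

    assert(flush_domains == I915_GEM_DOMAIN_GTT);          /* flush stale writes */
    assert(invalidate_domains == I915_GEM_DOMAIN_SAMPLER); /* refill the reader */
    return 0;
}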
...@@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, ...@@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/ */
static void static void
i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); if (!obj->page_cpu_valid)
if (!obj_priv->page_cpu_valid)
return; return;
/* If we're partially in the CPU read domain, finish moving it in. /* If we're partially in the CPU read domain, finish moving it in.
*/ */
if (obj->read_domains & I915_GEM_DOMAIN_CPU) { if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
int i; int i;
for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
if (obj_priv->page_cpu_valid[i]) if (obj->page_cpu_valid[i])
continue; continue;
drm_clflush_pages(obj_priv->pages + i, 1); drm_clflush_pages(obj->pages + i, 1);
} }
} }
/* Free the page_cpu_valid mappings which are now stale, whether /* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU. * or not we've got I915_GEM_DOMAIN_CPU.
*/ */
kfree(obj_priv->page_cpu_valid); kfree(obj->page_cpu_valid);
obj_priv->page_cpu_valid = NULL; obj->page_cpu_valid = NULL;
} }
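The page_cpu_valid map freed here is one byte per page, set once that page has been clflushed for CPU reads; a small userspace model of the partial-to-full transition sketched above (PAGE_SIZE fixed at 4096 for the example):

#include <assert.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

int main(void)
{
    size_t obj_size = 3 * PAGE_SIZE;
    /* one byte per page, 1 = CPU cache already valid for that page */
    unsigned char *page_cpu_valid = calloc(obj_size / PAGE_SIZE, 1);
    size_t i, flushed = 0;

    page_cpu_valid[1] = 1;  /* say page 1 was already pulled in earlier */

    /* moving to the full CPU domain: flush only the not-yet-valid pages */
    for (i = 0; i <= (obj_size - 1) / PAGE_SIZE; i++) {
        if (page_cpu_valid[i])
            continue;
        flushed++;  /* stands in for drm_clflush_pages(pages + i, 1) */
    }
    assert(flushed == 2);   /* pages 0 and 2 */

    free(page_cpu_valid);   /* the map is stale after a full move */
    return 0;
}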
/** /**
...@@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) ...@@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
* flushes to occur. * flushes to occur.
*/ */
static int static int
i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t size) uint64_t offset, uint64_t size)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains; uint32_t old_read_domains;
int i, ret; int i, ret;
if (offset == 0 && size == obj->size) if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0); return i915_gem_object_set_to_cpu_domain(obj, 0);
ret = i915_gem_object_flush_gpu_write_domain(obj, false); ret = i915_gem_object_flush_gpu_write_domain(obj, false);
...@@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, ...@@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
i915_gem_object_flush_gtt_write_domain(obj); i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */ /* If we're already fully in the CPU read domain, we're done. */
if (obj_priv->page_cpu_valid == NULL && if (obj->page_cpu_valid == NULL &&
(obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0; return 0;
/* Otherwise, create/clear the per-page CPU read domain flag if we're /* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU * newly adding I915_GEM_DOMAIN_CPU
*/ */
if (obj_priv->page_cpu_valid == NULL) { if (obj->page_cpu_valid == NULL) {
obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE, obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
GFP_KERNEL); GFP_KERNEL);
if (obj_priv->page_cpu_valid == NULL) if (obj->page_cpu_valid == NULL)
return -ENOMEM; return -ENOMEM;
} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's /* Flush the cache on any pages that are still invalid from the CPU's
* perspective. * perspective.
*/ */
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) { i++) {
if (obj_priv->page_cpu_valid[i]) if (obj->page_cpu_valid[i])
continue; continue;
drm_clflush_pages(obj_priv->pages + i, 1); drm_clflush_pages(obj->pages + i, 1);
obj_priv->page_cpu_valid[i] = 1; obj->page_cpu_valid[i] = 1;
} }
/* It should now be out of any other write domains, and we can update /* It should now be out of any other write domains, and we can update
* the domain values for our changes. * the domain values for our changes.
*/ */
BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
old_read_domains = obj->read_domains; old_read_domains = obj->base.read_domains;
obj->read_domains |= I915_GEM_DOMAIN_CPU; obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
old_read_domains, old_read_domains,
obj->write_domain); obj->base.write_domain);
return 0; return 0;
} }
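The loop bounds above are the usual inclusive page-index arithmetic, covering both partially-touched end pages; a standalone check (4096-byte pages assumed):

#include <assert.h>
#include <stdint.h>

#define PAGE_SIZE 4096

int main(void)
{
    uint64_t offset = 0x1800, size = 0x1000;  /* spans pages 1 and 2 */
    uint64_t first = offset / PAGE_SIZE;
    uint64_t last = (offset + size - 1) / PAGE_SIZE;

    assert(first == 1 && last == 2);  /* both partially-covered pages included */
    return 0;
}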
...@@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, ...@@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
uint32_t __iomem *reloc_entry; uint32_t __iomem *reloc_entry;
void __iomem *reloc_page; void __iomem *reloc_page;
ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1); ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret) if (ret)
goto err; goto err;
...@@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj, ...@@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int static int
i915_gem_execbuffer_relocate(struct drm_device *dev, i915_gem_execbuffer_relocate(struct drm_device *dev,
struct drm_file *file, struct drm_file *file,
struct drm_gem_object **object_list, struct drm_i915_gem_object **object_list,
struct drm_i915_gem_exec_object2 *exec_list, struct drm_i915_gem_exec_object2 *exec_list,
int count) int count)
{ {
int i, ret; int i, ret;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); struct drm_i915_gem_object *obj = object_list[i];
obj->base.pending_read_domains = 0; obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0; obj->base.pending_write_domain = 0;
ret = i915_gem_execbuffer_relocate_object(obj, file, ret = i915_gem_execbuffer_relocate_object(obj, file,
...@@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev, ...@@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
static int static int
i915_gem_execbuffer_reserve(struct drm_device *dev, i915_gem_execbuffer_reserve(struct drm_device *dev,
struct drm_file *file, struct drm_file *file,
struct drm_gem_object **object_list, struct drm_i915_gem_object **object_list,
struct drm_i915_gem_exec_object2 *exec_list, struct drm_i915_gem_exec_object2 *exec_list,
int count) int count)
{ {
...@@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ...@@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
ret = 0; ret = 0;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_i915_gem_exec_object2 *entry = &exec_list[i]; struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); struct drm_i915_gem_object *obj = object_list[i];
bool need_fence = bool need_fence =
entry->flags & EXEC_OBJECT_NEEDS_FENCE && entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
obj->tiling_mode != I915_TILING_NONE; obj->tiling_mode != I915_TILING_NONE;
...@@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ...@@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
/* Check fence reg constraints and rebind if necessary */ /* Check fence reg constraints and rebind if necessary */
if (need_mappable && !obj->map_and_fenceable) { if (need_mappable && !obj->map_and_fenceable) {
ret = i915_gem_object_unbind(&obj->base); ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
break; break;
} }
ret = i915_gem_object_pin(&obj->base, ret = i915_gem_object_pin(obj,
entry->alignment, entry->alignment,
need_mappable); need_mappable);
if (ret) if (ret)
...@@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ...@@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
* to properly handle blits to/from tiled surfaces. * to properly handle blits to/from tiled surfaces.
*/ */
if (need_fence) { if (need_fence) {
ret = i915_gem_object_get_fence_reg(&obj->base, true); ret = i915_gem_object_get_fence_reg(obj, true);
if (ret) { if (ret) {
i915_gem_object_unpin(&obj->base); i915_gem_object_unpin(obj);
break; break;
} }
...@@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev, ...@@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
static int static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev, i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_file *file, struct drm_file *file,
struct drm_gem_object **object_list, struct drm_i915_gem_object **object_list,
struct drm_i915_gem_exec_object2 *exec_list, struct drm_i915_gem_exec_object2 *exec_list,
int count) int count)
{ {
struct drm_i915_gem_relocation_entry *reloc; struct drm_i915_gem_relocation_entry *reloc;
int i, total, ret; int i, total, ret;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++)
struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); object_list[i]->in_execbuffer = false;
obj->in_execbuffer = false;
}
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev, ...@@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
total = 0; total = 0;
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]); struct drm_i915_gem_object *obj = object_list[i];
obj->base.pending_read_domains = 0; obj->base.pending_read_domains = 0;
obj->base.pending_write_domain = 0; obj->base.pending_write_domain = 0;
ret = i915_gem_execbuffer_relocate_object_slow(obj, file, ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
...@@ -3740,7 +3687,7 @@ static int ...@@ -3740,7 +3687,7 @@ static int
i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
struct drm_file *file, struct drm_file *file,
struct intel_ring_buffer *ring, struct intel_ring_buffer *ring,
struct drm_gem_object **objects, struct drm_i915_gem_object **objects,
int count) int count)
{ {
struct change_domains cd; struct change_domains cd;
...@@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev, ...@@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
cd.invalidate_domains, cd.invalidate_domains,
cd.flush_domains); cd.flush_domains);
#endif #endif
i915_gem_flush(dev, file, i915_gem_flush(dev,
cd.invalidate_domains, cd.invalidate_domains,
cd.flush_domains, cd.flush_domains,
cd.flush_rings); cd.flush_rings);
} }
for (i = 0; i < count; i++) { for (i = 0; i < count; i++) {
struct drm_i915_gem_object *obj = to_intel_bo(objects[i]); struct drm_i915_gem_object *obj = objects[i];
/* XXX replace with semaphores */ /* XXX replace with semaphores */
if (obj->ring && ring != obj->ring) { if (obj->ring && ring != obj->ring) {
ret = i915_gem_object_wait_rendering(&obj->base, true); ret = i915_gem_object_wait_rendering(obj, true);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec_list) struct drm_i915_gem_exec_object2 *exec_list)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object **object_list = NULL; struct drm_i915_gem_object **object_list = NULL;
struct drm_gem_object *batch_obj; struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL; struct drm_clip_rect *cliprects = NULL;
struct drm_i915_gem_request *request = NULL; struct drm_i915_gem_request *request = NULL;
int ret, i, flips; int ret, i, flips;
...@@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Look up object handles */ /* Look up object handles */
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
object_list[i] = drm_gem_object_lookup(dev, file, obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec_list[i].handle); exec_list[i].handle));
if (object_list[i] == NULL) { if (obj == NULL) {
DRM_ERROR("Invalid object handle %d at index %d\n", DRM_ERROR("Invalid object handle %d at index %d\n",
exec_list[i].handle, i); exec_list[i].handle, i);
/* prevent error path from reading uninitialized data */ /* prevent error path from reading uninitialized data */
args->buffer_count = i + 1; args->buffer_count = i;
ret = -ENOENT; ret = -ENOENT;
goto err; goto err;
} }
object_list[i] = obj;
obj_priv = to_intel_bo(object_list[i]); if (obj->in_execbuffer) {
if (obj_priv->in_execbuffer) {
DRM_ERROR("Object %p appears more than once in object list\n", DRM_ERROR("Object %p appears more than once in object list\n",
object_list[i]); obj);
/* prevent error path from reading uninitialized data */ /* prevent error path from reading uninitialized data */
args->buffer_count = i + 1; args->buffer_count = i + 1;
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
obj_priv->in_execbuffer = true; obj->in_execbuffer = true;
} }
/* Move the objects en-masse into the GTT, evicting if necessary. */ /* Move the objects en-masse into the GTT, evicting if necessary. */
...@@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
/* Set the pending read domains for the batch buffer to COMMAND */ /* Set the pending read domains for the batch buffer to COMMAND */
batch_obj = object_list[args->buffer_count-1]; batch_obj = object_list[args->buffer_count-1];
if (batch_obj->pending_write_domain) { if (batch_obj->base.pending_write_domain) {
DRM_ERROR("Attempting to use self-modifying batch buffer\n"); DRM_ERROR("Attempting to use self-modifying batch buffer\n");
ret = -EINVAL; ret = -EINVAL;
goto err; goto err;
} }
batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND; batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
/* Sanity check the batch buffer */ /* Sanity check the batch buffer */
exec_offset = to_intel_bo(batch_obj)->gtt_offset; exec_offset = batch_obj->gtt_offset;
ret = i915_gem_check_execbuffer(args, exec_offset); ret = i915_gem_check_execbuffer(args, exec_offset);
if (ret != 0) { if (ret != 0) {
DRM_ERROR("execbuf with invalid offset/length\n"); DRM_ERROR("execbuf with invalid offset/length\n");
...@@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/ */
flips = 0; flips = 0;
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
if (object_list[i]->write_domain) if (object_list[i]->base.write_domain)
flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); flips |= atomic_read(&object_list[i]->pending_flip);
} }
if (flips) { if (flips) {
int plane, flip_mask; int plane, flip_mask;
...@@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
} }
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i]; struct drm_i915_gem_object *obj = object_list[i];
obj->read_domains = obj->pending_read_domains; obj->base.read_domains = obj->base.pending_read_domains;
obj->write_domain = obj->pending_write_domain; obj->base.write_domain = obj->base.pending_write_domain;
i915_gem_object_move_to_active(obj, ring); i915_gem_object_move_to_active(obj, ring);
if (obj->write_domain) { if (obj->base.write_domain) {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); obj->dirty = 1;
obj_priv->dirty = 1; list_move_tail(&obj->gpu_write_list,
list_move_tail(&obj_priv->gpu_write_list,
&ring->gpu_write_list); &ring->gpu_write_list);
intel_mark_busy(dev, obj); intel_mark_busy(dev, obj);
} }
trace_i915_gem_object_change_domain(obj, trace_i915_gem_object_change_domain(obj,
obj->read_domains, obj->base.read_domains,
obj->write_domain); obj->base.write_domain);
} }
/* /*
...@@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
err: err:
for (i = 0; i < args->buffer_count; i++) { for (i = 0; i < args->buffer_count; i++) {
if (object_list[i] == NULL) object_list[i]->in_execbuffer = false;
break; drm_gem_object_unreference(&object_list[i]->base);
to_intel_bo(object_list[i])->in_execbuffer = false;
drm_gem_object_unreference(object_list[i]);
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
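One subtlety worth spelling out: on a failed lookup the code now truncates args->buffer_count to i rather than i + 1, so the err: path above can unreference object_list[0..buffer_count) unconditionally instead of NULL-checking each slot. A tiny model of that invariant (with a hypothetical stand-in for the lookup):

#include <assert.h>
#include <stddef.h>

int main(void)
{
    void *object_list[8] = { NULL };
    int buffer_count = 8, fail_at = 5, i;

    for (i = 0; i < buffer_count; i++) {
        void *obj = (i < fail_at) ? &object_list[i] : NULL; /* lookup fails at i == 5 */
        if (obj == NULL) {
            buffer_count = i;   /* only slots [0, i) hold a reference */
            break;
        }
        object_list[i] = obj;
    }

    for (i = 0; i < buffer_count; i++)  /* the err: loop */
        assert(object_list[i] != NULL); /* safe to unreference unconditionally */
    return 0;
}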
...@@ -4165,7 +4108,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, ...@@ -4165,7 +4108,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_execbuffer(struct drm_device *dev, void *data, i915_gem_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_execbuffer *args = data; struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2; struct drm_i915_gem_execbuffer2 exec2;
...@@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ...@@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
exec2.cliprects_ptr = args->cliprects_ptr; exec2.cliprects_ptr = args->cliprects_ptr;
exec2.flags = I915_EXEC_RENDER; exec2.flags = I915_EXEC_RENDER;
ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list); ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) { if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */ /* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) for (i = 0; i < args->buffer_count; i++)
...@@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, ...@@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
int int
i915_gem_execbuffer2(struct drm_device *dev, void *data, i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_execbuffer2 *args = data; struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list = NULL; struct drm_i915_gem_exec_object2 *exec2_list = NULL;
...@@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ...@@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -EFAULT; return -EFAULT;
} }
ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list); ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
if (!ret) { if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */ /* Copy the new buffer offsets back to the user's exec list. */
ret = copy_to_user((struct drm_i915_relocation_entry __user *) ret = copy_to_user((struct drm_i915_relocation_entry __user *)
...@@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data, ...@@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
} }
int int
i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment, i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable) bool map_and_fenceable)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret; int ret;
BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
BUG_ON(map_and_fenceable && !map_and_fenceable); BUG_ON(map_and_fenceable && !map_and_fenceable);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
if (obj_priv->gtt_space != NULL) { if (obj->gtt_space != NULL) {
if ((alignment && obj_priv->gtt_offset & (alignment - 1)) || if ((alignment && obj->gtt_offset & (alignment - 1)) ||
(map_and_fenceable && !obj_priv->map_and_fenceable)) { (map_and_fenceable && !obj->map_and_fenceable)) {
WARN(obj_priv->pin_count, WARN(obj->pin_count,
"bo is already pinned with incorrect alignment:" "bo is already pinned with incorrect alignment:"
" offset=%x, req.alignment=%x, req.map_and_fenceable=%d," " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
" obj->map_and_fenceable=%d\n", " obj->map_and_fenceable=%d\n",
obj_priv->gtt_offset, alignment, obj->gtt_offset, alignment,
map_and_fenceable, map_and_fenceable,
obj_priv->map_and_fenceable); obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
return ret; return ret;
} }
} }
if (obj_priv->gtt_space == NULL) { if (obj->gtt_space == NULL) {
ret = i915_gem_object_bind_to_gtt(obj, alignment, ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable); map_and_fenceable);
if (ret) if (ret)
return ret; return ret;
} }
if (obj_priv->pin_count++ == 0) { if (obj->pin_count++ == 0) {
i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable); i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
if (!obj_priv->active) if (!obj->active)
list_move_tail(&obj_priv->mm_list, list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list); &dev_priv->mm.pinned_list);
} }
BUG_ON(!obj_priv->pin_mappable && map_and_fenceable); BUG_ON(!obj->pin_mappable && map_and_fenceable);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
return 0; return 0;
} }
void void
i915_gem_object_unpin(struct drm_gem_object *obj) i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
BUG_ON(obj_priv->pin_count == 0); BUG_ON(obj->pin_count == 0);
BUG_ON(obj_priv->gtt_space == NULL); BUG_ON(obj->gtt_space == NULL);
if (--obj_priv->pin_count == 0) { if (--obj->pin_count == 0) {
if (!obj_priv->active) if (!obj->active)
list_move_tail(&obj_priv->mm_list, list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list); &dev_priv->mm.inactive_list);
i915_gem_info_remove_pin(dev_priv, obj_priv); i915_gem_info_remove_pin(dev_priv, obj);
} }
WARN_ON(i915_verify_lists(dev)); WARN_ON(i915_verify_lists(dev));
} }
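Worth noting the pairing discipline in the two functions above: pin_count nests, only the 0 to 1 transition parks the object on the pinned list, and only the final unpin returns it to the inactive list. In miniature:

#include <assert.h>

static int pin_count;
static const char *list = "inactive";

static void pin(void)
{
    if (pin_count++ == 0)
        list = "pinned";    /* first pin moves the object */
}

static void unpin(void)
{
    assert(pin_count > 0);
    if (--pin_count == 0)
        list = "inactive";  /* last unpin moves it back */
}

int main(void)
{
    pin(); pin();           /* e.g. a user pin on top of a kernel pin */
    unpin();
    assert(pin_count == 1); /* still pinned until the final unpin */
    unpin();
    return 0;
}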
int int
i915_gem_pin_ioctl(struct drm_device *dev, void *data, i915_gem_pin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_pin *args = data; struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
if (obj_priv->madv != I915_MADV_WILLNEED) { if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n"); DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle); args->handle);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
obj_priv->user_pin_count++; obj->user_pin_count++;
obj_priv->pin_filp = file_priv; obj->pin_filp = file;
if (obj_priv->user_pin_count == 1) { if (obj->user_pin_count == 1) {
ret = i915_gem_object_pin(obj, args->alignment, true); ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret) if (ret)
goto out; goto out;
...@@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ...@@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet * as the X server doesn't manage domains yet
*/ */
i915_gem_object_flush_cpu_write_domain(obj); i915_gem_object_flush_cpu_write_domain(obj);
args->offset = obj_priv->gtt_offset; args->offset = obj->gtt_offset;
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
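From userspace this ioctl looks roughly as follows, a sketch assuming the usual libdrm entry point drmIoctl() and the DRM_IOCTL_I915_GEM_PIN definition (the pin ioctl is privileged):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Pin a buffer and learn its (stable) GTT offset; returns -1 on error. */
static int pin_bo(int fd, uint32_t handle, uint64_t *offset)
{
    struct drm_i915_gem_pin pin;

    memset(&pin, 0, sizeof(pin));
    pin.handle = handle;
    pin.alignment = 0;  /* use the object's default alignment */

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin))
        return -1;

    *offset = pin.offset;   /* valid until DRM_IOCTL_I915_GEM_UNPIN */
    return 0;
}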
...@@ -4427,38 +4367,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, ...@@ -4427,38 +4367,36 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
int int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data, i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_pin *args = data; struct drm_i915_gem_pin *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
if (obj_priv->pin_filp != file_priv) { if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n", DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle); args->handle);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
obj_priv->user_pin_count--; obj->user_pin_count--;
if (obj_priv->user_pin_count == 0) { if (obj->user_pin_count == 0) {
obj_priv->pin_filp = NULL; obj->pin_filp = NULL;
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
} }
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
...@@ -4466,52 +4404,49 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, ...@@ -4466,52 +4404,49 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
int int
i915_gem_busy_ioctl(struct drm_device *dev, void *data, i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_busy *args = data; struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
ret = i915_mutex_lock_interruptible(dev); ret = i915_mutex_lock_interruptible(dev);
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used /* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually * by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any * become non-busy without any further actions, therefore emit any
* necessary flushes here. * necessary flushes here.
*/ */
args->busy = obj_priv->active; args->busy = obj->active;
if (args->busy) { if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this /* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to * object. Userspace calling this function indicates that it wants to
* use this buffer rather sooner than later, so issuing the required * use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial. * flush earlier is beneficial.
*/ */
if (obj->write_domain & I915_GEM_GPU_DOMAINS) if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
i915_gem_flush_ring(dev, file_priv, i915_gem_flush_ring(dev, obj->ring,
obj_priv->ring, 0, obj->base.write_domain);
0, obj->write_domain);
/* Update the active list for the hardware's current position. /* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs * Otherwise this only updates on a delayed timer or when irqs
* are actually unmasked, and our working set ends up being * are actually unmasked, and our working set ends up being
* larger than required. * larger than required.
*/ */
i915_gem_retire_requests_ring(dev, obj_priv->ring); i915_gem_retire_requests_ring(dev, obj->ring);
args->busy = obj_priv->active; args->busy = obj->active;
} }
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
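The busy ioctl is correspondingly a cheap non-blocking poll from userspace (sketch, same libdrm assumptions as above):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Returns 1 if the GPU is still using the buffer, 0 if idle, -1 on error. */
static int bo_busy(int fd, uint32_t handle)
{
    struct drm_i915_gem_busy busy;

    memset(&busy, 0, sizeof(busy));
    busy.handle = handle;

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
        return -1;

    return busy.busy != 0;  /* any required flushes were already emitted */
}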
...@@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, ...@@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_i915_gem_madvise *args = data; struct drm_i915_gem_madvise *args = data;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
switch (args->madv) { switch (args->madv) {
...@@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data, ...@@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) { if (obj == NULL) {
ret = -ENOENT; ret = -ENOENT;
goto unlock; goto unlock;
} }
obj_priv = to_intel_bo(obj);
if (obj_priv->pin_count) { if (obj->pin_count) {
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
if (obj_priv->madv != __I915_MADV_PURGED) if (obj->madv != __I915_MADV_PURGED)
obj_priv->madv = args->madv; obj->madv = args->madv;
/* if the object is no longer bound, discard its backing storage */ /* if the object is no longer bound, discard its backing storage */
if (i915_gem_object_is_purgeable(obj_priv) && if (i915_gem_object_is_purgeable(obj) &&
obj_priv->gtt_space == NULL) obj->gtt_space == NULL)
i915_gem_object_truncate(obj); i915_gem_object_truncate(obj);
args->retained = obj_priv->madv != __I915_MADV_PURGED; args->retained = obj->madv != __I915_MADV_PURGED;
out: out:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
unlock: unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
} }
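And the madvise ioctl that the checks above guard, from the userspace side (sketch; I915_MADV_DONTNEED marks the backing storage purgeable, and retained reports whether the contents survived):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <i915_drm.h>

/* Mark a cached buffer purgeable; returns 0 if its pages were retained. */
static int bo_purgeable(int fd, uint32_t handle)
{
    struct drm_i915_gem_madvise madv;

    memset(&madv, 0, sizeof(madv));
    madv.handle = handle;
    madv.madv = I915_MADV_DONTNEED;

    if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
        return -1;

    return madv.retained ? 0 : 1;   /* 1: contents already discarded */
}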
struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size) size_t size)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
...@@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, ...@@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
/* Avoid an unnecessary call to unbind on the first bind. */ /* Avoid an unnecessary call to unbind on the first bind. */
obj->map_and_fenceable = true; obj->map_and_fenceable = true;
return &obj->base; return obj;
} }
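The retyped allocator is where the commit's theme pays off at every call site; a hypothetical before/after of a caller:

/* before this patch: callers received the base object and downcast
 * at every access:
 *
 *     struct drm_gem_object *gem = i915_gem_alloc_object(dev, 4096);
 *     to_intel_bo(gem)->agp_type = AGP_USER_CACHED_MEMORY;
 *     drm_gem_object_unreference(gem);
 *
 * after: callers hold the driver type and the GEM core sees &obj->base:
 *
 *     struct drm_i915_gem_object *obj = i915_gem_alloc_object(dev, 4096);
 *     obj->agp_type = AGP_USER_CACHED_MEMORY;
 *     drm_gem_object_unreference(&obj->base);
 */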
int i915_gem_init_object(struct drm_gem_object *obj) int i915_gem_init_object(struct drm_gem_object *obj)
...@@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj) ...@@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0; return 0;
} }
static void i915_gem_free_object_tail(struct drm_gem_object *obj) static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret; int ret;
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) { if (ret == -ERESTARTSYS) {
list_move(&obj_priv->mm_list, list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list); &dev_priv->mm.deferred_free_list);
return; return;
} }
if (obj->map_list.map) if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj); i915_gem_free_mmap_offset(obj);
drm_gem_object_release(obj); drm_gem_object_release(&obj->base);
i915_gem_info_remove_obj(dev_priv, obj->size); i915_gem_info_remove_obj(dev_priv, obj->base.size);
kfree(obj_priv->page_cpu_valid); kfree(obj->page_cpu_valid);
kfree(obj_priv->bit_17); kfree(obj->bit_17);
kfree(obj_priv); kfree(obj);
} }
void i915_gem_free_object(struct drm_gem_object *obj) void i915_gem_free_object(struct drm_gem_object *gem_obj)
{ {
struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj); trace_i915_gem_object_destroy(obj);
while (obj_priv->pin_count > 0) while (obj->pin_count > 0)
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
if (obj_priv->phys_obj) if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj); i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj); i915_gem_free_object_tail(obj);
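Note the shape of the boundary here: the GEM core still calls back with a struct drm_gem_object *, and the driver downcasts exactly once at entry. to_intel_bo() is the usual container_of() cast, as defined in i915_drv.h:

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)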
...@@ -4710,8 +4642,7 @@ static int ...@@ -4710,8 +4642,7 @@ static int
i915_gem_init_pipe_control(struct drm_device *dev) i915_gem_init_pipe_control(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
obj = i915_gem_alloc_object(dev, 4096); obj = i915_gem_alloc_object(dev, 4096);
...@@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev) ...@@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev)
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
} }
obj_priv = to_intel_bo(obj); obj->agp_type = AGP_USER_CACHED_MEMORY;
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096, true); ret = i915_gem_object_pin(obj, 4096, true);
if (ret) if (ret)
goto err_unref; goto err_unref;
dev_priv->seqno_gfx_addr = obj_priv->gtt_offset; dev_priv->seqno_gfx_addr = obj->gtt_offset;
dev_priv->seqno_page = kmap(obj_priv->pages[0]); dev_priv->seqno_page = kmap(obj->pages[0]);
if (dev_priv->seqno_page == NULL) if (dev_priv->seqno_page == NULL)
goto err_unpin; goto err_unpin;
...@@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev) ...@@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
err_unpin: err_unpin:
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
err_unref: err_unref:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
err: err:
return ret; return ret;
} }
...@@ -4750,14 +4680,12 @@ static void ...@@ -4750,14 +4680,12 @@ static void
i915_gem_cleanup_pipe_control(struct drm_device *dev) i915_gem_cleanup_pipe_control(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = dev_priv->seqno_obj; obj = dev_priv->seqno_obj;
obj_priv = to_intel_bo(obj); kunmap(obj->pages[0]);
kunmap(obj_priv->pages[0]);
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
dev_priv->seqno_obj = NULL; dev_priv->seqno_obj = NULL;
dev_priv->seqno_page = NULL; dev_priv->seqno_page = NULL;
...@@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev) ...@@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
} }
void i915_gem_detach_phys_object(struct drm_device *dev, void i915_gem_detach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
char *vaddr; char *vaddr;
int i; int i;
int page_count; int page_count;
if (!obj_priv->phys_obj) if (!obj->phys_obj)
return; return;
vaddr = obj_priv->phys_obj->handle->vaddr; vaddr = obj->phys_obj->handle->vaddr;
page_count = obj->size / PAGE_SIZE;
page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
struct page *page = read_cache_page_gfp(mapping, i, struct page *page = read_cache_page_gfp(mapping, i,
GFP_HIGHUSER | __GFP_RECLAIMABLE); GFP_HIGHUSER | __GFP_RECLAIMABLE);
...@@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev, ...@@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
} }
intel_gtt_chipset_flush(); intel_gtt_chipset_flush();
obj_priv->phys_obj->cur_obj = NULL; obj->phys_obj->cur_obj = NULL;
obj_priv->phys_obj = NULL; obj->phys_obj = NULL;
} }
int int
i915_gem_attach_phys_object(struct drm_device *dev, i915_gem_attach_phys_object(struct drm_device *dev,
struct drm_gem_object *obj, struct drm_i915_gem_object *obj,
int id, int id,
int align) int align)
{ {
struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping; struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int ret = 0; int ret = 0;
int page_count; int page_count;
int i; int i;
...@@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev, ...@@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT) if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL; return -EINVAL;
obj_priv = to_intel_bo(obj); if (obj->phys_obj) {
if (obj->phys_obj->id == id)
if (obj_priv->phys_obj) {
if (obj_priv->phys_obj->id == id)
return 0; return 0;
i915_gem_detach_phys_object(dev, obj); i915_gem_detach_phys_object(dev, obj);
} }
...@@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev, ...@@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev,
/* create a new object */ /* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) { if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id, ret = i915_gem_init_phys_object(dev, id,
obj->size, align); obj->base.size, align);
if (ret) { if (ret) {
DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); DRM_ERROR("failed to init phys object %d size: %zu\n",
id, obj->base.size);
return ret; return ret;
} }
} }
/* bind to the object */ /* bind to the object */
obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
obj_priv->phys_obj->cur_obj = obj; obj->phys_obj->cur_obj = obj;
page_count = obj->size / PAGE_SIZE; page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
struct page *page; struct page *page;
...@@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev, ...@@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
return PTR_ERR(page); return PTR_ERR(page);
src = kmap_atomic(page); src = kmap_atomic(page);
dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE); memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src); kunmap_atomic(src);
...@@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev, ...@@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev,
} }
static int static int
i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, i915_gem_phys_pwrite(struct drm_device *dev,
struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args, struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten; unsigned long unwritten;
...@@ -5228,7 +5150,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, ...@@ -5228,7 +5150,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
&dev_priv->mm.inactive_list, &dev_priv->mm.inactive_list,
mm_list) { mm_list) {
if (i915_gem_object_is_purgeable(obj)) { if (i915_gem_object_is_purgeable(obj)) {
i915_gem_object_unbind(&obj->base); i915_gem_object_unbind(obj);
if (--nr_to_scan == 0) if (--nr_to_scan == 0)
break; break;
} }
...@@ -5240,7 +5162,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, ...@@ -5240,7 +5162,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker,
&dev_priv->mm.inactive_list, &dev_priv->mm.inactive_list,
mm_list) { mm_list) {
if (nr_to_scan) { if (nr_to_scan) {
i915_gem_object_unbind(&obj->base); i915_gem_object_unbind(obj);
nr_to_scan--; nr_to_scan--;
} else } else
cnt++; cnt++;
......
...@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, ...@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
} }
void void
i915_gem_dump_object(struct drm_gem_object *obj, int len, i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark) const char *where, uint32_t mark)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page; int page;
DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len; int page_len, chunk, chunk_len;
...@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, ...@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk; chunk_len = page_len - chunk;
if (chunk_len > 128) if (chunk_len > 128)
chunk_len = 128; chunk_len = 128;
i915_gem_dump_page(obj_priv->pages[page], i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len, chunk, chunk + chunk_len,
obj_priv->gtt_offset + obj->gtt_offset +
page * PAGE_SIZE, page * PAGE_SIZE,
mark); mark);
} }
...@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len, ...@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY #if WATCH_COHERENCY
void void
i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page; int page;
uint32_t *gtt_mapping; uint32_t *gtt_mapping;
uint32_t *backing_map = NULL; uint32_t *backing_map = NULL;
int bad_count = 0; int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
__func__, obj, obj_priv->gtt_offset, handle, __func__, obj, obj->gtt_offset, handle,
obj->size / 1024); obj->size / 1024);
gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
obj->size);
if (gtt_mapping == NULL) { if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n"); DRM_ERROR("failed to map GTT space\n");
return; return;
...@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) ...@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) { for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i; int i;
backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) { if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n"); DRM_ERROR("failed to map backing page\n");
...@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) ...@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) { if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: " DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n", "0x%08x vs 0x%08x\n",
(int)(obj_priv->gtt_offset + (int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4), page * PAGE_SIZE + i * 4),
cpuval, gttval); cpuval, gttval);
if (bad_count++ >= 8) { if (bad_count++ >= 8) {
......
...@@ -32,12 +32,11 @@ ...@@ -32,12 +32,11 @@
#include "i915_drm.h" #include "i915_drm.h"
static bool static bool
mark_free(struct drm_i915_gem_object *obj_priv, mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
struct list_head *unwind)
{ {
list_add(&obj_priv->evict_list, unwind); list_add(&obj->evict_list, unwind);
drm_gem_object_reference(&obj_priv->base); drm_gem_object_reference(&obj->base);
return drm_mm_scan_add_block(obj_priv->gtt_space); return drm_mm_scan_add_block(obj->gtt_space);
} }
int int
...@@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, ...@@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list; struct list_head eviction_list, unwind_list;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
int ret = 0; int ret = 0;
i915_gem_retire_requests(dev); i915_gem_retire_requests(dev);
...@@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, ...@@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */ /* First see if there is a large enough contiguous idle region... */
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (mark_free(obj_priv, &unwind_list)) if (mark_free(obj, &unwind_list))
goto found; goto found;
} }
/* Now merge in the soon-to-be-expired objects... */ /* Now merge in the soon-to-be-expired objects... */
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */ /* Does the object require an outstanding flush? */
if (obj_priv->base.write_domain || obj_priv->pin_count) if (obj->base.write_domain || obj->pin_count)
continue; continue;
if (mark_free(obj_priv, &unwind_list)) if (mark_free(obj, &unwind_list))
goto found; goto found;
} }
/* Finally add anything with a pending flush (in order of retirement) */ /* Finally add anything with a pending flush (in order of retirement) */
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (obj_priv->pin_count) if (obj->pin_count)
continue; continue;
if (mark_free(obj_priv, &unwind_list)) if (mark_free(obj, &unwind_list))
goto found; goto found;
} }
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (!obj_priv->base.write_domain || obj_priv->pin_count) if (!obj->base.write_domain || obj->pin_count)
continue; continue;
if (mark_free(obj_priv, &unwind_list)) if (mark_free(obj, &unwind_list))
goto found; goto found;
} }
/* Nothing found, clean up and bail out! */ /* Nothing found, clean up and bail out! */
list_for_each_entry(obj_priv, &unwind_list, evict_list) { list_for_each_entry(obj, &unwind_list, evict_list) {
ret = drm_mm_scan_remove_block(obj_priv->gtt_space); ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret); BUG_ON(ret);
drm_gem_object_unreference(&obj_priv->base); drm_gem_object_unreference(&obj->base);
} }
/* We expect the caller to unpin, evict all and try again, or give up. /* We expect the caller to unpin, evict all and try again, or give up.
...@@ -145,26 +144,26 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, ...@@ -145,26 +144,26 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
* temporary list. */ * temporary list. */
INIT_LIST_HEAD(&eviction_list); INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) { while (!list_empty(&unwind_list)) {
obj_priv = list_first_entry(&unwind_list, obj = list_first_entry(&unwind_list,
struct drm_i915_gem_object, struct drm_i915_gem_object,
evict_list); evict_list);
if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { if (drm_mm_scan_remove_block(obj->gtt_space)) {
list_move(&obj_priv->evict_list, &eviction_list); list_move(&obj->evict_list, &eviction_list);
continue; continue;
} }
list_del(&obj_priv->evict_list); list_del(&obj->evict_list);
drm_gem_object_unreference(&obj_priv->base); drm_gem_object_unreference(&obj->base);
} }
/* Unbinding will emit any required flushes */ /* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) { while (!list_empty(&eviction_list)) {
obj_priv = list_first_entry(&eviction_list, obj = list_first_entry(&eviction_list,
struct drm_i915_gem_object, struct drm_i915_gem_object,
evict_list); evict_list);
if (ret == 0) if (ret == 0)
ret = i915_gem_object_unbind(&obj_priv->base); ret = i915_gem_object_unbind(obj);
list_del(&obj_priv->evict_list); list_del(&obj->evict_list);
drm_gem_object_unreference(&obj_priv->base); drm_gem_object_unreference(&obj->base);
} }
return ret; return ret;
...@@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only) ...@@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
list_for_each_entry_safe(obj, next, list_for_each_entry_safe(obj, next,
&dev_priv->mm.inactive_list, mm_list) { &dev_priv->mm.inactive_list, mm_list) {
if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) { if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
int ret = i915_gem_object_unbind(&obj->base); int ret = i915_gem_object_unbind(obj);
if (ret) if (ret)
return ret; return ret;
} }
......
...@@ -32,71 +32,67 @@ ...@@ -32,71 +32,67 @@
void i915_gem_restore_gtt_mappings(struct drm_device *dev) void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
list_for_each_entry(obj_priv, list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
&dev_priv->mm.gtt_list,
gtt_list) {
if (dev_priv->mm.gtt->needs_dmar) { if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj_priv->sg_list); BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj_priv->sg_list, intel_gtt_insert_sg_entries(obj->sg_list,
obj_priv->num_sg, obj->num_sg,
obj_priv->gtt_space->start obj->gtt_space->start
>> PAGE_SHIFT, >> PAGE_SHIFT,
obj_priv->agp_type); obj->agp_type);
} else } else
intel_gtt_insert_pages(obj_priv->gtt_space->start intel_gtt_insert_pages(obj->gtt_space->start
>> PAGE_SHIFT, >> PAGE_SHIFT,
obj_priv->base.size >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT,
obj_priv->pages, obj->pages,
obj_priv->agp_type); obj->agp_type);
} }
/* Be paranoid and flush the chipset cache. */ /* Be paranoid and flush the chipset cache. */
intel_gtt_chipset_flush(); intel_gtt_chipset_flush();
} }
int i915_gem_gtt_bind_object(struct drm_gem_object *obj) int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret; int ret;
if (dev_priv->mm.gtt->needs_dmar) { if (dev_priv->mm.gtt->needs_dmar) {
ret = intel_gtt_map_memory(obj_priv->pages, ret = intel_gtt_map_memory(obj->pages,
obj->size >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT,
&obj_priv->sg_list, &obj->sg_list,
&obj_priv->num_sg); &obj->num_sg);
if (ret != 0) if (ret != 0)
return ret; return ret;
intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg, intel_gtt_insert_sg_entries(obj->sg_list,
obj_priv->gtt_space->start obj->num_sg,
>> PAGE_SHIFT, obj->gtt_space->start >> PAGE_SHIFT,
obj_priv->agp_type); obj->agp_type);
} else } else
intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT, intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
obj->size >> PAGE_SHIFT, obj->base.size >> PAGE_SHIFT,
obj_priv->pages, obj->pages,
obj_priv->agp_type); obj->agp_type);
return 0; return 0;
} }
void i915_gem_gtt_unbind_object(struct drm_gem_object *obj) void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if (dev_priv->mm.gtt->needs_dmar) { if (dev_priv->mm.gtt->needs_dmar) {
intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg); intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj_priv->sg_list = NULL; obj->sg_list = NULL;
obj_priv->num_sg = 0; obj->num_sg = 0;
} }
intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT, intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
obj->size >> PAGE_SHIFT); obj->base.size >> PAGE_SHIFT);
} }
...@@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) ...@@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
/* Is the current GTT allocation valid for the change in tiling? */ /* Is the current GTT allocation valid for the change in tiling? */
static bool static bool
i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 size; u32 size;
if (tiling_mode == I915_TILING_NONE) if (tiling_mode == I915_TILING_NONE)
return true; return true;
if (INTEL_INFO(obj->dev)->gen >= 4) if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true; return true;
if (!obj_priv->gtt_space) if (!obj->gtt_space)
return true; return true;
if (INTEL_INFO(obj->dev)->gen == 3) { if (INTEL_INFO(obj->base.dev)->gen == 3) {
if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK) if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false; return false;
} else { } else {
if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false; return false;
} }
...@@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) ...@@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
* Previous chips need to be aligned to the size of the smallest * Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object. * fence register that can contain the object.
*/ */
if (INTEL_INFO(obj->dev)->gen == 3) if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024; size = 1024*1024;
else else
size = 512*1024; size = 512*1024;
while (size < obj_priv->base.size) while (size < obj->base.size)
size <<= 1; size <<= 1;
if (obj_priv->gtt_space->size != size) if (obj->gtt_space->size != size)
return false; return false;
if (obj_priv->gtt_offset & (size - 1)) if (obj->gtt_offset & (size - 1))
return false; return false;
return true; return true;
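On pre-gen4 hardware a fence register must span the whole object, so the object is rounded up to the next power of two and must sit naturally aligned to that size. A worked instance with illustrative numbers: a 1.5 MiB object on gen3 starts from the 1 MiB minimum and doubles once, so only a 2 MiB allocation at a 2 MiB-aligned offset passes the checks above.

        u32 size = 1024*1024;           /* gen3 minimum fence size */
        while (size < 1536*1024)        /* 1.5 MiB object */
                size <<= 1;             /* ends at 2 MiB */
        /* ok iff gtt_space->size == size && (gtt_offset & (size - 1)) == 0 */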
...@@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode) ...@@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
*/ */
int int
i915_gem_set_tiling(struct drm_device *dev, void *data, i915_gem_set_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
ret = i915_gem_check_is_wedged(dev); ret = i915_gem_check_is_wedged(dev);
if (ret) if (ret)
return ret; return ret;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) if (obj == NULL)
return -ENOENT; return -ENOENT;
obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { if (!i915_tiling_ok(dev,
drm_gem_object_unreference_unlocked(obj); args->stride, obj->base.size, args->tiling_mode)) {
drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL; return -EINVAL;
} }
if (obj_priv->pin_count) { if (obj->pin_count) {
drm_gem_object_unreference_unlocked(obj); drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY; return -EBUSY;
} }
...@@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
} }
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (args->tiling_mode != obj_priv->tiling_mode || if (args->tiling_mode != obj->tiling_mode ||
args->stride != obj_priv->stride) { args->stride != obj->stride) {
/* We need to rebind the object if its current allocation /* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new * no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but * tiling mode. Otherwise we can just leave it alone, but
...@@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/ */
if (!i915_gem_object_fence_ok(obj, args->tiling_mode)) if (!i915_gem_object_fence_ok(obj, args->tiling_mode))
ret = i915_gem_object_unbind(obj); ret = i915_gem_object_unbind(obj);
else if (obj_priv->fence_reg != I915_FENCE_REG_NONE) else if (obj->fence_reg != I915_FENCE_REG_NONE)
ret = i915_gem_object_put_fence_reg(obj, true); ret = i915_gem_object_put_fence_reg(obj, true);
else else
i915_gem_release_mmap(obj); i915_gem_release_mmap(obj);
if (ret != 0) { if (ret != 0) {
args->tiling_mode = obj_priv->tiling_mode; args->tiling_mode = obj->tiling_mode;
args->stride = obj_priv->stride; args->stride = obj->stride;
goto err; goto err;
} }
obj_priv->tiling_mode = args->tiling_mode; obj->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride; obj->stride = args->stride;
} }
err: err:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return ret; return ret;
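Userspace reaches this handler through the set-tiling ioctl. A hedged caller sketch (assumes libdrm include paths and an already-created GEM handle; fd, handle and stride are hypothetical values):

#include <stdint.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int set_tiling_x(int fd, uint32_t handle, uint32_t stride)
{
        struct drm_i915_gem_set_tiling arg = {
                .handle = handle,
                .tiling_mode = I915_TILING_X,
                .stride = stride,       /* bytes per row; must satisfy i915_tiling_ok() */
        };

        /* on return the kernel has written back the mode/stride it applied */
        return drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg);
}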
...@@ -375,22 +373,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, ...@@ -375,22 +373,20 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
*/ */
int int
i915_gem_get_tiling(struct drm_device *dev, void *data, i915_gem_get_tiling(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
struct drm_i915_gem_get_tiling *args = data; struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) if (obj == NULL)
return -ENOENT; return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
args->tiling_mode = obj_priv->tiling_mode; args->tiling_mode = obj->tiling_mode;
switch (obj_priv->tiling_mode) { switch (obj->tiling_mode) {
case I915_TILING_X: case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break; break;
...@@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data, ...@@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return 0; return 0;
...@@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page) ...@@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page)
} }
void void
i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->base.size >> PAGE_SHIFT;
int page_count = obj->size >> PAGE_SHIFT;
int i; int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return; return;
if (obj_priv->bit_17 == NULL) if (obj->bit_17 == NULL)
return; return;
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) != if ((new_bit_17 & 0x1) !=
(test_bit(i, obj_priv->bit_17) != 0)) { (test_bit(i, obj->bit_17) != 0)) {
i915_gem_swizzle_page(obj_priv->pages[i]); i915_gem_swizzle_page(obj->pages[i]);
set_page_dirty(obj_priv->pages[i]); set_page_dirty(obj->pages[i]);
} }
} }
} }
void void
i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{ {
struct drm_device *dev = obj->dev; struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); int page_count = obj->base.size >> PAGE_SHIFT;
int page_count = obj->size >> PAGE_SHIFT;
int i; int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return; return;
if (obj_priv->bit_17 == NULL) { if (obj->bit_17 == NULL) {
obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL); sizeof(long), GFP_KERNEL);
if (obj_priv->bit_17 == NULL) { if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 " DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n"); "record\n");
return; return;
...@@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) ...@@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
} }
for (i = 0; i < page_count; i++) { for (i = 0; i < page_count; i++) {
if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) if (page_to_phys(obj->pages[i]) & (1 << 17))
__set_bit(i, obj_priv->bit_17); __set_bit(i, obj->bit_17);
else else
__clear_bit(i, obj_priv->bit_17); __clear_bit(i, obj->bit_17);
} }
} }
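The bit-17 record exists because 9_10_17 swizzling folds physical address bit 17 into the tile layout: if a page is written out and later re-read at a physical address whose bit 17 changed (for example across swap), the CPU copy must be re-swizzled. The save function records one bit per page; the do function compares and fixes. A worked instance (addresses illustrative):

/* saved while page i sat at phys 0x20000000: bit 17 was 0 */
/* after swap-in it sits at phys 0x20020000: bit 17 is 1   */
char new_bit_17 = 0x20020000UL >> 17;   /* low bit = 1 */
/* (new_bit_17 & 1) != test_bit(i, obj->bit_17), so page i gets swizzled */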
...@@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work) ...@@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS #ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object * static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev, i915_error_object_create(struct drm_device *dev,
struct drm_gem_object *src) struct drm_i915_gem_object *src)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst; struct drm_i915_error_object *dst;
struct drm_i915_gem_object *src_priv;
int page, page_count; int page, page_count;
u32 reloc_offset; u32 reloc_offset;
if (src == NULL) if (src == NULL || src->pages == NULL)
return NULL; return NULL;
src_priv = to_intel_bo(src); page_count = src->base.size / PAGE_SIZE;
if (src_priv->pages == NULL)
return NULL;
page_count = src->size / PAGE_SIZE;
dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL) if (dst == NULL)
return NULL; return NULL;
reloc_offset = src_priv->gtt_offset; reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) { for (page = 0; page < page_count; page++) {
unsigned long flags; unsigned long flags;
void __iomem *s; void __iomem *s;
...@@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev, ...@@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE; reloc_offset += PAGE_SIZE;
} }
dst->page_count = page_count; dst->page_count = page_count;
dst->gtt_offset = src_priv->gtt_offset; dst->gtt_offset = src->gtt_offset;
return dst; return dst;
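The single GFP_ATOMIC kmalloc above allocates the header and the per-page pointer array together, which implies a trailing flexible array in the destination type. Roughly (a sketch consistent with the sizeof arithmetic, not the verbatim header):

struct drm_i915_error_object {
        int page_count;
        u32 gtt_offset;
        u32 *pages[0];  /* page_count snapshot pages follow the header */
};

GFP_ATOMIC is used because error capture runs in a context where sleeping is not an option.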
...@@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err, ...@@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
static void i915_capture_error_state(struct drm_device *dev) static void i915_capture_error_state(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error; struct drm_i915_error_state *error;
struct drm_gem_object *batchbuffer[2]; struct drm_i915_gem_object *batchbuffer[2];
unsigned long flags; unsigned long flags;
u32 bbaddr; u32 bbaddr;
int count; int count;
...@@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev)
batchbuffer[0] = NULL; batchbuffer[0] = NULL;
batchbuffer[1] = NULL; batchbuffer[1] = NULL;
count = 0; count = 0;
list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL && if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset && bbaddr >= obj->gtt_offset &&
bbaddr < obj_priv->gtt_offset + obj->size) bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj; batchbuffer[0] = obj;
if (batchbuffer[1] == NULL && if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset && error->acthd >= obj->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size) error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj; batchbuffer[1] = obj;
count++; count++;
} }
/* Scan the other lists for completeness for those bizarre errors. */ /* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL && if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset && bbaddr >= obj->gtt_offset &&
bbaddr < obj_priv->gtt_offset + obj->size) bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj; batchbuffer[0] = obj;
if (batchbuffer[1] == NULL && if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset && error->acthd >= obj->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size) error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj; batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1]) if (batchbuffer[0] && batchbuffer[1])
...@@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev)
} }
} }
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
struct drm_gem_object *obj = &obj_priv->base;
if (batchbuffer[0] == NULL && if (batchbuffer[0] == NULL &&
bbaddr >= obj_priv->gtt_offset && bbaddr >= obj->gtt_offset &&
bbaddr < obj_priv->gtt_offset + obj->size) bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj; batchbuffer[0] = obj;
if (batchbuffer[1] == NULL && if (batchbuffer[1] == NULL &&
error->acthd >= obj_priv->gtt_offset && error->acthd >= obj->gtt_offset &&
error->acthd < obj_priv->gtt_offset + obj->size) error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj; batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1]) if (batchbuffer[0] && batchbuffer[1])
...@@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev) ...@@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev)
/* Record the ringbuffer */ /* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev, error->ringbuffer = i915_error_object_create(dev,
dev_priv->render_ring.gem_object); dev_priv->render_ring.obj);
/* Record buffers on the active and pinned lists. */ /* Record buffers on the active and pinned lists. */
error->active_bo = NULL; error->active_bo = NULL;
error->pinned_bo = NULL; error->pinned_bo = NULL;
error->active_bo_count = count; error->active_bo_count = count;
list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list) list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
count++; count++;
error->pinned_bo_count = count - error->active_bo_count; error->pinned_bo_count = count - error->active_bo_count;
...@@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) ...@@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct intel_unpin_work *work; struct intel_unpin_work *work;
unsigned long flags; unsigned long flags;
bool stall_detected; bool stall_detected;
...@@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) ...@@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
} }
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */ /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
obj_priv = to_intel_bo(work->pending_flip_obj); obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else { } else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch + crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8); crtc->x * crtc->fb->bits_per_pixel/8);
} }
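On pre-gen4 the display address register holds a linear byte address rather than a tiled surface base, hence the x/y arithmetic in the comparison above. A worked instance with illustrative numbers, for an XRGB framebuffer with a 7680-byte pitch (1920 pixels at 4 bytes each) scanned out from (x = 0, y = 100):

u32 expected = obj->gtt_offset + 100 * 7680 + 0 * (32 / 8);
/* the flip is presumed complete once DSPAADDR reads back this value */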
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include <drm/drmP.h> #include <drm/drmP.h>
#include "i915_drv.h"
#undef TRACE_SYSTEM #undef TRACE_SYSTEM
#define TRACE_SYSTEM i915 #define TRACE_SYSTEM i915
...@@ -16,18 +17,18 @@ ...@@ -16,18 +17,18 @@
TRACE_EVENT(i915_gem_object_create, TRACE_EVENT(i915_gem_object_create,
TP_PROTO(struct drm_gem_object *obj), TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj), TP_ARGS(obj),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, size) __field(u32, size)
), ),
TP_fast_assign( TP_fast_assign(
__entry->obj = obj; __entry->obj = obj;
__entry->size = obj->size; __entry->size = obj->base.size;
), ),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
...@@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create, ...@@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create,
TRACE_EVENT(i915_gem_object_bind, TRACE_EVENT(i915_gem_object_bind,
TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable), TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
TP_ARGS(obj, gtt_offset, mappable), TP_ARGS(obj, gtt_offset, mappable),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset) __field(u32, gtt_offset)
__field(bool, mappable) __field(bool, mappable)
), ),
...@@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind, ...@@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind,
TRACE_EVENT(i915_gem_object_change_domain, TRACE_EVENT(i915_gem_object_change_domain,
TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain), TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains) __field(u32, read_domains)
__field(u32, write_domain) __field(u32, write_domain)
), ),
TP_fast_assign( TP_fast_assign(
__entry->obj = obj; __entry->obj = obj;
__entry->read_domains = obj->read_domains | (old_read_domains << 16); __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
__entry->write_domain = obj->write_domain | (old_write_domain << 16); __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
), ),
TP_printk("obj=%p, read=%04x, write=%04x", TP_printk("obj=%p, read=%04x, write=%04x",
...@@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain, ...@@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain,
TRACE_EVENT(i915_gem_object_get_fence, TRACE_EVENT(i915_gem_object_get_fence,
TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode), TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode),
TP_ARGS(obj, fence, tiling_mode), TP_ARGS(obj, fence, tiling_mode),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
__field(int, fence) __field(int, fence)
__field(int, tiling_mode) __field(int, tiling_mode)
), ),
...@@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence, ...@@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence,
DECLARE_EVENT_CLASS(i915_gem_object, DECLARE_EVENT_CLASS(i915_gem_object,
TP_PROTO(struct drm_gem_object *obj), TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj), TP_ARGS(obj),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object, ...@@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
TP_PROTO(struct drm_gem_object *obj), TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj) TP_ARGS(obj)
); );
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
TP_PROTO(struct drm_gem_object *obj), TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj) TP_ARGS(obj)
); );
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
TP_PROTO(struct drm_gem_object *obj), TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj) TP_ARGS(obj)
); );
...@@ -266,13 +267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end, ...@@ -266,13 +267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
); );
TRACE_EVENT(i915_flip_request, TRACE_EVENT(i915_flip_request,
TP_PROTO(int plane, struct drm_gem_object *obj), TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj), TP_ARGS(plane, obj),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, plane) __field(int, plane)
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request, ...@@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request,
); );
TRACE_EVENT(i915_flip_complete, TRACE_EVENT(i915_flip_complete,
TP_PROTO(int plane, struct drm_gem_object *obj), TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj), TP_ARGS(plane, obj),
TP_STRUCT__entry( TP_STRUCT__entry(
__field(int, plane) __field(int, plane)
__field(struct drm_gem_object *, obj) __field(struct drm_i915_gem_object *, obj)
), ),
TP_fast_assign( TP_fast_assign(
......
...@@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb; struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i; int plane, i;
u32 fbc_ctl, fbc_ctl2; u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch && if (fb->pitch == dev_priv->cfb_pitch &&
obj_priv->fence_reg == dev_priv->cfb_fence && obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane && intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN) I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return; return;
...@@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* FBC_CTL wants 64B units */ /* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
...@@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* Set it up... */ /* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
if (obj_priv->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE; fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2); I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y); I915_WRITE(FBC_FENCE_OFF, crtc->y);
...@@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
if (obj_priv->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence; fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl); I915_WRITE(FBC_CONTROL, fbc_ctl);
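FBC_CTL encodes the compressed-buffer pitch in 64-byte units minus one, which is what the (pitch / 64) - 1 above computes before masking into the 8-bit stride field. A worked instance: a 4096-byte pitch programs the value 63.

u32 cfb_pitch = (4096 / 64) - 1;        /* = 63 */
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;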
...@@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb; struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200; unsigned long stall_watermark = 200;
...@@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL); dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) { if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
dev_priv->cfb_fence == obj_priv->fence_reg && dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane && dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y) dev_priv->cfb_y == crtc->y)
return; return;
...@@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
} }
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y; dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
if (obj_priv->tiling_mode != I915_TILING_NONE) { if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else { } else {
...@@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb; struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200; unsigned long stall_watermark = 200;
...@@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) { if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
dev_priv->cfb_fence == obj_priv->fence_reg && dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane && dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_offset == obj_priv->gtt_offset && dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y) dev_priv->cfb_y == crtc->y)
return; return;
...@@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
} }
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
dev_priv->cfb_fence = obj_priv->fence_reg; dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane; dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_offset = obj_priv->gtt_offset; dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y; dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED; dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
if (obj_priv->tiling_mode != I915_TILING_NONE) { if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else { } else {
...@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) ...@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */ /* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
...@@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev) ...@@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc; struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n"); DRM_DEBUG_KMS("\n");
...@@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev) ...@@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc); intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb; fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb); intel_fb = to_intel_framebuffer(fb);
obj_priv = to_intel_bo(intel_fb->obj); obj = intel_fb->obj;
if (intel_fb->obj->size > dev_priv->cfb_size) { if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling " DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n"); "compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
...@@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev) ...@@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE; dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable; goto out_disable;
} }
if (obj_priv->tiling_mode != I915_TILING_X) { if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED; dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable; goto out_disable;
...@@ -1433,14 +1433,13 @@ static void intel_update_fbc(struct drm_device *dev) ...@@ -1433,14 +1433,13 @@ static void intel_update_fbc(struct drm_device *dev)
int int
intel_pin_and_fence_fb_obj(struct drm_device *dev, intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj, struct drm_i915_gem_object *obj,
bool pipelined) bool pipelined)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment; u32 alignment;
int ret; int ret;
switch (obj_priv->tiling_mode) { switch (obj->tiling_mode) {
case I915_TILING_NONE: case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024; alignment = 128 * 1024;
...@@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, ...@@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install * framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous. * a fence as the cost is not that onerous.
*/ */
if (obj_priv->tiling_mode != I915_TILING_NONE) { if (obj->tiling_mode != I915_TILING_NONE) {
ret = i915_gem_object_get_fence_reg(obj, false); ret = i915_gem_object_get_fence_reg(obj, false);
if (ret) if (ret)
goto err_unpin; goto err_unpin;
...@@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_gem_object *obj;
int plane = intel_crtc->plane; int plane = intel_crtc->plane;
unsigned long Start, Offset; unsigned long Start, Offset;
u32 dspcntr; u32 dspcntr;
...@@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_fb = to_intel_framebuffer(fb); intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj; obj = intel_fb->obj;
obj_priv = to_intel_bo(obj);
reg = DSPCNTR(plane); reg = DSPCNTR(plane);
dspcntr = I915_READ(reg); dspcntr = I915_READ(reg);
...@@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL; return -EINVAL;
} }
if (INTEL_INFO(dev)->gen >= 4) { if (INTEL_INFO(dev)->gen >= 4) {
if (obj_priv->tiling_mode != I915_TILING_NONE) if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED; dspcntr |= DISPPLANE_TILED;
else else
dspcntr &= ~DISPPLANE_TILED; dspcntr &= ~DISPPLANE_TILED;
...@@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, ...@@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr); I915_WRITE(reg, dspcntr);
Start = obj_priv->gtt_offset; Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
...@@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, ...@@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) { if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
wait_event(dev_priv->pending_flip_queue, wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj_priv->pending_flip) == 0); atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending /* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the * MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old * current scanout is retired before unpinning the old
* framebuffer. * framebuffer.
*/ */
ret = i915_gem_object_flush_gpu(obj_priv, false); ret = i915_gem_object_flush_gpu(obj, false);
if (ret) { if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev) ...@@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{ {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
if (crtc->fb == NULL) if (crtc->fb == NULL)
return; return;
obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private; dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue, wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj_priv->pending_flip) == 0); atomic_read(&obj->pending_flip) == 0);
} }
static void ironlake_crtc_enable(struct drm_crtc *crtc) static void ironlake_crtc_enable(struct drm_crtc *crtc)
...@@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, ...@@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
} }
static int intel_crtc_cursor_set(struct drm_crtc *crtc, static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv, struct drm_file *file,
uint32_t handle, uint32_t handle,
uint32_t width, uint32_t height) uint32_t width, uint32_t height)
{ {
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_gem_object *bo; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
uint32_t addr; uint32_t addr;
int ret; int ret;
...@@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) { if (!handle) {
DRM_DEBUG_KMS("cursor off\n"); DRM_DEBUG_KMS("cursor off\n");
addr = 0; addr = 0;
bo = NULL; obj = NULL;
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
goto finish; goto finish;
} }
...@@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL; return -EINVAL;
} }
bo = drm_gem_object_lookup(dev, file_priv, handle); obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (!bo) if (!obj)
return -ENOENT; return -ENOENT;
obj_priv = to_intel_bo(bo); if (obj->base.size < width * height * 4) {
if (bo->size < width * height * 4) {
DRM_ERROR("buffer is to small\n"); DRM_ERROR("buffer is to small\n");
ret = -ENOMEM; ret = -ENOMEM;
goto fail; goto fail;
...@@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */ /* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) { if (!dev_priv->info->cursor_needs_physical) {
ret = i915_gem_object_pin(bo, PAGE_SIZE, true); ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) { if (ret) {
DRM_ERROR("failed to pin cursor bo\n"); DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked; goto fail_locked;
} }
ret = i915_gem_object_set_to_gtt_domain(bo, 0); ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) { if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n"); DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin; goto fail_unpin;
} }
addr = obj_priv->gtt_offset; addr = obj->gtt_offset;
} else { } else {
int align = IS_I830(dev) ? 16 * 1024 : 256; int align = IS_I830(dev) ? 16 * 1024 : 256;
ret = i915_gem_attach_phys_object(dev, bo, ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align); align);
if (ret) { if (ret) {
DRM_ERROR("failed to attach phys object\n"); DRM_ERROR("failed to attach phys object\n");
goto fail_locked; goto fail_locked;
} }
addr = obj_priv->phys_obj->handle->busaddr; addr = obj->phys_obj->handle->busaddr;
} }
if (IS_GEN2(dev)) if (IS_GEN2(dev))
...@@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish: finish:
if (intel_crtc->cursor_bo) { if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) { if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo) if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else } else
i915_gem_object_unpin(intel_crtc->cursor_bo); i915_gem_object_unpin(intel_crtc->cursor_bo);
drm_gem_object_unreference(intel_crtc->cursor_bo); drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
} }
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr; intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = bo; intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width; intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height; intel_crtc->cursor_height = height;
...@@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc, ...@@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0; return 0;
fail_unpin: fail_unpin:
i915_gem_object_unpin(bo); i915_gem_object_unpin(obj);
fail_locked: fail_locked:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
fail: fail:
drm_gem_object_unreference_unlocked(bo); drm_gem_object_unreference_unlocked(&obj->base);
return ret; return ret;
} }
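intel_crtc_cursor_set() is the driver backend for the generic KMS cursor ioctl. A hedged userspace sketch (libdrm; fd, crtc_id and handle are hypothetical; 64x64 satisfies the width * height * 4 size check above with a 16 KiB ARGB bo):

#include <stdint.h>
#include <xf86drmMode.h>

static int show_cursor(int fd, uint32_t crtc_id, uint32_t handle)
{
        /* handle must name a bo of at least 64 * 64 * 4 bytes, per the check above */
        return drmModeSetCursor(fd, crtc_id, handle, 64, 64);
}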
...@@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work) ...@@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its * buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency. * clock frequency.
*/ */
void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj) void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL; struct drm_crtc *crtc = NULL;
...@@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work) ...@@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&work->dev->struct_mutex); mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj); i915_gem_object_unpin(work->old_fb_obj);
drm_gem_object_unreference(work->pending_flip_obj); drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(work->old_fb_obj); drm_gem_object_unreference(&work->old_fb_obj->base);
mutex_unlock(&work->dev->struct_mutex); mutex_unlock(&work->dev->struct_mutex);
kfree(work); kfree(work);
} }
...@@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, ...@@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work; struct intel_unpin_work *work;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e; struct drm_pending_vblank_event *e;
struct timeval now; struct timeval now;
unsigned long flags; unsigned long flags;
...@@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev, ...@@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
spin_unlock_irqrestore(&dev->event_lock, flags); spin_unlock_irqrestore(&dev->event_lock, flags);
obj_priv = to_intel_bo(work->old_fb_obj); obj = work->old_fb_obj;
atomic_clear_mask(1 << intel_crtc->plane, atomic_clear_mask(1 << intel_crtc->plane,
&obj_priv->pending_flip.counter); &obj->pending_flip.counter);
if (atomic_read(&obj_priv->pending_flip) == 0) if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue); wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work); schedule_work(&work->work);
...@@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev; struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work; struct intel_unpin_work *work;
unsigned long flags, offset; unsigned long flags, offset;
...@@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
goto cleanup_work; goto cleanup_work;
/* Reference the objects for the scheduled work. */ /* Reference the objects for the scheduled work. */
drm_gem_object_reference(work->old_fb_obj); drm_gem_object_reference(&work->old_fb_obj->base);
drm_gem_object_reference(obj); drm_gem_object_reference(&obj->base);
crtc->fb = fb; crtc->fb = fb;
...@@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
} }
work->pending_flip_obj = obj; work->pending_flip_obj = obj;
obj_priv = to_intel_bo(obj);
work->enable_stall_check = true; work->enable_stall_check = true;
...@@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
/* Block clients from rendering to the new back buffer until /* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible. * the flip occurs and the object is no longer visible.
*/ */
atomic_add(1 << intel_crtc->plane, atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
&to_intel_bo(work->old_fb_obj)->pending_flip);
switch (INTEL_INFO(dev)->gen) { switch (INTEL_INFO(dev)->gen) {
case 2: case 2:
OUT_RING(MI_DISPLAY_FLIP | OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch); OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset + offset); OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP); OUT_RING(MI_NOOP);
break; break;
...@@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 | OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch); OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset + offset); OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP); OUT_RING(MI_NOOP);
break; break;
...@@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP | OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch); OUT_RING(fb->pitch);
OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode); OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far /* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now. * untested on non-native modes, so ignore it for now.
...@@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6: case 6:
OUT_RING(MI_DISPLAY_FLIP | OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch | obj_priv->tiling_mode); OUT_RING(fb->pitch | obj->tiling_mode);
OUT_RING(obj_priv->gtt_offset); OUT_RING(obj->gtt_offset);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff; pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
...@@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, ...@@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0; return 0;
cleanup_objs: cleanup_objs:
drm_gem_object_unreference(work->old_fb_obj); drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
cleanup_work: cleanup_work:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) ...@@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
} }
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
...@@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) ...@@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb); drm_framebuffer_cleanup(fb);
drm_gem_object_unreference_unlocked(intel_fb->obj); drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb); kfree(intel_fb);
} }
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv, struct drm_file *file,
unsigned int *handle) unsigned int *handle)
{ {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
struct drm_gem_object *object = intel_fb->obj; struct drm_i915_gem_object *obj = intel_fb->obj;
return drm_gem_handle_create(file_priv, object, handle); return drm_gem_handle_create(file, &obj->base, handle);
} }
static const struct drm_framebuffer_funcs intel_fb_funcs = { static const struct drm_framebuffer_funcs intel_fb_funcs = {
...@@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { ...@@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev, int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb, struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd, struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret; int ret;
if (obj_priv->tiling_mode == I915_TILING_Y) if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL; return -EINVAL;
if (mode_cmd->pitch & 63) if (mode_cmd->pitch & 63)
...@@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev, ...@@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp, struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd) struct drm_mode_fb_cmd *mode_cmd)
{ {
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb; struct intel_framebuffer *intel_fb;
int ret; int ret;
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle); obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj) if (!obj)
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
...@@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev, ...@@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb) if (!intel_fb)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
ret = intel_framebuffer_init(dev, intel_fb, ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
mode_cmd, obj);
if (ret) { if (ret) {
drm_gem_object_unreference_unlocked(obj); drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb); kfree(intel_fb);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
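
The hunk above folds the handle lookup and the conversion to the driver type into one expression and only then NULL-checks the result. A minimal, userspace-compilable sketch of the embedding that makes this idiom safe (the members shown are illustrative stand-ins, not the driver's full definitions): because base is the first member, to_intel_bo() is a zero-offset container_of(), so in practice a NULL result from drm_gem_object_lookup() survives the conversion as NULL and the if (!obj) test still fires.

	#include <stddef.h>

	/* Illustrative stand-ins for the real DRM/i915 structures. */
	struct drm_gem_object {
		size_t size;
		/* ... refcount, driver private data ... */
	};

	struct drm_i915_gem_object {
		struct drm_gem_object base;	/* first member: offset 0 */
		int tiling_mode;
		unsigned int gtt_offset;
		/* ... */
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static inline struct drm_i915_gem_object *
	to_intel_bo(struct drm_gem_object *gem)
	{
		return container_of(gem, struct drm_i915_gem_object, base);
	}

Going the other way needs no helper at all: every call site in this commit that talks to the core GEM layer simply passes &obj->base.
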
...@@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = { ...@@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed, .output_poll_changed = intel_fb_output_poll_changed,
}; };
static struct drm_gem_object * static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev) intel_alloc_context_page(struct drm_device *dev)
{ {
struct drm_gem_object *ctx; struct drm_i915_gem_object *ctx;
int ret; int ret;
ctx = i915_gem_alloc_object(dev, 4096); ctx = i915_gem_alloc_object(dev, 4096);
...@@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev) ...@@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin: err_unpin:
i915_gem_object_unpin(ctx); i915_gem_object_unpin(ctx);
err_unref: err_unref:
drm_gem_object_unreference(ctx); drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
return NULL; return NULL;
} }
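
With intel_alloc_context_page() returning the driver type directly, the alloc/pin/use/unref lifecycle reads the same at every call site. A hedged sketch of that lifecycle (the wrapper name and control flow are illustrative and locking is elided; i915_gem_alloc_object(), i915_gem_object_pin() and drm_gem_object_unreference() are the helpers the diff actually uses):

	/* illustrative wrapper mirroring intel_alloc_context_page() */
	static struct drm_i915_gem_object *
	alloc_pinned_page(struct drm_device *dev)
	{
		struct drm_i915_gem_object *ctx;

		ctx = i915_gem_alloc_object(dev, 4096);
		if (ctx == NULL)
			return NULL;

		if (i915_gem_object_pin(ctx, 4096, true))
			goto err_unref;

		return ctx;	/* callers keep the driver type throughout */

	err_unref:
		/* only the core refcount helper wants the embedded base */
		drm_gem_object_unreference(&ctx->base);
		return NULL;
	}
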
...@@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev) ...@@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev)
if (dev_priv->renderctx == NULL) if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev); dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) { if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj = dev_priv->renderctx;
obj_priv = to_intel_bo(dev_priv->renderctx); if (BEGIN_LP_RING(4) == 0) {
if (obj_priv) { OUT_RING(MI_SET_CONTEXT);
if (BEGIN_LP_RING(4) == 0) { OUT_RING(obj->gtt_offset |
OUT_RING(MI_SET_CONTEXT); MI_MM_SPACE_GTT |
OUT_RING(obj_priv->gtt_offset | MI_SAVE_EXT_STATE_EN |
MI_MM_SPACE_GTT | MI_RESTORE_EXT_STATE_EN |
MI_SAVE_EXT_STATE_EN | MI_RESTORE_INHIBIT);
MI_RESTORE_EXT_STATE_EN | OUT_RING(MI_NOOP);
MI_RESTORE_INHIBIT); OUT_RING(MI_FLUSH);
OUT_RING(MI_NOOP); ADVANCE_LP_RING();
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
}
} }
} else } else
DRM_DEBUG_KMS("Failed to allocate render context." DRM_DEBUG_KMS("Failed to allocate render context."
...@@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev) ...@@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev)
} }
if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_i915_gem_object *obj_priv = NULL; if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) { if (dev_priv->pwrctx) {
obj_priv = to_intel_bo(dev_priv->pwrctx); struct drm_i915_gem_object *obj = dev_priv->pwrctx;
} else { I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
struct drm_gem_object *pwrctx;
pwrctx = intel_alloc_context_page(dev);
if (pwrctx) {
dev_priv->pwrctx = pwrctx;
obj_priv = to_intel_bo(pwrctx);
}
}
if (obj_priv) {
I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY, I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
} }
...@@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev) ...@@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev)
dev_priv->display.disable_fbc(dev); dev_priv->display.disable_fbc(dev);
if (dev_priv->renderctx) { if (dev_priv->renderctx) {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj = dev_priv->renderctx;
I915_WRITE(CCID, obj->gtt_offset &~ CCID_EN);
POSTING_READ(CCID);
obj_priv = to_intel_bo(dev_priv->renderctx); i915_gem_object_unpin(obj);
I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN); drm_gem_object_unreference(&obj->base);
I915_READ(CCID); dev_priv->renderctx = NULL;
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(dev_priv->renderctx);
} }
if (dev_priv->pwrctx) { if (dev_priv->pwrctx) {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj = dev_priv->pwrctx;
I915_WRITE(PWRCTXA, obj->gtt_offset &~ PWRCTX_EN);
POSTING_READ(PWRCTXA);
obj_priv = to_intel_bo(dev_priv->pwrctx); i915_gem_object_unpin(obj);
I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN); drm_gem_object_unreference(&obj->base);
I915_READ(PWRCTXA); dev_priv->pwrctx = NULL;
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(dev_priv->pwrctx);
} }
if (IS_IRONLAKE_M(dev)) if (IS_IRONLAKE_M(dev))
......
...@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode) ...@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
struct intel_framebuffer { struct intel_framebuffer {
struct drm_framebuffer base; struct drm_framebuffer base;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
}; };
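
Retyping the cached pointer itself, here and in the cursor and flip-work structures below, is what lets call sites drop their to_intel_bo() round trips; teardown then reaches the core layer through the embedded base without a cast. A hedged sketch (the release helper is illustrative; the struct field and the unreference call are as in the diff):

	struct intel_framebuffer {
		struct drm_framebuffer base;
		struct drm_i915_gem_object *obj;	/* was struct drm_gem_object * */
	};

	/* illustrative helper mirroring the intel_fbdev_destroy() path */
	static void intel_fb_release_obj(struct intel_framebuffer *ifb)
	{
		if (ifb->obj) {
			drm_gem_object_unreference_unlocked(&ifb->obj->base);
			ifb->obj = NULL;
		}
	}
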
struct intel_fbdev { struct intel_fbdev {
...@@ -166,7 +166,7 @@ struct intel_crtc { ...@@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work; struct intel_unpin_work *unpin_work;
int fdi_lanes; int fdi_lanes;
struct drm_gem_object *cursor_bo; struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr; uint32_t cursor_addr;
int16_t cursor_x, cursor_y; int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height; int16_t cursor_width, cursor_height;
...@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) ...@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work { struct intel_unpin_work {
struct work_struct work; struct work_struct work;
struct drm_device *dev; struct drm_device *dev;
struct drm_gem_object *old_fb_obj; struct drm_i915_gem_object *old_fb_obj;
struct drm_gem_object *pending_flip_obj; struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event; struct drm_pending_vblank_event *event;
int pending; int pending;
bool enable_stall_check; bool enable_stall_check;
...@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if); ...@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device); extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev); extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev); extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); extern void intel_mark_busy(struct drm_device *dev,
struct drm_i915_gem_object *obj);
extern void intel_lvds_init(struct drm_device *dev); extern void intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg); extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void void
...@@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev); ...@@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev); extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj, struct drm_i915_gem_object *obj,
bool pipelined); bool pipelined);
extern int intel_framebuffer_init(struct drm_device *dev, extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb, struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd, struct drm_mode_fb_cmd *mode_cmd,
struct drm_gem_object *obj); struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev); extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev); extern void intel_fbdev_fini(struct drm_device *dev);
......
...@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct fb_info *info; struct fb_info *info;
struct drm_framebuffer *fb; struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd; struct drm_mode_fb_cmd mode_cmd;
struct drm_gem_object *fbo = NULL; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
struct device *device = &dev->pdev->dev; struct device *device = &dev->pdev->dev;
int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0; int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
...@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitch * mode_cmd.height; size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE); size = ALIGN(size, PAGE_SIZE);
fbo = i915_gem_alloc_object(dev, size); obj = i915_gem_alloc_object(dev, size);
if (!fbo) { if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n"); DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */ /* Flush everything out, we'll be doing GTT only from now on */
ret = intel_pin_and_fence_fb_obj(dev, fbo, false); ret = intel_pin_and_fence_fb_obj(dev, obj, false);
if (ret) { if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret); DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref; goto out_unref;
...@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev; info->par = ifbdev;
ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo); ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret) if (ret)
goto out_unpin; goto out_unpin;
...@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset; info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size; info->fix.smem_len = size;
info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset, info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
size);
if (!info->screen_base) { if (!info->screen_base) {
ret = -ENOSPC; ret = -ENOSPC;
goto out_unpin; goto out_unpin;
...@@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n", DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height, fb->width, fb->height,
obj_priv->gtt_offset, fbo); obj->gtt_offset, obj);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
...@@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev, ...@@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0; return 0;
out_unpin: out_unpin:
i915_gem_object_unpin(fbo); i915_gem_object_unpin(obj);
out_unref: out_unref:
drm_gem_object_unreference(fbo); drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
out: out:
return ret; return ret;
...@@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, ...@@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base); drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) { if (ifb->obj) {
drm_gem_object_unreference_unlocked(ifb->obj); drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL; ifb->obj = NULL;
} }
} }
......
...@@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay, ...@@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay) static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{ {
struct drm_gem_object *obj = &overlay->old_vid_bo->base; struct drm_i915_gem_object *obj = overlay->old_vid_bo;
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL; overlay->old_vid_bo = NULL;
} }
static void intel_overlay_off_tail(struct intel_overlay *overlay) static void intel_overlay_off_tail(struct intel_overlay *overlay)
{ {
struct drm_gem_object *obj; struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */ /* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo); BUG_ON(!overlay->vid_bo);
obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL; overlay->vid_bo = NULL;
overlay->crtc->overlay = NULL; overlay->crtc->overlay = NULL;
...@@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params) ...@@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
} }
static int intel_overlay_do_put_image(struct intel_overlay *overlay, static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct drm_gem_object *new_bo, struct drm_i915_gem_object *new_bo,
struct put_image_params *params) struct put_image_params *params)
{ {
int ret, tmp_width; int ret, tmp_width;
struct overlay_registers *regs; struct overlay_registers *regs;
bool scale_changed = false; bool scale_changed = false;
struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev; struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); BUG_ON(!mutex_is_locked(&dev->struct_mutex));
...@@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev, regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width); params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h; regs->SHEIGHT = params->src_h;
regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y; regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y; regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) { if (params->format & I915_OVERLAY_YUV_PLANAR) {
...@@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale); params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16; regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16; regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U; regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V; regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16; regs->OSTRIDE |= params->stride_UV << 16;
} }
...@@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, ...@@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin; goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo; overlay->old_vid_bo = overlay->vid_bo;
overlay->vid_bo = to_intel_bo(new_bo); overlay->vid_bo = new_bo;
return 0; return 0;
...@@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec) ...@@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev, static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec, struct drm_intel_overlay_put_image *rec,
struct drm_gem_object *new_bo) struct drm_i915_gem_object *new_bo)
{ {
int uv_hscale = uv_hsubsampling(rec->flags); int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags); int uv_vscale = uv_vsubsampling(rec->flags);
...@@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev, ...@@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL; return -EINVAL;
tmp = rec->stride_Y*rec->src_height; tmp = rec->stride_Y*rec->src_height;
if (rec->offset_Y + tmp > new_bo->size) if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL; return -EINVAL;
break; break;
...@@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev, ...@@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL; return -EINVAL;
tmp = rec->stride_Y * rec->src_height; tmp = rec->stride_Y * rec->src_height;
if (rec->offset_Y + tmp > new_bo->size) if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL; return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale); tmp = rec->stride_UV * (rec->src_height / uv_vscale);
if (rec->offset_U + tmp > new_bo->size || if (rec->offset_U + tmp > new_bo->base.size ||
rec->offset_V + tmp > new_bo->size) rec->offset_V + tmp > new_bo->base.size)
return -EINVAL; return -EINVAL;
break; break;
} }
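
The checks above now read the backing size through the embedded core object, new_bo->base.size, since size is a property of the GEM object rather than of the driver wrapper. A hedged sketch of one such bounds check, factored into a helper with explicit widening (the helper name and the 64-bit arithmetic are illustrative; the diff compares the 32-bit sums directly):

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>

	/* stand-in types, repeated so the sketch compiles on its own */
	struct drm_gem_object { size_t size; };
	struct drm_i915_gem_object { struct drm_gem_object base; };

	static int check_plane_fits(const struct drm_i915_gem_object *bo,
				    uint32_t offset, uint32_t stride,
				    uint32_t height)
	{
		uint64_t end = (uint64_t)offset + (uint64_t)stride * height;

		/* same comparison check_overlay_src() makes per plane */
		return end <= bo->base.size ? 0 : -EINVAL;
	}
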
...@@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, ...@@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay; struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj; struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc; struct intel_crtc *crtc;
struct drm_gem_object *new_bo; struct drm_i915_gem_object *new_bo;
struct put_image_params *params; struct put_image_params *params;
int ret; int ret;
...@@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, ...@@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
} }
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
new_bo = drm_gem_object_lookup(dev, file_priv, new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
put_image_rec->bo_handle); put_image_rec->bo_handle));
if (!new_bo) { if (!new_bo) {
ret = -ENOENT; ret = -ENOENT;
goto out_free; goto out_free;
...@@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data, ...@@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock: out_unlock:
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&dev->mode_config.mutex);
drm_gem_object_unreference_unlocked(new_bo); drm_gem_object_unreference_unlocked(&new_bo->base);
out_free: out_free:
kfree(params); kfree(params);
...@@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev)
{ {
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay; struct intel_overlay *overlay;
struct drm_gem_object *reg_bo; struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs; struct overlay_registers *regs;
int ret; int ret;
...@@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE); reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo) if (!reg_bo)
goto out_free; goto out_free;
overlay->reg_bo = to_intel_bo(reg_bo); overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) { if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo, ret = i915_gem_attach_phys_object(dev, reg_bo,
...@@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n"); DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo; goto out_free_bo;
} }
overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr; overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else { } else {
ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true); ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) { if (ret) {
DRM_ERROR("failed to pin overlay register bo\n"); DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo; goto out_free_bo;
} }
overlay->flip_addr = overlay->reg_bo->gtt_offset; overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true); ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) { if (ret) {
...@@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev) ...@@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo: out_unpin_bo:
i915_gem_object_unpin(reg_bo); i915_gem_object_unpin(reg_bo);
out_free_bo: out_free_bo:
drm_gem_object_unreference(reg_bo); drm_gem_object_unreference(&reg_bo->base);
out_free: out_free:
kfree(overlay); kfree(overlay);
return; return;
......
...@@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) ...@@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
static int init_ring_common(struct intel_ring_buffer *ring) static int init_ring_common(struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = ring->dev->dev_private; drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object); struct drm_i915_gem_object *obj = ring->obj;
u32 head; u32 head;
/* Stop the ring if it's running. */ /* Stop the ring if it's running. */
...@@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) ...@@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
ring->write_tail(ring, 0); ring->write_tail(ring, 0);
/* Initialize the ring. */ /* Initialize the ring. */
I915_WRITE_START(ring, obj_priv->gtt_offset); I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR; head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */ /* G45 ring initialization fails to reset head to zero */
...@@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring) ...@@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
/* If the head is still not zero, the ring is dead */ /* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 || if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
I915_READ_START(ring) != obj_priv->gtt_offset || I915_READ_START(ring) != obj->gtt_offset ||
(I915_READ_HEAD(ring) & HEAD_ADDR) != 0) { (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed " DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n", "ctl %08x head %08x tail %08x start %08x\n",
...@@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, ...@@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
static void cleanup_status_page(struct intel_ring_buffer *ring) static void cleanup_status_page(struct intel_ring_buffer *ring)
{ {
drm_i915_private_t *dev_priv = ring->dev->dev_private; drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
obj = ring->status_page.obj; obj = ring->status_page.obj;
if (obj == NULL) if (obj == NULL)
return; return;
obj_priv = to_intel_bo(obj);
kunmap(obj_priv->pages[0]); kunmap(obj->pages[0]);
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL; ring->status_page.obj = NULL;
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
...@@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
{ {
struct drm_device *dev = ring->dev; struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private; drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
int ret; int ret;
obj = i915_gem_alloc_object(dev, 4096); obj = i915_gem_alloc_object(dev, 4096);
...@@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
ret = -ENOMEM; ret = -ENOMEM;
goto err; goto err;
} }
obj_priv = to_intel_bo(obj); obj->agp_type = AGP_USER_CACHED_MEMORY;
obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
ret = i915_gem_object_pin(obj, 4096, true); ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) { if (ret != 0) {
goto err_unref; goto err_unref;
} }
ring->status_page.gfx_addr = obj_priv->gtt_offset; ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = kmap(obj_priv->pages[0]); ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) { if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin; goto err_unpin;
...@@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
err_unpin: err_unpin:
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
err_unref: err_unref:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
err: err:
return ret; return ret;
} }
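
init_status_page() and cleanup_status_page() above are mirror images, and the retype makes the symmetry easier to see: the only point where either function leaves the driver type is the final reference drop. A condensed, hedged sketch of the paired steps (error handling and the hws_map bookkeeping are trimmed; kmap()/kunmap() and the pin helpers are the ones the diff uses):

	/* setup: allocate, mark cacheable, pin, map the first backing page */
	obj = i915_gem_alloc_object(dev, 4096);
	obj->agp_type = AGP_USER_CACHED_MEMORY;
	ret = i915_gem_object_pin(obj, 4096, true);
	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);

	/* teardown: strictly the reverse order */
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
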
...@@ -578,8 +574,7 @@ static int init_status_page(struct intel_ring_buffer *ring) ...@@ -578,8 +574,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
int intel_init_ring_buffer(struct drm_device *dev, int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring) struct intel_ring_buffer *ring)
{ {
struct drm_i915_gem_object *obj_priv; struct drm_i915_gem_object *obj;
struct drm_gem_object *obj;
int ret; int ret;
ring->dev = dev; ring->dev = dev;
...@@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev, ...@@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws; goto err_hws;
} }
ring->gem_object = obj; ring->obj = obj;
ret = i915_gem_object_pin(obj, PAGE_SIZE, true); ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) if (ret)
goto err_unref; goto err_unref;
obj_priv = to_intel_bo(obj);
ring->map.size = ring->size; ring->map.size = ring->size;
ring->map.offset = dev->agp->base + obj_priv->gtt_offset; ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0; ring->map.type = 0;
ring->map.flags = 0; ring->map.flags = 0;
ring->map.mtrr = 0; ring->map.mtrr = 0;
...@@ -632,8 +626,8 @@ int intel_init_ring_buffer(struct drm_device *dev, ...@@ -632,8 +626,8 @@ int intel_init_ring_buffer(struct drm_device *dev,
err_unpin: err_unpin:
i915_gem_object_unpin(obj); i915_gem_object_unpin(obj);
err_unref: err_unref:
drm_gem_object_unreference(obj); drm_gem_object_unreference(&obj->base);
ring->gem_object = NULL; ring->obj = NULL;
err_hws: err_hws:
cleanup_status_page(ring); cleanup_status_page(ring);
return ret; return ret;
...@@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) ...@@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv; struct drm_i915_private *dev_priv;
int ret; int ret;
if (ring->gem_object == NULL) if (ring->obj == NULL)
return; return;
/* Disable the ring buffer. The ring must be idle at this point */ /* Disable the ring buffer. The ring must be idle at this point */
...@@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) ...@@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
drm_core_ioremapfree(&ring->map, ring->dev); drm_core_ioremapfree(&ring->map, ring->dev);
i915_gem_object_unpin(ring->gem_object); i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(ring->gem_object); drm_gem_object_unreference(&ring->obj->base);
ring->gem_object = NULL; ring->obj = NULL;
if (ring->cleanup) if (ring->cleanup)
ring->cleanup(ring); ring->cleanup(ring);
...@@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring) ...@@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
u32 *ptr; u32 *ptr;
int ret; int ret;
obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096)); obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL) if (obj == NULL)
return -ENOMEM; return -ENOMEM;
ret = i915_gem_object_pin(&obj->base, 4096, true); ret = i915_gem_object_pin(obj, 4096, true);
if (ret) { if (ret) {
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
return ret; return ret;
...@@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring) ...@@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
*ptr++ = MI_NOOP; *ptr++ = MI_NOOP;
kunmap(obj->pages[0]); kunmap(obj->pages[0]);
ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) { if (ret) {
i915_gem_object_unpin(&obj->base); i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base); drm_gem_object_unreference(&obj->base);
return ret; return ret;
} }
......
...@@ -4,7 +4,7 @@ ...@@ -4,7 +4,7 @@
struct intel_hw_status_page { struct intel_hw_status_page {
u32 __iomem *page_addr; u32 __iomem *page_addr;
unsigned int gfx_addr; unsigned int gfx_addr;
struct drm_gem_object *obj; struct drm_i915_gem_object *obj;
}; };
#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
...@@ -32,7 +32,7 @@ struct intel_ring_buffer { ...@@ -32,7 +32,7 @@ struct intel_ring_buffer {
u32 mmio_base; u32 mmio_base;
void *virtual_start; void *virtual_start;
struct drm_device *dev; struct drm_device *dev;
struct drm_gem_object *gem_object; struct drm_i915_gem_object *obj;
unsigned int head; unsigned int head;
unsigned int tail; unsigned int tail;
......