diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a18e93687b8ba11dcec3087f0746de4f1a2f77c2..608d3ae2b5ba5dc50be4ee3615f398be67260959 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -211,7 +211,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   dev_priv->mm.object_memory);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.gtt_list, gtt_list);
+	count_objects(&dev_priv->mm.bound_list, gtt_list);
 	seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -225,8 +225,13 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 	seq_printf(m, "  %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
+	size = count = 0;
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		size += obj->base.size, ++count;
+	seq_printf(m, "%u unbound objects, %zu bytes\n", count, size);
+
 	size = count = mappable_size = mappable_count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (obj->fault_mappable) {
 			size += obj->gtt_space->size;
 			++count;
@@ -264,7 +269,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void* data)
 		return ret;
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		if (list == PINNED_LIST && obj->pin_count == 0)
 			continue;
 
@@ -526,7 +531,8 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
-		seq_printf(m, "Fenced object[%2d] = ", i);
+		seq_printf(m, "Fence %d, pin count = %d, object = ",
+			   i, dev_priv->fence_regs[i].pin_count);
 		if (obj == NULL)
 			seq_printf(m, "unused");
 		else
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed3ba70923ac1e40a4df1bab8674363024d9766f..a2382a175d9268288217d160a16ecf357b8cf1c8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -685,7 +685,13 @@ typedef struct drm_i915_private {
 	struct drm_mm gtt_space;
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
-	struct list_head gtt_list;
+	struct list_head bound_list;
+	/**
+	 * List of objects which are not bound to the GTT (thus
+	 * are idle and not used by the GPU) but still have
+	 * (presumably uncached) pages still attached.
+	 */
+	struct list_head unbound_list;
 
 	/** Usable portion of the GTT for GEM */
 	unsigned long gtt_start;
@@ -1306,8 +1312,7 @@ int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
-int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-				  gfp_t gfpmask);
+int __must_check i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj);
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct intel_ring_buffer *to);
@@ -1449,7 +1454,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
 					  bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
+int i915_gem_evict_everything(struct drm_device *dev);
 
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0f70c2acfefac48a740779df1518959a923cdb15..462a8f2ad3a200be2822662e7bece791f50102d3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,8 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
 				    struct shrink_control *sc);
+static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
+static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
 static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 
 static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
@@ -140,7 +142,7 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 static inline bool
 i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-	return !obj->active;
+	return obj->gtt_space && !obj->active;
 }
 
 int
@@ -179,7 +181,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			pinned += obj->gtt_space->size;
 	mutex_unlock(&dev->struct_mutex);
@@ -423,9 +425,11 @@ i915_gem_shmem_pread(struct drm_device *dev,
 		 * anyway again before the next pread happens. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, false);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, false);
+			if (ret)
+				return ret;
+		}
 	}
 
 	offset = args->offset;
@@ -751,9 +755,11 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 		 * right away and we therefore have to clflush anyway. */
 		if (obj->cache_level == I915_CACHE_NONE)
 			needs_clflush_after = 1;
-		ret = i915_gem_object_set_to_gtt_domain(obj, true);
-		if (ret)
-			return ret;
+		if (obj->gtt_space) {
+			ret = i915_gem_object_set_to_gtt_domain(obj, true);
+			if (ret)
+				return ret;
+		}
 	}
 	/* Same trick applies for invalidate partially written cachelines before
 	 * writing.  */
@@ -1366,17 +1372,28 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
+static int
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	int page_count = obj->base.size / PAGE_SIZE;
-	int i;
+	int ret, i;
 
-	if (!obj->pages)
-		return;
+	if (obj->pages == NULL)
+		return 0;
 
+	BUG_ON(obj->gtt_space);
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		i915_gem_clflush_object(obj);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
@@ -1396,37 +1413,112 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	drm_free_large(obj->pages);
 	obj->pages = NULL;
+
+	list_del(&obj->gtt_list);
+
+	if (i915_gem_object_is_purgeable(obj))
+		i915_gem_object_truncate(obj);
+
+	return 0;
+}
+
+static long
+i915_gem_purge(struct drm_i915_private *dev_priv, long target)
+{
+	struct drm_i915_gem_object *obj, *next;
+	long count = 0;
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.unbound_list,
+				 gtt_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_put_pages_gtt(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	list_for_each_entry_safe(obj, next,
+				 &dev_priv->mm.inactive_list,
+				 mm_list) {
+		if (i915_gem_object_is_purgeable(obj) &&
+		    i915_gem_object_unbind(obj) == 0 &&
+		    i915_gem_object_put_pages_gtt(obj) == 0) {
+			count += obj->base.size >> PAGE_SHIFT;
+			if (count >= target)
+				return count;
+		}
+	}
+
+	return count;
+}
+
+static void
+i915_gem_shrink_all(struct drm_i915_private *dev_priv)
+{
+	struct drm_i915_gem_object *obj, *next;
+
+	i915_gem_evict_everything(dev_priv->dev);
+
+	list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
+		i915_gem_object_put_pages_gtt(obj);
 }
 
 int
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
-			      gfp_t gfpmask)
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int page_count, i;
 	struct address_space *mapping;
-	struct inode *inode;
 	struct page *page;
+	gfp_t gfp;
 
 	if (obj->pages || obj->sg_table)
 		return 0;
 
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
+
 	/* Get the list of pages out of our struct file.  They'll be pinned
 	 * at this point until we release them.
 	 */
 	page_count = obj->base.size / PAGE_SIZE;
-	BUG_ON(obj->pages != NULL);
 	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
 	if (obj->pages == NULL)
 		return -ENOMEM;
 
-	inode = obj->base.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
-	gfpmask |= mapping_gfp_mask(mapping);
-
+	/* Fail silently without starting the shrinker */
+	mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+	gfp = mapping_gfp_mask(mapping);
+	gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+	gfp &= ~(__GFP_IO | __GFP_WAIT);
 	for (i = 0; i < page_count; i++) {
-		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
-		if (IS_ERR(page))
-			goto err_pages;
+		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		if (IS_ERR(page)) {
+			i915_gem_purge(dev_priv, page_count);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+		}
+		if (IS_ERR(page)) {
+			/* We've tried hard to allocate the memory by reaping
+			 * our own buffer, now let the real VM do its job and
+			 * go down in flames if truly OOM.
+			 */
+			gfp &= ~(__GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD);
+			gfp |= __GFP_IO | __GFP_WAIT;
+
+			i915_gem_shrink_all(dev_priv);
+			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
+			if (IS_ERR(page))
+				goto err_pages;
+
+			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
+			gfp &= ~(__GFP_IO | __GFP_WAIT);
+		}
 
 		obj->pages[i] = page;
 	}
 
@@ -1434,6 +1526,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
+	list_add_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 	return 0;
 
 err_pages:
@@ -1698,6 +1791,7 @@ void i915_gem_reset(struct drm_device *dev)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
+	/* The fence registers are invalidated so clear them out */
 	i915_gem_reset_fences(dev);
 }
 
@@ -2209,22 +2303,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 
 	i915_gem_object_finish_gtt(obj);
 
-	/* Move the object to the CPU domain to ensure that
-	 * any possible CPU writes while it's not in the GTT
-	 * are flushed when we go to remap it.
-	 */
-	if (ret == 0)
-		ret = i915_gem_object_set_to_cpu_domain(obj, 1);
-	if (ret == -ERESTARTSYS)
-		return ret;
-	if (ret) {
-		/* In the event of a disaster, abandon all caches and
-		 * hope for the best.
-		 */
-		i915_gem_clflush_object(obj);
-		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
-	}
-
 	/* release the fence reg _after_ flushing */
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
@@ -2240,10 +2318,8 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	}
 
 	i915_gem_gtt_finish_object(obj);
-	i915_gem_object_put_pages_gtt(obj);
-
-	list_del_init(&obj->gtt_list);
-	list_del_init(&obj->mm_list);
+	list_del(&obj->mm_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.unbound_list);
 	/* Avoid an unnecessary call to unbind on rebind. */
 	obj->map_and_fenceable = true;
 
@@ -2251,10 +2327,7 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	obj->gtt_space = NULL;
 	obj->gtt_offset = 0;
 
-	if (i915_gem_object_is_purgeable(obj))
-		i915_gem_object_truncate(obj);
-
-	return ret;
+	return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
@@ -2667,7 +2740,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_mm_node *free_space;
-	gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	int ret;
@@ -2707,6 +2779,10 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -E2BIG;
 	}
 
+	ret = i915_gem_object_get_pages_gtt(obj);
+	if (ret)
+		return ret;
+
 search_free:
 	if (map_and_fenceable)
 		free_space =
@@ -2733,9 +2809,6 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 						    false);
 	}
 	if (obj->gtt_space == NULL) {
-		/* If the gtt is empty and we're still having trouble
-		 * fitting our object in, we're out of memory.
-		 */
 		ret = i915_gem_evict_something(dev, size, alignment,
 					       obj->cache_level,
 					       map_and_fenceable);
@@ -2752,55 +2825,20 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return -EINVAL;
 	}
 
-	ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
-	if (ret) {
-		drm_mm_put_block(obj->gtt_space);
-		obj->gtt_space = NULL;
-
-		if (ret == -ENOMEM) {
-			/* first try to reclaim some memory by clearing the GTT */
-			ret = i915_gem_evict_everything(dev, false);
-			if (ret) {
-				/* now try to shrink everyone else */
-				if (gfpmask) {
-					gfpmask = 0;
-					goto search_free;
-				}
-
-				return -ENOMEM;
-			}
-
-			goto search_free;
-		}
-
-		return ret;
-	}
 
 	ret = i915_gem_gtt_prepare_object(obj);
 	if (ret) {
-		i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
 		obj->gtt_space = NULL;
-
-		if (i915_gem_evict_everything(dev, false))
-			return ret;
-
-		goto search_free;
+		return ret;
 	}
 
 	if (!dev_priv->mm.aliasing_ppgtt)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+	list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
 	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-	/* Assert that the object is not currently in any GPU domain. As it
-	 * wasn't in the GTT, there shouldn't be any way it could have been in
-	 * a GPU cache
-	 */
-	BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
-	BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
-
 	obj->gtt_offset = obj->gtt_space->start;
 
 	fenceable =
@@ -3464,9 +3502,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 	if (obj->madv != __I915_MADV_PURGED)
 		obj->madv = args->madv;
 
-	/* if the object is no longer bound, discard its backing storage */
-	if (i915_gem_object_is_purgeable(obj) &&
-	    obj->gtt_space == NULL)
+	/* if the object is no longer attached, discard its backing storage */
+	if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
 		i915_gem_object_truncate(obj);
 
 	args->retained = obj->madv != __I915_MADV_PURGED;
@@ -3573,6 +3610,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		dev_priv->mm.interruptible = was_interruptible;
 	}
 
+	i915_gem_object_put_pages_gtt(obj);
 	if (obj->base.map_list.map)
 		drm_gem_free_mmap_offset(&obj->base);
 
@@ -3605,7 +3643,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_gem_evict_everything(dev, false);
+		i915_gem_evict_everything(dev);
 
 	i915_gem_reset_fences(dev);
 
@@ -3963,8 +4001,9 @@ i915_gem_load(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
+	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
 	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
@@ -4208,13 +4247,6 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static int
-i915_gpu_is_active(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return !list_empty(&dev_priv->mm.active_list);
-}
-
 static int
 i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -4223,60 +4255,26 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
-	struct drm_i915_gem_object *obj, *next;
+	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	int cnt;
 
 	if (!mutex_trylock(&dev->struct_mutex))
 		return 0;
 
-	/* "fast-path" to count number of available objects */
-	if (nr_to_scan == 0) {
-		cnt = 0;
-		list_for_each_entry(obj,
-				    &dev_priv->mm.inactive_list,
-				    mm_list)
-			cnt++;
-		mutex_unlock(&dev->struct_mutex);
-		return cnt / 100 * sysctl_vfs_cache_pressure;
+	if (nr_to_scan) {
+		nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
+		if (nr_to_scan > 0)
+			i915_gem_shrink_all(dev_priv);
 	}
 
-rescan:
-	/* first scan for clean buffers */
-	i915_gem_retire_requests(dev);
-
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (i915_gem_object_is_purgeable(obj)) {
-			if (i915_gem_object_unbind(obj) == 0 &&
-			    --nr_to_scan == 0)
-				break;
-		}
-	}
-
-	/* second pass, evict/count anything still on the inactive list */
 	cnt = 0;
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
-		if (nr_to_scan &&
-		    i915_gem_object_unbind(obj) == 0)
-			nr_to_scan--;
-		else
-			cnt++;
-	}
+	list_for_each_entry(obj, &dev_priv->mm.unbound_list, gtt_list)
+		cnt += obj->base.size >> PAGE_SHIFT;
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
+		if (obj->pin_count == 0)
+			cnt += obj->base.size >> PAGE_SHIFT;
 
-	if (nr_to_scan && i915_gpu_is_active(dev)) {
-		/*
-		 * We are desperate for pages, so as a last resort, wait
-		 * for the GPU to finish and discard whatever we can.
-		 * This has a dramatic impact to reduce the number of
-		 * OOM-killer events whilst running the GPU aggressively.
-		 */
-		if (i915_gpu_idle(dev) == 0)
-			goto rescan;
-	}
 	mutex_unlock(&dev->struct_mutex);
-	return cnt / 100 * sysctl_vfs_cache_pressure;
+	return cnt;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index ceaad5af01a468ef4c3655391e936ad7798eae6f..43c95307f99ed36980cfc69d7f23d9d635f0a08c 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -33,7 +33,7 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
 	struct drm_device *dev = obj->base.dev;
 	int npages = obj->base.size / PAGE_SIZE;
-	struct sg_table *sg = NULL;
+	struct sg_table *sg;
 	int ret;
 	int nents;
 
@@ -41,10 +41,10 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
 	if (ret)
 		return ERR_PTR(ret);
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret)
-			goto out;
+	ret = i915_gem_object_get_pages_gtt(obj);
+	if (ret) {
+		sg = ERR_PTR(ret);
+		goto out;
 	}
 
 	/* link the pages into an SG then map the sg */
@@ -89,12 +89,10 @@ static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
 		goto out_unlock;
 	}
 
-	if (!obj->pages) {
-		ret = i915_gem_object_get_pages_gtt(obj, __GFP_NORETRY | __GFP_NOWARN);
-		if (ret) {
-			mutex_unlock(&dev->struct_mutex);
-			return ERR_PTR(ret);
-		}
+	ret = i915_gem_object_get_pages_gtt(obj);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ERR_PTR(ret);
 	}
 
 	obj->dma_buf_vmapping = vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 7279c31d4a9ae53211671aa308fe9b443b0e9a2d..74635da7c4984bc2c22fd82cc0348431328e1df9 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -148,7 +148,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 }
 
 int
-i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
+i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
@@ -160,7 +160,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	if (lists_empty)
 		return -ENOSPC;
 
-	trace_i915_gem_evict_everything(dev, purgeable_only);
+	trace_i915_gem_evict_everything(dev);
 
 	/* The gpu_idle will flush everything in the write domain to the
 	 * active list. Then we must move everything off the active list
@@ -174,12 +174,9 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list) {
-		if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-			if (obj->pin_count == 0)
-				WARN_ON(i915_gem_object_unbind(obj));
-		}
-	}
+				 &dev_priv->mm.inactive_list, mm_list)
+		if (obj->pin_count == 0)
+			WARN_ON(i915_gem_object_unbind(obj));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index afb312ee050ca972daf537061c68a1163df51e5a..834a636b44f078eb5c4d3016663f16adf7975c4c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -502,17 +502,12 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
 			}
 		}
 
-		if (ret != -ENOSPC || retry > 1)
+		if (ret != -ENOSPC || retry++)
 			return ret;
 
-		/* First attempt, just clear anything that is purgeable.
-		 * Second attempt, clear the entire GTT.
-		 */
-		ret = i915_gem_evict_everything(ring->dev, retry == 0);
+		ret = i915_gem_evict_everything(ring->dev);
 		if (ret)
 			return ret;
-
-		retry++;
 	} while (1);
 
 err:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 3b3b731a17c3418d132c4b0943ca7bbf4f305796..8329a14862ed23bbdebca3c6065f2471b14f4b21 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -348,7 +348,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
 			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
 		i915_gem_clflush_object(obj);
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 	}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a61b41a8c607878cb82d881d8645d12dba9805b6..002dceebbeabfa9e1f17dea7345f5fbf78486428 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1221,7 +1221,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
+	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list)
 		if (obj->pin_count)
 			i++;
 	error->pinned_bo_count = i - error->active_bo_count;
@@ -1246,7 +1246,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		error->pinned_bo_count =
 			capture_pinned_bo(error->pinned_bo,
 					  error->pinned_bo_count,
-					  &dev_priv->mm.gtt_list);
+					  &dev_priv->mm.bound_list);
 
 	do_gettimeofday(&error->time);
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fe90b3a84a6d9db3b7cac327173749fe0fc0b0a9..3c4093d91f60d1e5b6e903d9a24f66285884ed82 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -214,22 +214,18 @@ TRACE_EVENT(i915_gem_evict,
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
-	    TP_PROTO(struct drm_device *dev, bool purgeable),
-	    TP_ARGS(dev, purgeable),
+	    TP_PROTO(struct drm_device *dev),
+	    TP_ARGS(dev),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
-			     __field(bool, purgeable)
			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
-			   __entry->purgeable = purgeable;
			  ),
 
-	    TP_printk("dev=%d%s",
-		      __entry->dev,
-		      __entry->purgeable ? ", purgeable only" : "")
+	    TP_printk("dev=%d", __entry->dev)
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,