diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 20119ea9c4185f8f3a70b0a1f9fabd629cd93bb5..b8739f3d34623b22d6b1df9ee44ccb32f4dc86c2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -175,11 +175,7 @@ static bool i915_ttm_eviction_valuable(struct ttm_buffer_object *bo,
 	struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
 
 	/* Will do for now. Our pinned objects are still on TTM's LRU lists */
-	if (!i915_gem_object_evictable(obj))
-		return false;
-
-	/* This isn't valid with a buddy allocator */
-	return ttm_bo_eviction_valuable(bo, place);
+	return i915_gem_object_evictable(obj);
 }
 
 static void i915_ttm_evict_flags(struct ttm_buffer_object *bo,
@@ -654,20 +650,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	static struct lock_class_key lock_class;
 	struct drm_i915_private *i915 = mem->i915;
 	enum ttm_bo_type bo_type;
-	size_t alignment = 0;
 	int ret;
 
-	/* Adjust alignment to GPU- and CPU huge page sizes. */
-
-	if (mem->is_range_manager) {
-		if (size >= SZ_1G)
-			alignment = SZ_1G >> PAGE_SHIFT;
-		else if (size >= SZ_2M)
-			alignment = SZ_2M >> PAGE_SHIFT;
-		else if (size >= SZ_64K)
-			alignment = SZ_64K >> PAGE_SHIFT;
-	}
-
 	drm_gem_private_object_init(&i915->drm, &obj->base, size);
 	i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
 	i915_gem_object_init_memory_region(obj, mem);
@@ -688,7 +672,7 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
 	 */
 	obj->base.vma_node.driver_private = i915_gem_to_ttm(obj);
 	ret = ttm_bo_init(&i915->bdev, i915_gem_to_ttm(obj), size,
-			  bo_type, &i915_sys_placement, alignment,
+			  bo_type, &i915_sys_placement, 1,
 			  true, NULL, NULL, i915_ttm_bo_destroy);
 	if (!ret)
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 12fb5423fd5e83713ffac99024e9861b9556c186..df59f884d37c2a46049c035d408be312d8c4796d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -5,6 +5,7 @@
 
 #include "intel_memory_region.h"
 #include "i915_drv.h"
+#include "i915_ttm_buddy_manager.h"
 
 static const struct {
 	u16 class;
@@ -28,11 +29,6 @@ static const struct {
 	},
 };
 
-struct intel_region_reserve {
-	struct list_head link;
-	struct ttm_resource *res;
-};
-
 struct intel_memory_region *
 intel_memory_region_lookup(struct drm_i915_private *i915,
 			   u16 class, u16 instance)
@@ -63,27 +59,6 @@ intel_memory_region_by_type(struct drm_i915_private *i915,
 	return NULL;
 }
 
-/**
- * intel_memory_region_unreserve - Unreserve all previously reserved
- * ranges
- * @mem: The region containing the reserved ranges.
- */
-void intel_memory_region_unreserve(struct intel_memory_region *mem)
-{
-	struct intel_region_reserve *reserve, *next;
-
-	if (!mem->priv_ops || !mem->priv_ops->free)
-		return;
-
-	mutex_lock(&mem->mm_lock);
-	list_for_each_entry_safe(reserve, next, &mem->reserved, link) {
-		list_del(&reserve->link);
-		mem->priv_ops->free(mem, reserve->res);
-		kfree(reserve);
-	}
-	mutex_unlock(&mem->mm_lock);
-}
-
 /**
  * intel_memory_region_reserve - Reserve a memory range
  * @mem: The region for which we want to reserve a range.
@@ -96,28 +71,11 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
 				resource_size_t offset,
 				resource_size_t size)
 {
-	int ret;
-	struct intel_region_reserve *reserve;
-
-	if (!mem->priv_ops || !mem->priv_ops->reserve)
-		return -EINVAL;
-
-	reserve = kzalloc(sizeof(*reserve), GFP_KERNEL);
-	if (!reserve)
-		return -ENOMEM;
+	struct ttm_resource_manager *man = mem->region_private;
 
-	reserve->res = mem->priv_ops->reserve(mem, offset, size);
-	if (IS_ERR(reserve->res)) {
-		ret = PTR_ERR(reserve->res);
-		kfree(reserve);
-		return ret;
-	}
-
-	mutex_lock(&mem->mm_lock);
-	list_add_tail(&reserve->link, &mem->reserved);
-	mutex_unlock(&mem->mm_lock);
+	GEM_BUG_ON(mem->is_range_manager);
 
-	return 0;
+	return i915_ttm_buddy_man_reserve(man, offset, size);
 }
 
 struct intel_memory_region *
@@ -149,9 +107,6 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mutex_init(&mem->objects.lock);
 	INIT_LIST_HEAD(&mem->objects.list);
 
-	INIT_LIST_HEAD(&mem->reserved);
-
-	mutex_init(&mem->mm_lock);
 
 	if (ops->init) {
 		err = ops->init(mem);
@@ -182,11 +137,9 @@ static void __intel_memory_region_destroy(struct kref *kref)
 	struct intel_memory_region *mem =
 		container_of(kref, typeof(*mem), kref);
 
-	intel_memory_region_unreserve(mem);
 	if (mem->ops->release)
 		mem->ops->release(mem);
 
-	mutex_destroy(&mem->mm_lock);
 	mutex_destroy(&mem->objects.lock);
 	kfree(mem);
 }
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index c7e635d62e1a72c478dbe375181751aaa8967c91..b04fb22726d9dfc1238b89bf5e49f126efd410f3 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -59,19 +59,10 @@ struct intel_memory_region_ops {
 			   unsigned int flags);
 };
 
-struct intel_memory_region_private_ops {
-	struct ttm_resource *(*reserve)(struct intel_memory_region *mem,
-					resource_size_t offset,
-					resource_size_t size);
-	void (*free)(struct intel_memory_region *mem,
-		     struct ttm_resource *res);
-};
-
 struct intel_memory_region {
 	struct drm_i915_private *i915;
 
 	const struct intel_memory_region_ops *ops;
-	const struct intel_memory_region_private_ops *priv_ops;
 
 	struct io_mapping iomap;
 	struct resource region;
@@ -79,8 +70,6 @@ struct intel_memory_region {
 	/* For fake LMEM */
 	struct drm_mm_node fake_mappable;
 
-	struct mutex mm_lock;
-
 	struct kref kref;
 
 	resource_size_t io_start;
@@ -94,8 +83,6 @@ struct intel_memory_region {
 	char name[16];
 	bool private; /* not for userspace */
 
-	struct list_head reserved;
-
 	dma_addr_t remap_addr;
 
 	struct {
@@ -103,8 +90,6 @@ struct intel_memory_region {
 		struct list_head list;
 	} objects;
 
-	size_t chunk_size;
-	unsigned int max_order;
 	bool is_range_manager;
 
 	void *region_private;
@@ -138,8 +123,6 @@ __printf(2, 3) void
 intel_memory_region_set_name(struct intel_memory_region *mem,
 			     const char *fmt, ...);
 
-void intel_memory_region_unreserve(struct intel_memory_region *mem);
-
 int intel_memory_region_reserve(struct intel_memory_region *mem,
 				resource_size_t offset,
 				resource_size_t size);
diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c
index f9d616544728f6e5f50bdd67aabc88e6e08bf202..052253c81e984d8eafd672a499a8f76f0f2e7e6f 100644
--- a/drivers/gpu/drm/i915/intel_region_ttm.c
+++ b/drivers/gpu/drm/i915/intel_region_ttm.c
@@ -8,6 +8,7 @@
 
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
+#include "i915_ttm_buddy_manager.h"
 
 #include "intel_region_ttm.h"
 
@@ -67,72 +68,28 @@ int intel_region_to_ttm_type(const struct intel_memory_region *mem)
 	return type;
 }
 
-static struct ttm_resource *
-intel_region_ttm_resource_reserve(struct intel_memory_region *mem,
-				  resource_size_t offset,
-				  resource_size_t size)
-{
-	struct ttm_resource_manager *man = mem->region_private;
-	struct ttm_place place = {};
-	struct ttm_buffer_object mock_bo = {};
-	struct ttm_resource *res;
-	int ret;
-
-	/*
-	 * Having to use a mock_bo is unfortunate but stems from some
-	 * drivers having private managers that insist to know what the
-	 * allocate memory is intended for, using it to send private
-	 * data to the manager. Also recently the bo has been used to send
-	 * alignment info to the manager. Assume that apart from the latter,
-	 * none of the managers we use will ever access the buffer object
-	 * members, hoping we can pass the alignment info in the
-	 * struct ttm_place in the future.
-	 */
-
-	place.fpfn = offset >> PAGE_SHIFT;
-	place.lpfn = place.fpfn + (size >> PAGE_SHIFT);
-	mock_bo.base.size = size;
-	ret = man->func->alloc(man, &mock_bo, &place, &res);
-	if (ret == -ENOSPC)
-		ret = -ENXIO;
-
-	return ret ? ERR_PTR(ret) : res;
-}
-
 /**
- * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
- * @mem: The region the resource was allocated from.
- * @res: The opaque resource representing an allocation.
+ * intel_region_ttm_init - Initialize a memory region for TTM.
+ * @mem: The region to initialize.
+ *
+ * This function initializes a suitable TTM resource manager for the
+ * region, and if it's a LMEM region type, attaches it to the TTM
+ * device. MOCK regions are NOT attached to the TTM device, since we don't
+ * have one for the mock selftests.
+ *
+ * Return: 0 on success, negative error code on failure.
  */
-void intel_region_ttm_resource_free(struct intel_memory_region *mem,
-				    struct ttm_resource *res)
-{
-	struct ttm_resource_manager *man = mem->region_private;
-
-	man->func->free(man, res);
-}
-
-static const struct intel_memory_region_private_ops priv_ops = {
-	.reserve = intel_region_ttm_resource_reserve,
-	.free = intel_region_ttm_resource_free,
-};
-
 int intel_region_ttm_init(struct intel_memory_region *mem)
 {
 	struct ttm_device *bdev = &mem->i915->bdev;
 	int mem_type = intel_region_to_ttm_type(mem);
 	int ret;
 
-	ret = ttm_range_man_init(bdev, mem_type, false,
-				 resource_size(&mem->region) >> PAGE_SHIFT);
+	ret = i915_ttm_buddy_man_init(bdev, mem_type, false,
+				      resource_size(&mem->region), PAGE_SIZE);
 	if (ret)
 		return ret;
 
-	mem->chunk_size = PAGE_SIZE;
-	mem->max_order =
-		get_order(rounddown_pow_of_two(resource_size(&mem->region)));
-	mem->is_range_manager = true;
-	mem->priv_ops = &priv_ops;
 	mem->region_private = ttm_manager_type(bdev, mem_type);
 
 	return 0;
@@ -150,8 +107,8 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
 {
 	int ret;
 
-	ret = ttm_range_man_fini(&mem->i915->bdev,
-				 intel_region_to_ttm_type(mem));
+	ret = i915_ttm_buddy_man_fini(&mem->i915->bdev,
+				      intel_region_to_ttm_type(mem));
 	GEM_WARN_ON(ret);
 	mem->region_private = NULL;
 }
@@ -171,12 +128,15 @@ void intel_region_ttm_fini(struct intel_memory_region *mem)
 struct sg_table *intel_region_ttm_resource_to_st(struct intel_memory_region *mem,
 						 struct ttm_resource *res)
 {
-	struct ttm_range_mgr_node *range_node =
-		container_of(res, typeof(*range_node), base);
+	if (mem->is_range_manager) {
+		struct ttm_range_mgr_node *range_node =
+			to_ttm_range_mgr_node(res);
 
-	GEM_WARN_ON(!mem->is_range_manager);
-	return i915_sg_from_mm_node(&range_node->mm_nodes[0],
-				    mem->region.start);
+		return i915_sg_from_mm_node(&range_node->mm_nodes[0],
+					    mem->region.start);
+	} else {
+		return i915_sg_from_buddy_resource(res, mem->region.start);
+	}
 }
 
 #ifdef CONFIG_DRM_I915_SELFTEST
@@ -206,25 +166,35 @@ intel_region_ttm_resource_alloc(struct intel_memory_region *mem,
 	struct ttm_resource *res;
 	int ret;
 
-	/*
-	 * We ignore the flags for now since we're using the range
-	 * manager and contigous and min page size would be fulfilled
-	 * by default if size is min page size aligned.
-	 */
 	mock_bo.base.size = size;
-
-	if (mem->is_range_manager) {
-		if (size >= SZ_1G)
-			mock_bo.page_alignment = SZ_1G >> PAGE_SHIFT;
-		else if (size >= SZ_2M)
-			mock_bo.page_alignment = SZ_2M >> PAGE_SHIFT;
-		else if (size >= SZ_64K)
-			mock_bo.page_alignment = SZ_64K >> PAGE_SHIFT;
-	}
+	mock_bo.page_alignment = 1;
+	place.flags = flags;
 
 	ret = man->func->alloc(man, &mock_bo, &place, &res);
 	if (ret == -ENOSPC)
 		ret = -ENXIO;
 	return ret ? ERR_PTR(ret) : res;
}
+
 #endif
+
+void intel_region_ttm_node_free(struct intel_memory_region *mem,
+				struct ttm_resource *res)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+
+	man->func->free(man, res);
+}
+
+/**
+ * intel_region_ttm_resource_free - Free a resource allocated from a resource manager
+ * @mem: The region the resource was allocated from.
+ * @res: The opaque resource representing an allocation.
+ */
+void intel_region_ttm_resource_free(struct intel_memory_region *mem,
+				    struct ttm_resource *res)
+{
+	struct ttm_resource_manager *man = mem->region_private;
+
+	man->func->free(man, res);
+}
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index c85d516b85cd88183a2d5d7241a6f9d02ebec05e..118a66c29695b09cc2dad652670004f12e3d4c14 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -20,7 +20,9 @@
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_engine_user.h"
 #include "gt/intel_gt.h"
+#include "i915_buddy.h"
 #include "i915_memcpy.h"
+#include "i915_ttm_buddy_manager.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"
 
@@ -57,10 +59,9 @@ static int igt_mock_fill(void *arg)
 	LIST_HEAD(objects);
 	int err = 0;
 
-	page_size = mem->chunk_size;
+	page_size = PAGE_SIZE;
+	max_pages = div64_u64(total, page_size);
 	rem = total;
-retry:
-	max_pages = div64_u64(rem, page_size);
 
 	for_each_prime_number_from(page_num, 1, max_pages) {
 		resource_size_t size = page_num * page_size;
@@ -86,11 +87,6 @@ static int igt_mock_fill(void *arg)
 			err = 0;
 		if (err == -ENXIO) {
 			if (page_num * page_size <= rem) {
-				if (mem->is_range_manager && max_pages > 1) {
-					max_pages >>= 1;
-					goto retry;
-				}
-
 				pr_err("%s failed, space still left in region\n",
 				       __func__);
 				err = -EINVAL;
@@ -157,6 +153,7 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 static int igt_mock_reserve(void *arg)
 {
 	struct intel_memory_region *mem = arg;
+	struct drm_i915_private *i915 = mem->i915;
 	resource_size_t avail = resource_size(&mem->region);
 	struct drm_i915_gem_object *obj;
 	const u32 chunk_size = SZ_32M;
@@ -166,16 +163,18 @@ static int igt_mock_reserve(void *arg)
 	LIST_HEAD(objects);
 	int err = 0;
 
-	if (!list_empty(&mem->reserved)) {
-		pr_err("%s region reserved list is not empty\n", __func__);
-		return -EINVAL;
-	}
-
 	count = avail / chunk_size;
 	order = i915_random_order(count, &prng);
 	if (!order)
 		return 0;
 
+	mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0);
+	if (IS_ERR(mem)) {
+		pr_err("failed to create memory region\n");
+		err = PTR_ERR(mem);
+		goto out_close;
+	}
+
 	/* Reserve a bunch of ranges within the region */
 	for (i = 0; i < count; ++i) {
 		u64 start = order[i] * chunk_size;
@@ -205,18 +204,12 @@ static int igt_mock_reserve(void *arg)
 	do {
 		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
 
-retry:
 		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
 		obj = igt_object_create(mem, &objects, size, 0);
 		if (IS_ERR(obj)) {
-			if (PTR_ERR(obj) == -ENXIO) {
-				if (mem->is_range_manager &&
-				    size > mem->chunk_size) {
-					size >>= 1;
-					goto retry;
-				}
+			if (PTR_ERR(obj) == -ENXIO)
 				break;
-			}
+
 			err = PTR_ERR(obj);
 			goto out_close;
 		}
@@ -232,7 +225,7 @@ static int igt_mock_reserve(void *arg)
 out_close:
 	kfree(order);
 	close_objects(mem, &objects);
-	intel_memory_region_unreserve(mem);
+	intel_memory_region_put(mem);
 	return err;
 }
 
@@ -252,7 +245,7 @@ static int igt_mock_contiguous(void *arg)
 	total = resource_size(&mem->region);
 
 	/* Min size */
-	obj = igt_object_create(mem, &objects, mem->chunk_size,
+	obj = igt_object_create(mem, &objects, PAGE_SIZE,
 				I915_BO_ALLOC_CONTIGUOUS);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
@@ -333,17 +326,15 @@ static int igt_mock_contiguous(void *arg)
 	min = target;
 	target = total >> 1;
 
-	if (!mem->is_range_manager) {
-		/* Make sure we can still allocate all the fragmented space */
-		obj = igt_object_create(mem, &objects, target, 0);
-		if (IS_ERR(obj)) {
-			err = PTR_ERR(obj);
-			goto err_close_objects;
-		}
-
-		igt_object_release(obj);
+	/* Make sure we can still allocate all the fragmented space */
+	obj = igt_object_create(mem, &objects, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
 	}
 
+	igt_object_release(obj);
+
 	/*
 	 * Even though we have enough free space, we don't have a big enough
 	 * contiguous block. Make sure that holds true.
@@ -362,7 +353,7 @@ static int igt_mock_contiguous(void *arg)
 		}
 
 		target >>= 1;
-	} while (target >= mem->chunk_size);
+	} while (target >= PAGE_SIZE);
 
 err_close_objects:
 	list_splice_tail(&holes, &objects);
@@ -374,7 +365,9 @@ static int igt_mock_splintered_region(void *arg)
 {
 	struct intel_memory_region *mem = arg;
 	struct drm_i915_private *i915 = mem->i915;
+	struct i915_ttm_buddy_resource *res;
 	struct drm_i915_gem_object *obj;
+	struct i915_buddy_mm *mm;
 	unsigned int expected_order;
 	LIST_HEAD(objects);
 	u64 size;
@@ -382,7 +375,7 @@ static int igt_mock_splintered_region(void *arg)
 
 	/*
 	 * Sanity check we can still allocate everything even if the
-	 * max_order != mm.size. i.e our starting address space size is not a
+	 * mm.max_order != mm.size. i.e our starting address space size is not a
 	 * power-of-two.
 	 */
 
@@ -391,20 +384,29 @@ static int igt_mock_splintered_region(void *arg)
 	if (IS_ERR(mem))
 		return PTR_ERR(mem);
 
-	expected_order = get_order(rounddown_pow_of_two(size));
-	if (mem->max_order != expected_order) {
-		pr_err("%s order mismatch(%u != %u)\n",
-		       __func__, mem->max_order, expected_order);
-		err = -EINVAL;
-		goto out_put;
-	}
-
 	obj = igt_object_create(mem, &objects, size, 0);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto out_close;
 	}
 
+	res = to_ttm_buddy_resource(obj->mm.res);
+	mm = res->mm;
+	if (mm->size != size) {
+		pr_err("%s size mismatch(%llu != %llu)\n",
+		       __func__, mm->size, size);
+		err = -EINVAL;
+		goto out_put;
+	}
+
+	expected_order = get_order(rounddown_pow_of_two(size));
+	if (mm->max_order != expected_order) {
+		pr_err("%s order mismatch(%u != %u)\n",
+		       __func__, mm->max_order, expected_order);
+		err = -EINVAL;
+		goto out_put;
+	}
+
 	close_objects(mem, &objects);
 
 	/*
@@ -415,15 +417,12 @@ static int igt_mock_splintered_region(void *arg)
 	 * sure that does indeed hold true.
 	 */
 
-	if (!mem->is_range_manager) {
-		obj = igt_object_create(mem, &objects, size,
-					I915_BO_ALLOC_CONTIGUOUS);
-		if (!IS_ERR(obj)) {
-			pr_err("%s too large contiguous allocation was not rejected\n",
-			       __func__);
-			err = -EINVAL;
-			goto out_close;
-		}
+	obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS);
+	if (!IS_ERR(obj)) {
+		pr_err("%s too large contiguous allocation was not rejected\n",
+		       __func__);
+		err = -EINVAL;
+		goto out_close;
 	}
 
 	obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size),
@@ -442,6 +441,74 @@ static int igt_mock_splintered_region(void *arg)
 	return err;
 }
 
+#ifndef SZ_8G
+#define SZ_8G BIT_ULL(33)
+#endif
+
+static int igt_mock_max_segment(void *arg)
+{
+	const unsigned int max_segment = rounddown(UINT_MAX, PAGE_SIZE);
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_private *i915 = mem->i915;
+	struct i915_ttm_buddy_resource *res;
+	struct drm_i915_gem_object *obj;
+	struct i915_buddy_block *block;
+	struct i915_buddy_mm *mm;
+	struct list_head *blocks;
+	struct scatterlist *sg;
+	LIST_HEAD(objects);
+	u64 size;
+	int err = 0;
+
+	/*
+	 * While we may create very large contiguous blocks, we may need
+	 * to break those down for consumption elsewhere. In particular,
+	 * dma-mapping with scatterlist elements have an implicit limit of
+	 * UINT_MAX on each element.
+	 */
+
+	size = SZ_8G;
+	mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0);
+	if (IS_ERR(mem))
+		return PTR_ERR(mem);
+
+	obj = igt_object_create(mem, &objects, size, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto out_put;
+	}
+
+	res = to_ttm_buddy_resource(obj->mm.res);
+	blocks = &res->blocks;
+	mm = res->mm;
+	size = 0;
+	list_for_each_entry(block, blocks, link) {
+		if (i915_buddy_block_size(mm, block) > size)
+			size = i915_buddy_block_size(mm, block);
+	}
+	if (size < max_segment) {
+		pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n",
+		       __func__, max_segment, size);
+		err = -EINVAL;
+		goto out_close;
+	}
+
+	for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
+		if (sg->length > max_segment) {
+			pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
+			       __func__, sg->length, max_segment);
+			err = -EINVAL;
+			goto out_close;
+		}
+	}
+
+out_close:
+	close_objects(mem, &objects);
+out_put:
+	intel_memory_region_put(mem);
+	return err;
+}
+
 static int igt_gpu_write_dw(struct intel_context *ce,
 			    struct i915_vma *vma,
 			    u32 dword,
@@ -1046,6 +1113,7 @@ int intel_memory_region_mock_selftests(void)
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),
+		SUBTEST(igt_mock_max_segment),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 6120d43fe504349caa83c21b1e6ad9043a9d9ba5..3b3264311c910932fe44cba2348481e95eae5999 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -3,6 +3,7 @@
  * Copyright © 2019-2021 Intel Corporation
  */
 
+#include <drm/ttm/ttm_placement.h>
 #include "gem/i915_gem_region.h"
 #include "intel_memory_region.h"
 
@@ -25,10 +26,11 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 {
 	unsigned int flags;
 	struct sg_table *pages;
+	int err;
 
 	flags = I915_ALLOC_MIN_PAGE_SIZE;
 	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
-		flags |= I915_ALLOC_CONTIGUOUS;
+		flags |= TTM_PL_FLAG_CONTIGUOUS;
 
 	obj->mm.res = intel_region_ttm_resource_alloc(obj->mm.region,
 						      obj->base.size,
@@ -38,13 +40,17 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 	pages = intel_region_ttm_resource_to_st(obj->mm.region,
 						obj->mm.res);
 	if (IS_ERR(pages)) {
-		intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
-		return PTR_ERR(pages);
+		err = PTR_ERR(pages);
+		goto err_free_resource;
 	}
 
 	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
 
	return 0;
+
+err_free_resource:
+	intel_region_ttm_resource_free(obj->mm.region, obj->mm.res);
+	return err;
 }
 
 static const struct drm_i915_gem_object_ops mock_region_obj_ops = {
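---

A note on the invariant the new igt_mock_max_segment selftest asserts: dma-mapping imposes an implicit UINT_MAX limit on each scatterlist element, so a very large contiguous buddy block must be emitted as multiple sg entries, each no larger than rounddown(UINT_MAX, PAGE_SIZE). Below is a minimal standalone userspace sketch of that splitting, for illustration only — it is not the i915_sg_from_buddy_resource() implementation from this patch, and the names (PAGE_SIZE value, variables) are assumptions:

/*
 * Illustrative sketch, not kernel code: split one large contiguous
 * block into scatterlist-style segments, each capped at a page-aligned
 * UINT_MAX — the bound igt_mock_max_segment checks on every sg->length.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL	/* assumed page size for the example */

int main(void)
{
	/* same value as rounddown(UINT_MAX, PAGE_SIZE) in the selftest */
	const uint64_t max_segment = UINT_MAX - (UINT_MAX % PAGE_SIZE);
	uint64_t remaining = 8ULL << 30;	/* an 8G contiguous block */
	uint64_t offset = 0;

	while (remaining) {
		uint64_t len = remaining < max_segment ? remaining : max_segment;

		/* each emulated sg entry length still fits in an unsigned int */
		printf("sg entry: offset=%llu len=%llu\n",
		       (unsigned long long)offset, (unsigned long long)len);
		offset += len;
		remaining -= len;
	}
	return 0;
}

With an 8G block this prints two max_segment-sized entries plus an 8K remainder — three entries, all within the dma-mapping limit, which is exactly the condition the selftest's sg->length loop verifies.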