提交 82ad6443 编写于 作者: C Chris Wilson

drm/i915/gtt: Rename i915_hw_ppgtt base member

In the near future, I want to subclass gen6_hw_ppgtt as it contains a
few specialised members and I wish to add more. To avoid the ugliness of
using ppgtt->base.base, rename the i915_hw_ppgtt base member
(i915_address_space) as vm, which is our common shorthand for an
i915_address_space local.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180605153758.18422-1-chris@chris-wilson.co.uk
上级 cd68e04c
...@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) ...@@ -61,7 +61,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
} }
mutex_lock(&dev_priv->drm.struct_mutex); mutex_lock(&dev_priv->drm.struct_mutex);
ret = i915_gem_gtt_insert(&dev_priv->ggtt.base, node, ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
size, I915_GTT_PAGE_SIZE, size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
start, end, flags); start, end, flags);
......
...@@ -361,9 +361,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt); ...@@ -361,9 +361,9 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end) #define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start) #define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total) #define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
#define gvt_ggtt_sz(gvt) \ #define gvt_ggtt_sz(gvt) \
((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3) ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt)) #define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0) #define gvt_aperture_gmadr_base(gvt) (0)
......
...@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data) ...@@ -328,7 +328,7 @@ static int per_file_stats(int id, void *ptr, void *data)
} else { } else {
struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm); struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
if (ppgtt->base.file != stats->file_priv) if (ppgtt->vm.file != stats->file_priv)
continue; continue;
} }
...@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) ...@@ -508,7 +508,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
dpy_count, dpy_size); dpy_count, dpy_size);
seq_printf(m, "%llu [%pa] gtt total\n", seq_printf(m, "%llu [%pa] gtt total\n",
ggtt->base.total, &ggtt->mappable_end); ggtt->vm.total, &ggtt->mappable_end);
seq_printf(m, "Supported page sizes: %s\n", seq_printf(m, "Supported page sizes: %s\n",
stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes, stringify_page_sizes(INTEL_INFO(dev_priv)->page_sizes,
buf, sizeof(buf))); buf, sizeof(buf)));
......
...@@ -3213,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev, ...@@ -3213,7 +3213,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
static inline struct i915_hw_ppgtt * static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm) i915_vm_to_ppgtt(struct i915_address_space *vm)
{ {
return container_of(vm, struct i915_hw_ppgtt, base); return container_of(vm, struct i915_hw_ppgtt, vm);
} }
/* i915_gem_fence_reg.c */ /* i915_gem_fence_reg.c */
......
...@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt, ...@@ -65,7 +65,7 @@ insert_mappable_node(struct i915_ggtt *ggtt,
struct drm_mm_node *node, u32 size) struct drm_mm_node *node, u32 size)
{ {
memset(node, 0, sizeof(*node)); memset(node, 0, sizeof(*node));
return drm_mm_insert_node_in_range(&ggtt->base.mm, node, return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
size, 0, I915_COLOR_UNEVICTABLE, size, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end, 0, ggtt->mappable_end,
DRM_MM_INSERT_LOW); DRM_MM_INSERT_LOW);
...@@ -249,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, ...@@ -249,17 +249,17 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct i915_vma *vma; struct i915_vma *vma;
u64 pinned; u64 pinned;
pinned = ggtt->base.reserved; pinned = ggtt->vm.reserved;
mutex_lock(&dev->struct_mutex); mutex_lock(&dev->struct_mutex);
list_for_each_entry(vma, &ggtt->base.active_list, vm_link) list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
if (i915_vma_is_pinned(vma)) if (i915_vma_is_pinned(vma))
pinned += vma->node.size; pinned += vma->node.size;
list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link) list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
if (i915_vma_is_pinned(vma)) if (i915_vma_is_pinned(vma))
pinned += vma->node.size; pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->struct_mutex);
args->aper_size = ggtt->base.total; args->aper_size = ggtt->vm.total;
args->aper_available_size = args->aper_size - pinned; args->aper_available_size = args->aper_size - pinned;
return 0; return 0;
...@@ -1223,9 +1223,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, ...@@ -1223,9 +1223,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length; page_length = remain < page_length ? remain : page_length;
if (node.allocated) { if (node.allocated) {
wmb(); wmb();
ggtt->base.insert_page(&ggtt->base, ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0); node.start, I915_CACHE_NONE, 0);
wmb(); wmb();
} else { } else {
page_base += offset & PAGE_MASK; page_base += offset & PAGE_MASK;
...@@ -1246,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, ...@@ -1246,8 +1246,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
out_unpin: out_unpin:
if (node.allocated) { if (node.allocated) {
wmb(); wmb();
ggtt->base.clear_range(&ggtt->base, ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
node.start, node.size);
remove_mappable_node(&node); remove_mappable_node(&node);
} else { } else {
i915_vma_unpin(vma); i915_vma_unpin(vma);
...@@ -1426,9 +1425,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -1426,9 +1425,9 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
page_length = remain < page_length ? remain : page_length; page_length = remain < page_length ? remain : page_length;
if (node.allocated) { if (node.allocated) {
wmb(); /* flush the write before we modify the GGTT */ wmb(); /* flush the write before we modify the GGTT */
ggtt->base.insert_page(&ggtt->base, ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
node.start, I915_CACHE_NONE, 0); node.start, I915_CACHE_NONE, 0);
wmb(); /* flush modifications to the GGTT (insert_page) */ wmb(); /* flush modifications to the GGTT (insert_page) */
} else { } else {
page_base += offset & PAGE_MASK; page_base += offset & PAGE_MASK;
...@@ -1455,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, ...@@ -1455,8 +1454,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
out_unpin: out_unpin:
if (node.allocated) { if (node.allocated) {
wmb(); wmb();
ggtt->base.clear_range(&ggtt->base, ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
node.start, node.size);
remove_mappable_node(&node); remove_mappable_node(&node);
} else { } else {
i915_vma_unpin(vma); i915_vma_unpin(vma);
...@@ -4374,7 +4372,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, ...@@ -4374,7 +4372,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
u64 flags) u64 flags)
{ {
struct drm_i915_private *dev_priv = to_i915(obj->base.dev); struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct i915_address_space *vm = &dev_priv->ggtt.base; struct i915_address_space *vm = &dev_priv->ggtt.vm;
struct i915_vma *vma; struct i915_vma *vma;
int ret; int ret;
......
...@@ -197,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx) ...@@ -197,7 +197,7 @@ static void context_close(struct i915_gem_context *ctx)
*/ */
lut_close(ctx); lut_close(ctx);
if (ctx->ppgtt) if (ctx->ppgtt)
i915_ppgtt_close(&ctx->ppgtt->base); i915_ppgtt_close(&ctx->ppgtt->vm);
ctx->file_priv = ERR_PTR(-EBADF); ctx->file_priv = ERR_PTR(-EBADF);
i915_gem_context_put(ctx); i915_gem_context_put(ctx);
...@@ -249,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915, ...@@ -249,7 +249,7 @@ static u32 default_desc_template(const struct drm_i915_private *i915,
desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; desc = GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE;
address_mode = INTEL_LEGACY_32B_CONTEXT; address_mode = INTEL_LEGACY_32B_CONTEXT;
if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) if (ppgtt && i915_vm_is_48bit(&ppgtt->vm))
address_mode = INTEL_LEGACY_64B_CONTEXT; address_mode = INTEL_LEGACY_64B_CONTEXT;
desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; desc |= address_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT;
...@@ -810,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ...@@ -810,11 +810,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
break; break;
case I915_CONTEXT_PARAM_GTT_SIZE: case I915_CONTEXT_PARAM_GTT_SIZE:
if (ctx->ppgtt) if (ctx->ppgtt)
args->value = ctx->ppgtt->base.total; args->value = ctx->ppgtt->vm.total;
else if (to_i915(dev)->mm.aliasing_ppgtt) else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total; args->value = to_i915(dev)->mm.aliasing_ppgtt->vm.total;
else else
args->value = to_i915(dev)->ggtt.base.total; args->value = to_i915(dev)->ggtt.vm.total;
break; break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
args->value = i915_gem_context_no_error_capture(ctx); args->value = i915_gem_context_no_error_capture(ctx);
......
...@@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb) ...@@ -703,7 +703,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return -ENOENT; return -ENOENT;
eb->ctx = ctx; eb->ctx = ctx;
eb->vm = ctx->ppgtt ? &ctx->ppgtt->base : &eb->i915->ggtt.base; eb->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &eb->i915->ggtt.vm;
eb->context_flags = 0; eb->context_flags = 0;
if (ctx->flags & CONTEXT_NO_ZEROMAP) if (ctx->flags & CONTEXT_NO_ZEROMAP)
...@@ -943,9 +943,9 @@ static void reloc_cache_reset(struct reloc_cache *cache) ...@@ -943,9 +943,9 @@ static void reloc_cache_reset(struct reloc_cache *cache)
if (cache->node.allocated) { if (cache->node.allocated) {
struct i915_ggtt *ggtt = cache_to_ggtt(cache); struct i915_ggtt *ggtt = cache_to_ggtt(cache);
ggtt->base.clear_range(&ggtt->base, ggtt->vm.clear_range(&ggtt->vm,
cache->node.start, cache->node.start,
cache->node.size); cache->node.size);
drm_mm_remove_node(&cache->node); drm_mm_remove_node(&cache->node);
} else { } else {
i915_vma_unpin((struct i915_vma *)cache->node.mm); i915_vma_unpin((struct i915_vma *)cache->node.mm);
...@@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, ...@@ -1016,7 +1016,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
memset(&cache->node, 0, sizeof(cache->node)); memset(&cache->node, 0, sizeof(cache->node));
err = drm_mm_insert_node_in_range err = drm_mm_insert_node_in_range
(&ggtt->base.mm, &cache->node, (&ggtt->vm.mm, &cache->node,
PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end, 0, ggtt->mappable_end,
DRM_MM_INSERT_LOW); DRM_MM_INSERT_LOW);
...@@ -1037,9 +1037,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, ...@@ -1037,9 +1037,9 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
offset = cache->node.start; offset = cache->node.start;
if (cache->node.allocated) { if (cache->node.allocated) {
wmb(); wmb();
ggtt->base.insert_page(&ggtt->base, ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, page), i915_gem_object_get_dma_address(obj, page),
offset, I915_CACHE_NONE, 0); offset, I915_CACHE_NONE, 0);
} else { } else {
offset += page << PAGE_SHIFT; offset += page << PAGE_SHIFT;
} }
......
此差异已折叠。
...@@ -65,7 +65,7 @@ typedef u64 gen8_pde_t; ...@@ -65,7 +65,7 @@ typedef u64 gen8_pde_t;
typedef u64 gen8_ppgtt_pdpe_t; typedef u64 gen8_ppgtt_pdpe_t;
typedef u64 gen8_ppgtt_pml4e_t; typedef u64 gen8_ppgtt_pml4e_t;
#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT) #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */ /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0)) #define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
...@@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm) ...@@ -367,7 +367,7 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm)
* the spec. * the spec.
*/ */
struct i915_ggtt { struct i915_ggtt {
struct i915_address_space base; struct i915_address_space vm;
struct io_mapping iomap; /* Mapping to our CPU mappable region */ struct io_mapping iomap; /* Mapping to our CPU mappable region */
struct resource gmadr; /* GMADR resource */ struct resource gmadr; /* GMADR resource */
...@@ -385,7 +385,7 @@ struct i915_ggtt { ...@@ -385,7 +385,7 @@ struct i915_ggtt {
}; };
struct i915_hw_ppgtt { struct i915_hw_ppgtt {
struct i915_address_space base; struct i915_address_space vm;
struct kref ref; struct kref ref;
struct drm_mm_node node; struct drm_mm_node node;
unsigned long pd_dirty_rings; unsigned long pd_dirty_rings;
...@@ -543,7 +543,7 @@ static inline struct i915_ggtt * ...@@ -543,7 +543,7 @@ static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm) i915_vm_to_ggtt(struct i915_address_space *vm)
{ {
GEM_BUG_ON(!i915_is_ggtt(vm)); GEM_BUG_ON(!i915_is_ggtt(vm));
return container_of(vm, struct i915_ggtt, base); return container_of(vm, struct i915_ggtt, vm);
} }
#define INTEL_MAX_PPAT_ENTRIES 8 #define INTEL_MAX_PPAT_ENTRIES 8
......
...@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq) ...@@ -194,7 +194,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
if (IS_ERR(so.obj)) if (IS_ERR(so.obj))
return PTR_ERR(so.obj); return PTR_ERR(so.obj);
so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.base, NULL); so.vma = i915_vma_instance(so.obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(so.vma)) { if (IS_ERR(so.vma)) {
err = PTR_ERR(so.vma); err = PTR_ERR(so.vma);
goto err_obj; goto err_obj;
......
...@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr ...@@ -480,7 +480,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
/* We also want to clear any cached iomaps as they wrap vmap */ /* We also want to clear any cached iomaps as they wrap vmap */
list_for_each_entry_safe(vma, next, list_for_each_entry_safe(vma, next,
&i915->ggtt.base.inactive_list, vm_link) { &i915->ggtt.vm.inactive_list, vm_link) {
unsigned long count = vma->node.size >> PAGE_SHIFT; unsigned long count = vma->node.size >> PAGE_SHIFT;
if (vma->iomap && i915_vma_unbind(vma) == 0) if (vma->iomap && i915_vma_unbind(vma) == 0)
freed_pages += count; freed_pages += count;
......
...@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv ...@@ -642,7 +642,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
if (ret) if (ret)
goto err; goto err;
vma = i915_vma_instance(obj, &ggtt->base, NULL); vma = i915_vma_instance(obj, &ggtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err_pages; goto err_pages;
...@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv ...@@ -653,7 +653,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
* setting up the GTT space. The actual reservation will occur * setting up the GTT space. The actual reservation will occur
* later. * later.
*/ */
ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node, ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
size, gtt_offset, obj->cache_level, size, gtt_offset, obj->cache_level,
0); 0);
if (ret) { if (ret) {
...@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv ...@@ -666,7 +666,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv
vma->pages = obj->mm.pages; vma->pages = obj->mm.pages;
vma->flags |= I915_VMA_GLOBAL_BIND; vma->flags |= I915_VMA_GLOBAL_BIND;
__i915_vma_set_map_and_fenceable(vma); __i915_vma_set_map_and_fenceable(vma);
list_move_tail(&vma->vm_link, &ggtt->base.inactive_list); list_move_tail(&vma->vm_link, &ggtt->vm.inactive_list);
spin_lock(&dev_priv->mm.obj_lock); spin_lock(&dev_priv->mm.obj_lock);
list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list); list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
......
...@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915, ...@@ -973,8 +973,7 @@ i915_error_object_create(struct drm_i915_private *i915,
void __iomem *s; void __iomem *s;
int ret; int ret;
ggtt->base.insert_page(&ggtt->base, dma, slot, ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
I915_CACHE_NONE, 0);
s = io_mapping_map_atomic_wc(&ggtt->iomap, slot); s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
ret = compress_page(&compress, (void __force *)s, dst); ret = compress_page(&compress, (void __force *)s, dst);
...@@ -993,7 +992,7 @@ i915_error_object_create(struct drm_i915_private *i915, ...@@ -993,7 +992,7 @@ i915_error_object_create(struct drm_i915_private *i915,
out: out:
compress_fini(&compress, dst); compress_fini(&compress, dst);
ggtt->base.clear_range(&ggtt->base, slot, PAGE_SIZE); ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
return dst; return dst;
} }
...@@ -1466,7 +1465,7 @@ static void gem_record_rings(struct i915_gpu_state *error) ...@@ -1466,7 +1465,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
struct i915_gem_context *ctx = request->gem_context; struct i915_gem_context *ctx = request->gem_context;
struct intel_ring *ring; struct intel_ring *ring;
ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base; ee->vm = ctx->ppgtt ? &ctx->ppgtt->vm : &ggtt->vm;
record_context(&ee->context, ctx); record_context(&ee->context, ctx);
...@@ -1564,7 +1563,7 @@ static void capture_active_buffers(struct i915_gpu_state *error) ...@@ -1564,7 +1563,7 @@ static void capture_active_buffers(struct i915_gpu_state *error)
static void capture_pinned_buffers(struct i915_gpu_state *error) static void capture_pinned_buffers(struct i915_gpu_state *error)
{ {
struct i915_address_space *vm = &error->i915->ggtt.base; struct i915_address_space *vm = &error->i915->ggtt.vm;
struct drm_i915_error_buffer *bo; struct drm_i915_error_buffer *bo;
struct i915_vma *vma; struct i915_vma *vma;
int count_inactive, count_active; int count_inactive, count_active;
......
...@@ -956,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context, ...@@ -956,7 +956,7 @@ DECLARE_EVENT_CLASS(i915_context,
__entry->dev = ctx->i915->drm.primary->index; __entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx; __entry->ctx = ctx;
__entry->hw_id = ctx->hw_id; __entry->hw_id = ctx->hw_id;
__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; __entry->vm = ctx->ppgtt ? &ctx->ppgtt->vm : NULL;
), ),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u", TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u",
...@@ -997,7 +997,7 @@ TRACE_EVENT(switch_mm, ...@@ -997,7 +997,7 @@ TRACE_EVENT(switch_mm,
__entry->class = engine->uabi_class; __entry->class = engine->uabi_class;
__entry->instance = engine->instance; __entry->instance = engine->instance;
__entry->to = to; __entry->to = to;
__entry->vm = to->ppgtt? &to->ppgtt->base : NULL; __entry->vm = to->ppgtt ? &to->ppgtt->vm : NULL;
__entry->dev = engine->i915->drm.primary->index; __entry->dev = engine->i915->drm.primary->index;
), ),
......
...@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt, ...@@ -105,7 +105,7 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
node->start + node->size, node->start + node->size,
node->size / 1024); node->size / 1024);
ggtt->base.reserved -= node->size; ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node); drm_mm_remove_node(node);
} }
...@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, ...@@ -141,11 +141,11 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n", DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024); start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->base, node, ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE, size, start, I915_COLOR_UNEVICTABLE,
0); 0);
if (!ret) if (!ret)
ggtt->base.reserved += size; ggtt->vm.reserved += size;
return ret; return ret;
} }
...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt, ...@@ -197,7 +197,7 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
int intel_vgt_balloon(struct drm_i915_private *dev_priv) int intel_vgt_balloon(struct drm_i915_private *dev_priv)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt; struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long ggtt_end = ggtt->base.total; unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end; unsigned long mappable_base, mappable_size, mappable_end;
unsigned long unmappable_base, unmappable_size, unmappable_end; unsigned long unmappable_base, unmappable_size, unmappable_end;
......
...@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj, ...@@ -85,7 +85,7 @@ vma_create(struct drm_i915_gem_object *obj,
int i; int i;
/* The aliasing_ppgtt should never be used directly! */ /* The aliasing_ppgtt should never be used directly! */
GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base); GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL); vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
if (vma == NULL) if (vma == NULL)
......
...@@ -515,7 +515,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size) ...@@ -515,7 +515,7 @@ int intel_engine_create_scratch(struct intel_engine_cs *engine, int size)
return PTR_ERR(obj); return PTR_ERR(obj);
} }
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err_unref; goto err_unref;
...@@ -585,7 +585,7 @@ static int init_status_page(struct intel_engine_cs *engine) ...@@ -585,7 +585,7 @@ static int init_status_page(struct intel_engine_cs *engine)
if (ret) if (ret)
goto err; goto err;
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto err; goto err;
......
...@@ -570,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size) ...@@ -570,7 +570,7 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(obj)) if (IS_ERR(obj))
return ERR_CAST(obj); return ERR_CAST(obj);
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
......
...@@ -536,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) ...@@ -536,7 +536,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
*/ */
static void flush_ggtt_writes(struct i915_vma *vma) static void flush_ggtt_writes(struct i915_vma *vma)
{ {
struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev); struct drm_i915_private *dev_priv = vma->vm->i915;
if (i915_vma_is_map_and_fenceable(vma)) if (i915_vma_is_map_and_fenceable(vma))
POSTING_READ_FW(GUC_STATUS); POSTING_READ_FW(GUC_STATUS);
......
...@@ -431,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq) ...@@ -431,7 +431,7 @@ static u64 execlists_update_context(struct i915_request *rq)
* PML4 is allocated during ppgtt init, so this is not needed * PML4 is allocated during ppgtt init, so this is not needed
* in 48-bit mode. * in 48-bit mode.
*/ */
if (ppgtt && !i915_vm_is_48bit(&ppgtt->base)) if (ppgtt && !i915_vm_is_48bit(&ppgtt->vm))
execlists_update_context_pdps(ppgtt, reg_state); execlists_update_context_pdps(ppgtt, reg_state);
return ce->lrc_desc; return ce->lrc_desc;
...@@ -1672,7 +1672,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) ...@@ -1672,7 +1672,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err; goto err;
...@@ -2070,7 +2070,7 @@ static int gen8_emit_bb_start(struct i915_request *rq, ...@@ -2070,7 +2070,7 @@ static int gen8_emit_bb_start(struct i915_request *rq,
* not needed in 48-bit.*/ * not needed in 48-bit.*/
if (rq->gem_context->ppgtt && if (rq->gem_context->ppgtt &&
(intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) && (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
!i915_vm_is_48bit(&rq->gem_context->ppgtt->base) && !i915_vm_is_48bit(&rq->gem_context->ppgtt->vm) &&
!intel_vgpu_active(rq->i915)) { !intel_vgpu_active(rq->i915)) {
ret = intel_logical_ring_emit_pdps(rq); ret = intel_logical_ring_emit_pdps(rq);
if (ret) if (ret)
...@@ -2668,7 +2668,7 @@ static void execlists_init_reg_state(u32 *regs, ...@@ -2668,7 +2668,7 @@ static void execlists_init_reg_state(u32 *regs,
CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0); CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) { if (ppgtt && i915_vm_is_48bit(&ppgtt->vm)) {
/* 64b PPGTT (48bit canonical) /* 64b PPGTT (48bit canonical)
* PDP0_DESCRIPTOR contains the base address to PML4 and * PDP0_DESCRIPTOR contains the base address to PML4 and
* other PDP Descriptors are ignored. * other PDP Descriptors are ignored.
...@@ -2774,7 +2774,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, ...@@ -2774,7 +2774,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
goto error_deref_obj; goto error_deref_obj;
} }
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
ret = PTR_ERR(vma); ret = PTR_ERR(vma);
goto error_deref_obj; goto error_deref_obj;
......
...@@ -1121,7 +1121,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size) ...@@ -1121,7 +1121,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
/* mark ring buffers as read-only from GPU side by default */ /* mark ring buffers as read-only from GPU side by default */
obj->gt_ro = 1; obj->gt_ro = 1;
vma = i915_vma_instance(obj, &dev_priv->ggtt.base, NULL); vma = i915_vma_instance(obj, &dev_priv->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto err; goto err;
...@@ -1300,7 +1300,7 @@ alloc_context_vma(struct intel_engine_cs *engine) ...@@ -1300,7 +1300,7 @@ alloc_context_vma(struct intel_engine_cs *engine)
i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC); i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
} }
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err_obj; goto err_obj;
......
...@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) ...@@ -338,7 +338,7 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single)
static int igt_check_page_sizes(struct i915_vma *vma) static int igt_check_page_sizes(struct i915_vma *vma)
{ {
struct drm_i915_private *i915 = to_i915(vma->obj->base.dev); struct drm_i915_private *i915 = vma->vm->i915;
unsigned int supported = INTEL_INFO(i915)->page_sizes; unsigned int supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj = vma->obj; struct drm_i915_gem_object *obj = vma->obj;
int err = 0; int err = 0;
...@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma) ...@@ -379,7 +379,7 @@ static int igt_check_page_sizes(struct i915_vma *vma)
static int igt_mock_exhaust_device_supported_pages(void *arg) static int igt_mock_exhaust_device_supported_pages(void *arg)
{ {
struct i915_hw_ppgtt *ppgtt = arg; struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915; struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned int saved_mask = INTEL_INFO(i915)->page_sizes; unsigned int saved_mask = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
...@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) ...@@ -415,7 +415,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
goto out_put; goto out_put;
} }
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out_put; goto out_put;
...@@ -458,7 +458,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg) ...@@ -458,7 +458,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
static int igt_mock_ppgtt_misaligned_dma(void *arg) static int igt_mock_ppgtt_misaligned_dma(void *arg)
{ {
struct i915_hw_ppgtt *ppgtt = arg; struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915; struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long supported = INTEL_INFO(i915)->page_sizes; unsigned long supported = INTEL_INFO(i915)->page_sizes;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
int bit; int bit;
...@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg) ...@@ -500,7 +500,7 @@ static int igt_mock_ppgtt_misaligned_dma(void *arg)
/* Force the page size for this object */ /* Force the page size for this object */
obj->mm.page_sizes.sg = page_size; obj->mm.page_sizes.sg = page_size;
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out_unpin; goto out_unpin;
...@@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects, ...@@ -591,7 +591,7 @@ static void close_object_list(struct list_head *objects,
list_for_each_entry_safe(obj, on, objects, st_link) { list_for_each_entry_safe(obj, on, objects, st_link) {
struct i915_vma *vma; struct i915_vma *vma;
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (!IS_ERR(vma)) if (!IS_ERR(vma))
i915_vma_close(vma); i915_vma_close(vma);
...@@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects, ...@@ -604,8 +604,8 @@ static void close_object_list(struct list_head *objects,
static int igt_mock_ppgtt_huge_fill(void *arg) static int igt_mock_ppgtt_huge_fill(void *arg)
{ {
struct i915_hw_ppgtt *ppgtt = arg; struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915; struct drm_i915_private *i915 = ppgtt->vm.i915;
unsigned long max_pages = ppgtt->base.total >> PAGE_SHIFT; unsigned long max_pages = ppgtt->vm.total >> PAGE_SHIFT;
unsigned long page_num; unsigned long page_num;
bool single = false; bool single = false;
LIST_HEAD(objects); LIST_HEAD(objects);
...@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) ...@@ -641,7 +641,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
list_add(&obj->st_link, &objects); list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
break; break;
...@@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg) ...@@ -725,7 +725,7 @@ static int igt_mock_ppgtt_huge_fill(void *arg)
static int igt_mock_ppgtt_64K(void *arg) static int igt_mock_ppgtt_64K(void *arg)
{ {
struct i915_hw_ppgtt *ppgtt = arg; struct i915_hw_ppgtt *ppgtt = arg;
struct drm_i915_private *i915 = ppgtt->base.i915; struct drm_i915_private *i915 = ppgtt->vm.i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const struct object_info { const struct object_info {
unsigned int size; unsigned int size;
...@@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg) ...@@ -819,7 +819,7 @@ static int igt_mock_ppgtt_64K(void *arg)
*/ */
obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M; obj->mm.page_sizes.sg &= ~I915_GTT_PAGE_SIZE_2M;
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out_object_unpin; goto out_object_unpin;
...@@ -887,8 +887,8 @@ static int igt_mock_ppgtt_64K(void *arg) ...@@ -887,8 +887,8 @@ static int igt_mock_ppgtt_64K(void *arg)
static struct i915_vma * static struct i915_vma *
gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val) gpu_write_dw(struct i915_vma *vma, u64 offset, u32 val)
{ {
struct drm_i915_private *i915 = to_i915(vma->obj->base.dev); struct drm_i915_private *i915 = vma->vm->i915;
const int gen = INTEL_GEN(vma->vm->i915); const int gen = INTEL_GEN(i915);
unsigned int count = vma->size >> PAGE_SHIFT; unsigned int count = vma->size >> PAGE_SHIFT;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *batch; struct i915_vma *batch;
...@@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx, ...@@ -1047,7 +1047,8 @@ static int __igt_write_huge(struct i915_gem_context *ctx,
u32 dword, u32 val) u32 dword, u32 val)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
struct i915_vma *vma; struct i915_vma *vma;
int err; int err;
...@@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx, ...@@ -1100,7 +1101,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
struct drm_i915_gem_object *obj) struct drm_i915_gem_object *obj)
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
static struct intel_engine_cs *engines[I915_NUM_ENGINES]; static struct intel_engine_cs *engines[I915_NUM_ENGINES];
struct intel_engine_cs *engine; struct intel_engine_cs *engine;
I915_RND_STATE(prng); I915_RND_STATE(prng);
...@@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg) ...@@ -1439,7 +1441,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out_put; goto out_put;
...@@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg) ...@@ -1493,7 +1495,7 @@ static int igt_ppgtt_pin_update(void *arg)
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
vma = i915_vma_instance(obj, &ppgtt->base, NULL); vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out_put; goto out_put;
...@@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg) ...@@ -1531,7 +1533,8 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg; struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915; struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs; struct vfsmount *gemfs = i915->mm.gemfs;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
u32 *vaddr; u32 *vaddr;
...@@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg) ...@@ -1587,7 +1590,8 @@ static int igt_shrink_thp(void *arg)
{ {
struct i915_gem_context *ctx = arg; struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915; struct drm_i915_private *i915 = ctx->i915;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_vma *vma; struct i915_vma *vma;
unsigned int flags = PIN_USER; unsigned int flags = PIN_USER;
...@@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void) ...@@ -1696,14 +1700,14 @@ int i915_gem_huge_page_mock_selftests(void)
goto out_unlock; goto out_unlock;
} }
if (!i915_vm_is_48bit(&ppgtt->base)) { if (!i915_vm_is_48bit(&ppgtt->vm)) {
pr_err("failed to create 48b PPGTT\n"); pr_err("failed to create 48b PPGTT\n");
err = -EINVAL; err = -EINVAL;
goto out_close; goto out_close;
} }
/* If we were ever hit this then it's time to mock the 64K scratch */ /* If we were ever hit this then it's time to mock the 64K scratch */
if (!i915_vm_has_scratch_64K(&ppgtt->base)) { if (!i915_vm_has_scratch_64K(&ppgtt->vm)) {
pr_err("PPGTT missing 64K scratch page\n"); pr_err("PPGTT missing 64K scratch page\n");
err = -EINVAL; err = -EINVAL;
goto out_close; goto out_close;
...@@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void) ...@@ -1712,7 +1716,7 @@ int i915_gem_huge_page_mock_selftests(void)
err = i915_subtests(tests, ppgtt); err = i915_subtests(tests, ppgtt);
out_close: out_close:
i915_ppgtt_close(&ppgtt->base); i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt); i915_ppgtt_put(ppgtt);
out_unlock: out_unlock:
...@@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) ...@@ -1758,7 +1762,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv)
} }
if (ctx->ppgtt) if (ctx->ppgtt)
ctx->ppgtt->base.scrub_64K = true; ctx->ppgtt->vm.scrub_64K = true;
err = i915_subtests(tests, ctx); err = i915_subtests(tests, ctx);
......
...@@ -115,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, ...@@ -115,7 +115,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
{ {
struct drm_i915_private *i915 = to_i915(obj->base.dev); struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct i915_address_space *vm = struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct i915_request *rq; struct i915_request *rq;
struct i915_vma *vma; struct i915_vma *vma;
struct i915_vma *batch; struct i915_vma *batch;
...@@ -290,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx, ...@@ -290,7 +290,7 @@ create_test_object(struct i915_gem_context *ctx,
{ {
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_address_space *vm = struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->base : &ctx->i915->ggtt.base; ctx->ppgtt ? &ctx->ppgtt->vm : &ctx->i915->ggtt.vm;
u64 size; u64 size;
int err; int err;
...@@ -557,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915) ...@@ -557,7 +557,7 @@ static int fake_aliasing_ppgtt_enable(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.bound_list, mm.link) { list_for_each_entry(obj, &i915->mm.bound_list, mm.link) {
struct i915_vma *vma; struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
continue; continue;
......
...@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915) ...@@ -35,7 +35,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
u64 size; u64 size;
for (size = 0; for (size = 0;
size + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
size += I915_GTT_PAGE_SIZE) { size += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915) ...@@ -57,7 +57,7 @@ static int populate_ggtt(struct drm_i915_private *i915)
return -EINVAL; return -EINVAL;
} }
if (list_empty(&i915->ggtt.base.inactive_list)) { if (list_empty(&i915->ggtt.vm.inactive_list)) {
pr_err("No objects on the GGTT inactive list!\n"); pr_err("No objects on the GGTT inactive list!\n");
return -EINVAL; return -EINVAL;
} }
...@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915) ...@@ -69,7 +69,7 @@ static void unpin_ggtt(struct drm_i915_private *i915)
{ {
struct i915_vma *vma; struct i915_vma *vma;
list_for_each_entry(vma, &i915->ggtt.base.inactive_list, vm_link) list_for_each_entry(vma, &i915->ggtt.vm.inactive_list, vm_link)
i915_vma_unpin(vma); i915_vma_unpin(vma);
} }
...@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg) ...@@ -103,7 +103,7 @@ static int igt_evict_something(void *arg)
goto cleanup; goto cleanup;
/* Everything is pinned, nothing should happen */ /* Everything is pinned, nothing should happen */
err = i915_gem_evict_something(&ggtt->base, err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0, I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX, 0, U64_MAX,
0); 0);
...@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg) ...@@ -116,7 +116,7 @@ static int igt_evict_something(void *arg)
unpin_ggtt(i915); unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict something */ /* Everything is unpinned, we should be able to evict something */
err = i915_gem_evict_something(&ggtt->base, err = i915_gem_evict_something(&ggtt->vm,
I915_GTT_PAGE_SIZE, 0, 0, I915_GTT_PAGE_SIZE, 0, 0,
0, U64_MAX, 0, U64_MAX,
0); 0);
...@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg) ...@@ -181,7 +181,7 @@ static int igt_evict_for_vma(void *arg)
goto cleanup; goto cleanup;
/* Everything is pinned, nothing should happen */ /* Everything is pinned, nothing should happen */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err != -ENOSPC) { if (err != -ENOSPC) {
pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n", pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n",
err); err);
...@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg) ...@@ -191,7 +191,7 @@ static int igt_evict_for_vma(void *arg)
unpin_ggtt(i915); unpin_ggtt(i915);
/* Everything is unpinned, we should be able to evict the node */ /* Everything is unpinned, we should be able to evict the node */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) { if (err) {
pr_err("i915_gem_evict_for_node returned err=%d\n", pr_err("i915_gem_evict_for_node returned err=%d\n",
err); err);
...@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -229,7 +229,7 @@ static int igt_evict_for_cache_color(void *arg)
* i915_gtt_color_adjust throughout our driver, so using a mock color * i915_gtt_color_adjust throughout our driver, so using a mock color
* adjust will work just fine for our purposes. * adjust will work just fine for our purposes.
*/ */
ggtt->base.mm.color_adjust = mock_color_adjust; ggtt->vm.mm.color_adjust = mock_color_adjust;
obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
if (IS_ERR(obj)) { if (IS_ERR(obj)) {
...@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -265,7 +265,7 @@ static int igt_evict_for_cache_color(void *arg)
i915_vma_unpin(vma); i915_vma_unpin(vma);
/* Remove just the second vma */ /* Remove just the second vma */
err = i915_gem_evict_for_node(&ggtt->base, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (err) { if (err) {
pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err); pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err);
goto cleanup; goto cleanup;
...@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -276,7 +276,7 @@ static int igt_evict_for_cache_color(void *arg)
*/ */
target.color = I915_CACHE_L3_LLC; target.color = I915_CACHE_L3_LLC;
err = i915_gem_evict_for_node(&ggtt->base, &target, 0); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0);
if (!err) { if (!err) {
pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err); pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err);
err = -EINVAL; err = -EINVAL;
...@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg) ...@@ -288,7 +288,7 @@ static int igt_evict_for_cache_color(void *arg)
cleanup: cleanup:
unpin_ggtt(i915); unpin_ggtt(i915);
cleanup_objects(i915); cleanup_objects(i915);
ggtt->base.mm.color_adjust = NULL; ggtt->vm.mm.color_adjust = NULL;
return err; return err;
} }
...@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg) ...@@ -305,7 +305,7 @@ static int igt_evict_vm(void *arg)
goto cleanup; goto cleanup;
/* Everything is pinned, nothing should happen */ /* Everything is pinned, nothing should happen */
err = i915_gem_evict_vm(&ggtt->base); err = i915_gem_evict_vm(&ggtt->vm);
if (err) { if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err); err);
...@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg) ...@@ -314,7 +314,7 @@ static int igt_evict_vm(void *arg)
unpin_ggtt(i915); unpin_ggtt(i915);
err = i915_gem_evict_vm(&ggtt->base); err = i915_gem_evict_vm(&ggtt->vm);
if (err) { if (err) {
pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n",
err); err);
...@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg) ...@@ -359,9 +359,9 @@ static int igt_evict_contexts(void *arg)
/* Reserve a block so that we know we have enough to fit a few rq */ /* Reserve a block so that we know we have enough to fit a few rq */
memset(&hole, 0, sizeof(hole)); memset(&hole, 0, sizeof(hole));
err = i915_gem_gtt_insert(&i915->ggtt.base, &hole, err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole,
PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE, PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
PIN_NOEVICT); PIN_NOEVICT);
if (err) if (err)
goto out_locked; goto out_locked;
...@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg) ...@@ -377,9 +377,9 @@ static int igt_evict_contexts(void *arg)
goto out_locked; goto out_locked;
} }
if (i915_gem_gtt_insert(&i915->ggtt.base, &r->node, if (i915_gem_gtt_insert(&i915->ggtt.vm, &r->node,
1ul << 20, 0, I915_COLOR_UNEVICTABLE, 1ul << 20, 0, I915_COLOR_UNEVICTABLE,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
PIN_NOEVICT)) { PIN_NOEVICT)) {
kfree(r); kfree(r);
break; break;
......
...@@ -151,14 +151,14 @@ static int igt_ppgtt_alloc(void *arg) ...@@ -151,14 +151,14 @@ static int igt_ppgtt_alloc(void *arg)
if (err) if (err)
goto err_ppgtt; goto err_ppgtt;
if (!ppgtt->base.allocate_va_range) if (!ppgtt->vm.allocate_va_range)
goto err_ppgtt_cleanup; goto err_ppgtt_cleanup;
/* Check we can allocate the entire range */ /* Check we can allocate the entire range */
for (size = 4096; for (size = 4096;
size <= ppgtt->base.total; size <= ppgtt->vm.total;
size <<= 2) { size <<= 2) {
err = ppgtt->base.allocate_va_range(&ppgtt->base, 0, size); err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
if (err) { if (err) {
if (err == -ENOMEM) { if (err == -ENOMEM) {
pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n", pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
...@@ -168,15 +168,15 @@ static int igt_ppgtt_alloc(void *arg) ...@@ -168,15 +168,15 @@ static int igt_ppgtt_alloc(void *arg)
goto err_ppgtt_cleanup; goto err_ppgtt_cleanup;
} }
ppgtt->base.clear_range(&ppgtt->base, 0, size); ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
} }
/* Check we can incrementally allocate the entire range */ /* Check we can incrementally allocate the entire range */
for (last = 0, size = 4096; for (last = 0, size = 4096;
size <= ppgtt->base.total; size <= ppgtt->vm.total;
last = size, size <<= 2) { last = size, size <<= 2) {
err = ppgtt->base.allocate_va_range(&ppgtt->base, err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
last, size - last); last, size - last);
if (err) { if (err) {
if (err == -ENOMEM) { if (err == -ENOMEM) {
pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n", pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
...@@ -188,7 +188,7 @@ static int igt_ppgtt_alloc(void *arg) ...@@ -188,7 +188,7 @@ static int igt_ppgtt_alloc(void *arg)
} }
err_ppgtt_cleanup: err_ppgtt_cleanup:
ppgtt->base.cleanup(&ppgtt->base); ppgtt->vm.cleanup(&ppgtt->vm);
err_ppgtt: err_ppgtt:
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
kfree(ppgtt); kfree(ppgtt);
...@@ -987,12 +987,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, ...@@ -987,12 +987,12 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
err = PTR_ERR(ppgtt); err = PTR_ERR(ppgtt);
goto out_unlock; goto out_unlock;
} }
GEM_BUG_ON(offset_in_page(ppgtt->base.total)); GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
GEM_BUG_ON(ppgtt->base.closed); GEM_BUG_ON(ppgtt->vm.closed);
err = func(dev_priv, &ppgtt->base, 0, ppgtt->base.total, end_time); err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
i915_ppgtt_close(&ppgtt->base); i915_ppgtt_close(&ppgtt->vm);
i915_ppgtt_put(ppgtt); i915_ppgtt_put(ppgtt);
out_unlock: out_unlock:
mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&dev_priv->drm.struct_mutex);
...@@ -1061,18 +1061,18 @@ static int exercise_ggtt(struct drm_i915_private *i915, ...@@ -1061,18 +1061,18 @@ static int exercise_ggtt(struct drm_i915_private *i915,
mutex_lock(&i915->drm.struct_mutex); mutex_lock(&i915->drm.struct_mutex);
restart: restart:
list_sort(NULL, &ggtt->base.mm.hole_stack, sort_holes); list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
drm_mm_for_each_hole(node, &ggtt->base.mm, hole_start, hole_end) { drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
if (hole_start < last) if (hole_start < last)
continue; continue;
if (ggtt->base.mm.color_adjust) if (ggtt->vm.mm.color_adjust)
ggtt->base.mm.color_adjust(node, 0, ggtt->vm.mm.color_adjust(node, 0,
&hole_start, &hole_end); &hole_start, &hole_end);
if (hole_start >= hole_end) if (hole_start >= hole_end)
continue; continue;
err = func(i915, &ggtt->base, hole_start, hole_end, end_time); err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
if (err) if (err)
break; break;
...@@ -1134,7 +1134,7 @@ static int igt_ggtt_page(void *arg) ...@@ -1134,7 +1134,7 @@ static int igt_ggtt_page(void *arg)
goto out_free; goto out_free;
memset(&tmp, 0, sizeof(tmp)); memset(&tmp, 0, sizeof(tmp));
err = drm_mm_insert_node_in_range(&ggtt->base.mm, &tmp, err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
count * PAGE_SIZE, 0, count * PAGE_SIZE, 0,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
0, ggtt->mappable_end, 0, ggtt->mappable_end,
...@@ -1147,9 +1147,9 @@ static int igt_ggtt_page(void *arg) ...@@ -1147,9 +1147,9 @@ static int igt_ggtt_page(void *arg)
for (n = 0; n < count; n++) { for (n = 0; n < count; n++) {
u64 offset = tmp.start + n * PAGE_SIZE; u64 offset = tmp.start + n * PAGE_SIZE;
ggtt->base.insert_page(&ggtt->base, ggtt->vm.insert_page(&ggtt->vm,
i915_gem_object_get_dma_address(obj, 0), i915_gem_object_get_dma_address(obj, 0),
offset, I915_CACHE_NONE, 0); offset, I915_CACHE_NONE, 0);
} }
order = i915_random_order(count, &prng); order = i915_random_order(count, &prng);
...@@ -1188,7 +1188,7 @@ static int igt_ggtt_page(void *arg) ...@@ -1188,7 +1188,7 @@ static int igt_ggtt_page(void *arg)
kfree(order); kfree(order);
out_remove: out_remove:
ggtt->base.clear_range(&ggtt->base, tmp.start, tmp.size); ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
intel_runtime_pm_put(i915); intel_runtime_pm_put(i915);
drm_mm_remove_node(&tmp); drm_mm_remove_node(&tmp);
out_unpin: out_unpin:
...@@ -1229,7 +1229,7 @@ static int exercise_mock(struct drm_i915_private *i915, ...@@ -1229,7 +1229,7 @@ static int exercise_mock(struct drm_i915_private *i915,
ppgtt = ctx->ppgtt; ppgtt = ctx->ppgtt;
GEM_BUG_ON(!ppgtt); GEM_BUG_ON(!ppgtt);
err = func(i915, &ppgtt->base, 0, ppgtt->base.total, end_time); err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
mock_context_close(ctx); mock_context_close(ctx);
return err; return err;
...@@ -1270,7 +1270,7 @@ static int igt_gtt_reserve(void *arg) ...@@ -1270,7 +1270,7 @@ static int igt_gtt_reserve(void *arg)
/* Start by filling the GGTT */ /* Start by filling the GGTT */
for (total = 0; for (total = 0;
total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) { total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -1288,20 +1288,20 @@ static int igt_gtt_reserve(void *arg) ...@@ -1288,20 +1288,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects); list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
} }
err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size, obj->base.size,
total, total,
obj->cache_level, obj->cache_level,
0); 0);
if (err) { if (err) {
pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1319,7 +1319,7 @@ static int igt_gtt_reserve(void *arg) ...@@ -1319,7 +1319,7 @@ static int igt_gtt_reserve(void *arg)
/* Now we start forcing evictions */ /* Now we start forcing evictions */
for (total = I915_GTT_PAGE_SIZE; for (total = I915_GTT_PAGE_SIZE;
total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) { total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -1337,20 +1337,20 @@ static int igt_gtt_reserve(void *arg) ...@@ -1337,20 +1337,20 @@ static int igt_gtt_reserve(void *arg)
list_add(&obj->st_link, &objects); list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
} }
err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size, obj->base.size,
total, total,
obj->cache_level, obj->cache_level,
0); 0);
if (err) { if (err) {
pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1371,7 +1371,7 @@ static int igt_gtt_reserve(void *arg) ...@@ -1371,7 +1371,7 @@ static int igt_gtt_reserve(void *arg)
struct i915_vma *vma; struct i915_vma *vma;
u64 offset; u64 offset;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
...@@ -1383,18 +1383,18 @@ static int igt_gtt_reserve(void *arg) ...@@ -1383,18 +1383,18 @@ static int igt_gtt_reserve(void *arg)
goto out; goto out;
} }
offset = random_offset(0, i915->ggtt.base.total, offset = random_offset(0, i915->ggtt.vm.total,
2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
I915_GTT_MIN_ALIGNMENT); I915_GTT_MIN_ALIGNMENT);
err = i915_gem_gtt_reserve(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
obj->base.size, obj->base.size,
offset, offset,
obj->cache_level, obj->cache_level,
0); 0);
if (err) { if (err) {
pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1429,8 +1429,8 @@ static int igt_gtt_insert(void *arg) ...@@ -1429,8 +1429,8 @@ static int igt_gtt_insert(void *arg)
u64 start, end; u64 start, end;
} invalid_insert[] = { } invalid_insert[] = {
{ {
i915->ggtt.base.total + I915_GTT_PAGE_SIZE, 0, i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
}, },
{ {
2*I915_GTT_PAGE_SIZE, 0, 2*I915_GTT_PAGE_SIZE, 0,
...@@ -1460,7 +1460,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1460,7 +1460,7 @@ static int igt_gtt_insert(void *arg)
/* Check a couple of obviously invalid requests */ /* Check a couple of obviously invalid requests */
for (ii = invalid_insert; ii->size; ii++) { for (ii = invalid_insert; ii->size; ii++) {
err = i915_gem_gtt_insert(&i915->ggtt.base, &tmp, err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
ii->size, ii->alignment, ii->size, ii->alignment,
I915_COLOR_UNEVICTABLE, I915_COLOR_UNEVICTABLE,
ii->start, ii->end, ii->start, ii->end,
...@@ -1475,7 +1475,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1475,7 +1475,7 @@ static int igt_gtt_insert(void *arg)
/* Start by filling the GGTT */ /* Start by filling the GGTT */
for (total = 0; for (total = 0;
total + I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += I915_GTT_PAGE_SIZE) { total += I915_GTT_PAGE_SIZE) {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -1493,15 +1493,15 @@ static int igt_gtt_insert(void *arg) ...@@ -1493,15 +1493,15 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects); list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
} }
err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level, obj->base.size, 0, obj->cache_level,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
0); 0);
if (err == -ENOSPC) { if (err == -ENOSPC) {
/* maxed out the GGTT space */ /* maxed out the GGTT space */
...@@ -1510,7 +1510,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1510,7 +1510,7 @@ static int igt_gtt_insert(void *arg)
} }
if (err) { if (err) {
pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1522,7 +1522,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1522,7 +1522,7 @@ static int igt_gtt_insert(void *arg)
list_for_each_entry(obj, &objects, st_link) { list_for_each_entry(obj, &objects, st_link) {
struct i915_vma *vma; struct i915_vma *vma;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
...@@ -1542,7 +1542,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1542,7 +1542,7 @@ static int igt_gtt_insert(void *arg)
struct i915_vma *vma; struct i915_vma *vma;
u64 offset; u64 offset;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
...@@ -1557,13 +1557,13 @@ static int igt_gtt_insert(void *arg) ...@@ -1557,13 +1557,13 @@ static int igt_gtt_insert(void *arg)
goto out; goto out;
} }
err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level, obj->base.size, 0, obj->cache_level,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
0); 0);
if (err) { if (err) {
pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1579,7 +1579,7 @@ static int igt_gtt_insert(void *arg) ...@@ -1579,7 +1579,7 @@ static int igt_gtt_insert(void *arg)
/* And then force evictions */ /* And then force evictions */
for (total = 0; for (total = 0;
total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.base.total; total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
total += 2*I915_GTT_PAGE_SIZE) { total += 2*I915_GTT_PAGE_SIZE) {
struct i915_vma *vma; struct i915_vma *vma;
...@@ -1597,19 +1597,19 @@ static int igt_gtt_insert(void *arg) ...@@ -1597,19 +1597,19 @@ static int igt_gtt_insert(void *arg)
list_add(&obj->st_link, &objects); list_add(&obj->st_link, &objects);
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto out; goto out;
} }
err = i915_gem_gtt_insert(&i915->ggtt.base, &vma->node, err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
obj->base.size, 0, obj->cache_level, obj->base.size, 0, obj->cache_level,
0, i915->ggtt.base.total, 0, i915->ggtt.vm.total,
0); 0);
if (err) { if (err) {
pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
total, i915->ggtt.base.total, err); total, i915->ggtt.vm.total, err);
goto out; goto out;
} }
track_vma_bind(vma); track_vma_bind(vma);
...@@ -1669,7 +1669,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) ...@@ -1669,7 +1669,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_ggtt_page), SUBTEST(igt_ggtt_page),
}; };
GEM_BUG_ON(offset_in_page(i915->ggtt.base.total)); GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
return i915_subtests(tests, i915); return i915_subtests(tests, i915);
} }
...@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg) ...@@ -113,7 +113,7 @@ static int igt_gem_huge(void *arg)
obj = huge_gem_object(i915, obj = huge_gem_object(i915,
nreal * PAGE_SIZE, nreal * PAGE_SIZE,
i915->ggtt.base.total + PAGE_SIZE); i915->ggtt.vm.total + PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
...@@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg) ...@@ -311,7 +311,7 @@ static int igt_partial_tiling(void *arg)
obj = huge_gem_object(i915, obj = huge_gem_object(i915,
nreal << PAGE_SHIFT, nreal << PAGE_SHIFT,
(1 + next_prime_number(i915->ggtt.base.total >> PAGE_SHIFT)) << PAGE_SHIFT); (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
...@@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) ...@@ -440,7 +440,7 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
struct i915_vma *vma; struct i915_vma *vma;
int err; int err;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
......
...@@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) ...@@ -430,7 +430,7 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915)
if (err) if (err)
goto err; goto err;
vma = i915_vma_instance(obj, &i915->ggtt.base, NULL); vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err; goto err;
...@@ -555,7 +555,8 @@ static int live_empty_request(void *arg) ...@@ -555,7 +555,8 @@ static int live_empty_request(void *arg)
static struct i915_vma *recursive_batch(struct drm_i915_private *i915) static struct i915_vma *recursive_batch(struct drm_i915_private *i915)
{ {
struct i915_gem_context *ctx = i915->kernel_context; struct i915_gem_context *ctx = i915->kernel_context;
struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base; struct i915_address_space *vm =
ctx->ppgtt ? &ctx->ppgtt->vm : &i915->ggtt.vm;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const int gen = INTEL_GEN(i915); const int gen = INTEL_GEN(i915);
struct i915_vma *vma; struct i915_vma *vma;
......
...@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma, ...@@ -35,7 +35,7 @@ static bool assert_vma(struct i915_vma *vma,
{ {
bool ok = true; bool ok = true;
if (vma->vm != &ctx->ppgtt->base) { if (vma->vm != &ctx->ppgtt->vm) {
pr_err("VMA created with wrong VM\n"); pr_err("VMA created with wrong VM\n");
ok = false; ok = false;
} }
...@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915, ...@@ -110,8 +110,7 @@ static int create_vmas(struct drm_i915_private *i915,
list_for_each_entry(obj, objects, st_link) { list_for_each_entry(obj, objects, st_link) {
for (pinned = 0; pinned <= 1; pinned++) { for (pinned = 0; pinned <= 1; pinned++) {
list_for_each_entry(ctx, contexts, link) { list_for_each_entry(ctx, contexts, link) {
struct i915_address_space *vm = struct i915_address_space *vm = &ctx->ppgtt->vm;
&ctx->ppgtt->base;
struct i915_vma *vma; struct i915_vma *vma;
int err; int err;
...@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg) ...@@ -259,12 +258,12 @@ static int igt_vma_pin1(void *arg)
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | 8192),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)), VALID(0, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), VALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end), INVALID(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | i915->ggtt.mappable_end),
VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)), VALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.base.total), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | i915->ggtt.vm.total),
INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)), INVALID(0, PIN_GLOBAL | PIN_OFFSET_FIXED | round_down(U64_MAX, PAGE_SIZE)),
VALID(4096, PIN_GLOBAL), VALID(4096, PIN_GLOBAL),
...@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg) ...@@ -272,12 +271,12 @@ static int igt_vma_pin1(void *arg)
VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE), VALID(i915->ggtt.mappable_end - 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE), VALID(i915->ggtt.mappable_end, PIN_GLOBAL | PIN_MAPPABLE),
NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE), NOSPACE(i915->ggtt.mappable_end + 4096, PIN_GLOBAL | PIN_MAPPABLE),
VALID(i915->ggtt.base.total - 4096, PIN_GLOBAL), VALID(i915->ggtt.vm.total - 4096, PIN_GLOBAL),
VALID(i915->ggtt.base.total, PIN_GLOBAL), VALID(i915->ggtt.vm.total, PIN_GLOBAL),
NOSPACE(i915->ggtt.base.total + 4096, PIN_GLOBAL), NOSPACE(i915->ggtt.vm.total + 4096, PIN_GLOBAL),
NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL), NOSPACE(round_down(U64_MAX, PAGE_SIZE), PIN_GLOBAL),
INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)), INVALID(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_FIXED | (i915->ggtt.mappable_end - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.base.total - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (i915->ggtt.vm.total - 4096)),
INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)), INVALID(8192, PIN_GLOBAL | PIN_OFFSET_FIXED | (round_down(U64_MAX, PAGE_SIZE) - 4096)),
VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), VALID(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
...@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg) ...@@ -289,9 +288,9 @@ static int igt_vma_pin1(void *arg)
* variable start, end and size. * variable start, end and size.
*/ */
NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end), NOSPACE(0, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | i915->ggtt.mappable_end),
NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.base.total), NOSPACE(0, PIN_GLOBAL | PIN_OFFSET_BIAS | i915->ggtt.vm.total),
NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)), NOSPACE(8192, PIN_GLOBAL | PIN_MAPPABLE | PIN_OFFSET_BIAS | (i915->ggtt.mappable_end - 4096)),
NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.base.total - 4096)), NOSPACE(8192, PIN_GLOBAL | PIN_OFFSET_BIAS | (i915->ggtt.vm.total - 4096)),
#endif #endif
{ }, { },
#undef NOSPACE #undef NOSPACE
...@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg) ...@@ -307,13 +306,13 @@ static int igt_vma_pin1(void *arg)
* focusing on error handling of boundary conditions. * focusing on error handling of boundary conditions.
*/ */
GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.base.mm)); GEM_BUG_ON(!drm_mm_clean(&i915->ggtt.vm.mm));
obj = i915_gem_object_create_internal(i915, PAGE_SIZE); obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj)) if (IS_ERR(obj))
return PTR_ERR(obj); return PTR_ERR(obj);
vma = checked_vma_instance(obj, &i915->ggtt.base, NULL); vma = checked_vma_instance(obj, &i915->ggtt.vm, NULL);
if (IS_ERR(vma)) if (IS_ERR(vma))
goto out; goto out;
...@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a, ...@@ -405,7 +404,7 @@ static unsigned int rotated_size(const struct intel_rotation_plane_info *a,
static int igt_vma_rotate(void *arg) static int igt_vma_rotate(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
struct i915_address_space *vm = &i915->ggtt.base; struct i915_address_space *vm = &i915->ggtt.vm;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const struct intel_rotation_plane_info planes[] = { const struct intel_rotation_plane_info planes[] = {
{ .width = 1, .height = 1, .stride = 1 }, { .width = 1, .height = 1, .stride = 1 },
...@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma, ...@@ -604,7 +603,7 @@ static bool assert_pin(struct i915_vma *vma,
static int igt_vma_partial(void *arg) static int igt_vma_partial(void *arg)
{ {
struct drm_i915_private *i915 = arg; struct drm_i915_private *i915 = arg;
struct i915_address_space *vm = &i915->ggtt.base; struct i915_address_space *vm = &i915->ggtt.vm;
const unsigned int npages = 1021; /* prime! */ const unsigned int npages = 1021; /* prime! */
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
const struct phase { const struct phase {
......
...@@ -107,8 +107,8 @@ static int emit_recurse_batch(struct hang *h, ...@@ -107,8 +107,8 @@ static int emit_recurse_batch(struct hang *h,
struct drm_i915_private *i915 = h->i915; struct drm_i915_private *i915 = h->i915;
struct i915_address_space *vm = struct i915_address_space *vm =
rq->gem_context->ppgtt ? rq->gem_context->ppgtt ?
&rq->gem_context->ppgtt->base : &rq->gem_context->ppgtt->vm :
&i915->ggtt.base; &i915->ggtt.vm;
struct i915_vma *hws, *vma; struct i915_vma *hws, *vma;
unsigned int flags; unsigned int flags;
u32 *batch; u32 *batch;
......
...@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin, ...@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
struct i915_request *rq, struct i915_request *rq,
u32 arbitration_command) u32 arbitration_command)
{ {
struct i915_address_space *vm = &rq->gem_context->ppgtt->base; struct i915_address_space *vm = &rq->gem_context->ppgtt->vm;
struct i915_vma *hws, *vma; struct i915_vma *hws, *vma;
u32 *batch; u32 *batch;
int err; int err;
......
...@@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) ...@@ -33,7 +33,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
memset(cs, 0xc5, PAGE_SIZE); memset(cs, 0xc5, PAGE_SIZE);
i915_gem_object_unpin_map(result); i915_gem_object_unpin_map(result);
vma = i915_vma_instance(result, &engine->i915->ggtt.base, NULL); vma = i915_vma_instance(result, &engine->i915->ggtt.vm, NULL);
if (IS_ERR(vma)) { if (IS_ERR(vma)) {
err = PTR_ERR(vma); err = PTR_ERR(vma);
goto err_obj; goto err_obj;
......
...@@ -66,25 +66,25 @@ mock_ppgtt(struct drm_i915_private *i915, ...@@ -66,25 +66,25 @@ mock_ppgtt(struct drm_i915_private *i915,
return NULL; return NULL;
kref_init(&ppgtt->ref); kref_init(&ppgtt->ref);
ppgtt->base.i915 = i915; ppgtt->vm.i915 = i915;
ppgtt->base.total = round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
ppgtt->base.file = ERR_PTR(-ENODEV); ppgtt->vm.file = ERR_PTR(-ENODEV);
INIT_LIST_HEAD(&ppgtt->base.active_list); INIT_LIST_HEAD(&ppgtt->vm.active_list);
INIT_LIST_HEAD(&ppgtt->base.inactive_list); INIT_LIST_HEAD(&ppgtt->vm.inactive_list);
INIT_LIST_HEAD(&ppgtt->base.unbound_list); INIT_LIST_HEAD(&ppgtt->vm.unbound_list);
INIT_LIST_HEAD(&ppgtt->base.global_link); INIT_LIST_HEAD(&ppgtt->vm.global_link);
drm_mm_init(&ppgtt->base.mm, 0, ppgtt->base.total); drm_mm_init(&ppgtt->vm.mm, 0, ppgtt->vm.total);
ppgtt->base.clear_range = nop_clear_range; ppgtt->vm.clear_range = nop_clear_range;
ppgtt->base.insert_page = mock_insert_page; ppgtt->vm.insert_page = mock_insert_page;
ppgtt->base.insert_entries = mock_insert_entries; ppgtt->vm.insert_entries = mock_insert_entries;
ppgtt->base.bind_vma = mock_bind_ppgtt; ppgtt->vm.bind_vma = mock_bind_ppgtt;
ppgtt->base.unbind_vma = mock_unbind_ppgtt; ppgtt->vm.unbind_vma = mock_unbind_ppgtt;
ppgtt->base.set_pages = ppgtt_set_pages; ppgtt->vm.set_pages = ppgtt_set_pages;
ppgtt->base.clear_pages = clear_pages; ppgtt->vm.clear_pages = clear_pages;
ppgtt->base.cleanup = mock_cleanup; ppgtt->vm.cleanup = mock_cleanup;
return ppgtt; return ppgtt;
} }
...@@ -107,27 +107,27 @@ void mock_init_ggtt(struct drm_i915_private *i915) ...@@ -107,27 +107,27 @@ void mock_init_ggtt(struct drm_i915_private *i915)
INIT_LIST_HEAD(&i915->vm_list); INIT_LIST_HEAD(&i915->vm_list);
ggtt->base.i915 = i915; ggtt->vm.i915 = i915;
ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE); ggtt->gmadr = (struct resource) DEFINE_RES_MEM(0, 2048 * PAGE_SIZE);
ggtt->mappable_end = resource_size(&ggtt->gmadr); ggtt->mappable_end = resource_size(&ggtt->gmadr);
ggtt->base.total = 4096 * PAGE_SIZE; ggtt->vm.total = 4096 * PAGE_SIZE;
ggtt->base.clear_range = nop_clear_range; ggtt->vm.clear_range = nop_clear_range;
ggtt->base.insert_page = mock_insert_page; ggtt->vm.insert_page = mock_insert_page;
ggtt->base.insert_entries = mock_insert_entries; ggtt->vm.insert_entries = mock_insert_entries;
ggtt->base.bind_vma = mock_bind_ggtt; ggtt->vm.bind_vma = mock_bind_ggtt;
ggtt->base.unbind_vma = mock_unbind_ggtt; ggtt->vm.unbind_vma = mock_unbind_ggtt;
ggtt->base.set_pages = ggtt_set_pages; ggtt->vm.set_pages = ggtt_set_pages;
ggtt->base.clear_pages = clear_pages; ggtt->vm.clear_pages = clear_pages;
ggtt->base.cleanup = mock_cleanup; ggtt->vm.cleanup = mock_cleanup;
i915_address_space_init(&ggtt->base, i915, "global"); i915_address_space_init(&ggtt->vm, i915, "global");
} }
void mock_fini_ggtt(struct drm_i915_private *i915) void mock_fini_ggtt(struct drm_i915_private *i915)
{ {
struct i915_ggtt *ggtt = &i915->ggtt; struct i915_ggtt *ggtt = &i915->ggtt;
i915_address_space_fini(&ggtt->base); i915_address_space_fini(&ggtt->vm);
} }
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册