Commit 72e96d64 authored by Joonas Lahtinen

drm/i915: Refer to GGTT {,VM} consistently

Refer to the GGTT VM consistently as "ggtt->base" instead of just "ggtt",
"vm" or indirectly through other variables like "dev_priv->ggtt.base"
to avoid confusion with the i915_ggtt object itself and PPGTT VMs.

Refer to the GGTT as "ggtt" instead of indirectly through chaining.

As a bonus, this gets rid of the long-standing i915_obj_to_ggtt vs.
i915_gem_obj_to_ggtt naming conflict, due to the removal of i915_obj_to_ggtt!
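
For illustration, the resulting convention looks like this (a minimal sketch
distilled from the diff below; the function itself is hypothetical and not
part of this patch):

	/* Hypothetical example -- only the naming convention is real. */
	static void example(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);
		struct i915_ggtt *ggtt = &dev_priv->ggtt;

		/* Fields of the GGTT itself: ggtt->foo, not dev_priv->ggtt.foo */
		readl(ggtt->gsm);

		/* The GGTT VM: always ggtt->base, never a bare "vm" or "ggtt" alias */
		ggtt->base.clear_range(&ggtt->base, ggtt->base.start,
				       ggtt->base.total, true);
	}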

v2:
- Added some more after grepping sources with Chris

v3:
- Refer to GGTT VM through ggtt->base consistently instead of ggtt_vm
  (Chris)

v4:
- Convert all dev_priv->ggtt.foo accesses to ggtt->foo.

v5:
- Make patch checker happy

Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Parent 20a34e78
@@ -202,8 +202,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *vm = &dev_priv->ggtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;
 	u64 total_obj_size, total_gtt_size;
 	int count, ret;
@@ -216,11 +216,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_puts(m, "Active:\n");
-		head = &vm->active_list;
+		head = &ggtt->base.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_puts(m, "Inactive:\n");
-		head = &vm->inactive_list;
+		head = &ggtt->base.inactive_list;
 		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
@@ -429,11 +429,11 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 {
 	struct drm_info_node *node = m->private;
 	struct drm_device *dev = node->minor->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	u32 count, mappable_count, purgeable_count;
 	u64 size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm = &dev_priv->ggtt.base;
 	struct drm_file *file;
 	struct i915_vma *vma;
 	int ret;
@@ -452,12 +452,12 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, mappable_count, size, mappable_size);

 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->active_list, vm_link);
+	count_vmas(&ggtt->base.active_list, vm_link);
 	seq_printf(m, "  %u [%u] active objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);

 	size = count = mappable_size = mappable_count = 0;
-	count_vmas(&vm->inactive_list, vm_link);
+	count_vmas(&ggtt->base.inactive_list, vm_link);
 	seq_printf(m, "  %u [%u] inactive objects, %llu [%llu] bytes\n",
 		   count, mappable_count, size, mappable_size);
@@ -492,8 +492,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 		   count, size);

 	seq_printf(m, "%llu [%llu] gtt total\n",
-		   dev_priv->ggtt.base.total,
-		   (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
+		   ggtt->base.total, ggtt->mappable_end - ggtt->base.start);

 	seq_putc(m, '\n');
 	print_batch_pool_stats(m, dev_priv);
......
@@ -527,6 +527,7 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 {
 	struct apertures_struct *ap;
 	struct pci_dev *pdev = dev_priv->dev->pdev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool primary;
 	int ret;

@@ -534,8 +535,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 	if (!ap)
 		return -ENOMEM;

-	ap->ranges[0].base = dev_priv->ggtt.mappable_base;
-	ap->ranges[0].size = dev_priv->ggtt.mappable_end;
+	ap->ranges[0].base = ggtt->mappable_base;
+	ap->ranges[0].size = ggtt->mappable_end;

 	primary =
 		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1170,6 +1171,7 @@ static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t aperture_size;
 	int ret;

@@ -1213,17 +1215,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
 		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

-	aperture_size = dev_priv->ggtt.mappable_end;
+	aperture_size = ggtt->mappable_end;

-	dev_priv->ggtt.mappable =
-		io_mapping_create_wc(dev_priv->ggtt.mappable_base,
+	ggtt->mappable =
+		io_mapping_create_wc(ggtt->mappable_base,
 				     aperture_size);
-	if (dev_priv->ggtt.mappable == NULL) {
+	if (!ggtt->mappable) {
 		ret = -EIO;
 		goto out_ggtt;
 	}

-	dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
-					       aperture_size);
+	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
+				      aperture_size);

 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
@@ -1266,13 +1268,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;

 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);

 	pm_qos_remove_request(&dev_priv->pm_qos);
-	arch_phys_wc_del(dev_priv->ggtt.mtrr);
-	io_mapping_free(dev_priv->ggtt.mappable);
+	arch_phys_wc_del(ggtt->mtrr);
+	io_mapping_free(ggtt->mappable);
 	i915_ggtt_cleanup_hw(dev);
 }
......
@@ -3154,9 +3154,6 @@ i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
 bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);

 /* Some GGTT VM helpers */
-#define i915_obj_to_ggtt(obj) \
-	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
-
 static inline struct i915_hw_ppgtt *
 i915_vm_to_ppgtt(struct i915_address_space *vm)
 {
@@ -3173,7 +3170,10 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 static inline unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	return i915_gem_obj_size(obj, &ggtt->base);
 }

 static inline int __must_check
@@ -3181,7 +3181,10 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
 		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	return i915_gem_object_pin(obj, &ggtt->base,
 				   alignment, flags | PIN_GLOBAL);
 }
......
@@ -130,9 +130,9 @@ int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_get_aperture *args = data;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct drm_i915_gem_get_aperture *args = data;
 	struct i915_vma *vma;
 	size_t pinned;

@@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);

-	args->aper_size = dev_priv->ggtt.base.total;
+	args->aper_size = ggtt->base.total;
 	args->aper_available_size = args->aper_size - pinned;

 	return 0;
@@ -765,7 +765,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 			 struct drm_i915_gem_pwrite *args,
 			 struct drm_file *file)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	ssize_t remain;
 	loff_t offset, page_base;
 	char __user *user_data;

@@ -807,7 +808,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		 * source page isn't available. Return the error and we'll
 		 * retry in the slow path.
 		 */
-		if (fast_user_write(dev_priv->ggtt.mappable, page_base,
+		if (fast_user_write(ggtt->mappable, page_base,
 				    page_offset, user_data, page_length)) {
 			ret = -EFAULT;
 			goto out_flush;
@@ -1790,7 +1791,8 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_ggtt_view view = i915_ggtt_view_normal;
 	pgoff_t page_offset;
 	unsigned long pfn;

@@ -1825,7 +1827,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}

 	/* Use a partial view if the object is bigger than the aperture. */
-	if (obj->base.size >= dev_priv->ggtt.mappable_end &&
+	if (obj->base.size >= ggtt->mappable_end &&
 	    obj->tiling_mode == I915_TILING_NONE) {
 		static const unsigned int chunk_size = 256; // 1 MiB

@@ -1853,7 +1855,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		goto unpin;

 	/* Finally, remap it using the new GTT offset */
-	pfn = dev_priv->ggtt.mappable_base +
+	pfn = ggtt->mappable_base +
 	      i915_gem_obj_ggtt_offset_view(obj, &view);
 	pfn >>= PAGE_SHIFT;
@@ -3458,7 +3460,8 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   uint64_t flags)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	u32 fence_alignment, unfenced_alignment;
 	u32 search_flag, alloc_flag;
 	u64 start, end;

@@ -3505,7 +3508,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
+		end = min_t(u64, end, ggtt->mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
@@ -3712,6 +3715,9 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t old_write_domain, old_read_domains;
 	struct i915_vma *vma;
 	int ret;

@@ -3766,7 +3772,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	vma = i915_gem_obj_to_ggtt(obj);
 	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
 		list_move_tail(&vma->vm_link,
-			       &to_i915(obj->base.dev)->ggtt.base.inactive_list);
+			       &ggtt->base.inactive_list);

 	return 0;
 }
@@ -4297,9 +4303,13 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 uint32_t alignment,
 			 uint64_t flags)
 {
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
 	BUG_ON(!view);

-	return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
+	return i915_gem_object_do_pin(obj, &ggtt->base, view,
 				      alignment, flags | PIN_GLOBAL);
 }
@@ -4611,13 +4621,15 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 					   const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;

 	BUG_ON(!view);

 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
@@ -5210,11 +5222,12 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;

 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
@@ -5241,11 +5254,12 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
+	struct drm_i915_private *dev_priv = to_i915(o->base.dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma;

 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->vm == ggtt &&
+		if (vma->vm == &ggtt->base &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
......
@@ -313,7 +313,8 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		   uint64_t target_offset)
 {
 	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint64_t delta = relocation_target(reloc, target_offset);
 	uint64_t offset;
 	void __iomem *reloc_page;

@@ -330,7 +331,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 	/* Map the page containing the relocation we're going to perform. */
 	offset = i915_gem_obj_ggtt_offset(obj);
 	offset += reloc->offset;
-	reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+	reloc_page = io_mapping_map_atomic_wc(ggtt->mappable,
 					      offset & PAGE_MASK);
 	iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));

@@ -340,7 +341,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
 		if (offset_in_page(offset) == 0) {
 			io_mapping_unmap_atomic(reloc_page);
 			reloc_page =
-				io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+				io_mapping_map_atomic_wc(ggtt->mappable,
 							 offset);
 		}
@@ -1431,7 +1432,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_i915_gem_execbuffer2 *args,
 		       struct drm_i915_gem_exec_object2 *exec)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;

@@ -1504,7 +1506,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ctx->ppgtt)
 		vm = &ctx->ppgtt->base;
 	else
-		vm = &dev_priv->ggtt.base;
+		vm = &ggtt->base;

 	memset(&params_master, 0x00, sizeof(params_master));
......
@@ -1629,6 +1629,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
 				  struct i915_page_directory *pd,
 				  uint32_t start, uint32_t length)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_page_table *pt;
 	uint32_t pde, temp;

@@ -1637,7 +1638,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,

 	/* Make sure write is complete before other code can use this page
 	 * table. Also require for WC mapped PTEs */
-	readl(dev_priv->ggtt.gsm);
+	readl(ggtt->gsm);
 }

 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1862,7 +1863,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 {
 	DECLARE_BITMAP(new_page_tables, I915_PDES);
 	struct drm_device *dev = vm->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_hw_ppgtt *ppgtt =
 				container_of(vm, struct i915_hw_ppgtt, base);
 	struct i915_page_table *pt;

@@ -1930,7 +1932,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,

 	/* Make sure write is complete before other code can use this page
 	 * table. Also require for WC mapped PTEs */
-	readl(dev_priv->ggtt.gsm);
+	readl(ggtt->gsm);

 	mark_tlbs_dirty(ppgtt);
 	return 0;
@@ -1995,7 +1997,8 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 {
 	struct i915_address_space *vm = &ppgtt->base;
 	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool retried = false;
 	int ret;

@@ -2003,23 +2006,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	 * allocator works in address space sizes, so it's multiplied by page
 	 * size. We allocate at the top of the GTT to avoid fragmentation.
 	 */
-	BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));
+	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

 	ret = gen6_init_scratch(vm);
 	if (ret)
 		return ret;

alloc:
-	ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
+	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
 						  &ppgtt->node, GEN6_PD_SIZE,
 						  GEN6_PD_ALIGN, 0,
-						  0, dev_priv->ggtt.base.total,
+						  0, ggtt->base.total,
 						  DRM_MM_TOPDOWN);
 	if (ret == -ENOSPC && !retried) {
-		ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
+		ret = i915_gem_evict_something(dev, &ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
 					       I915_CACHE_NONE,
-					       0, dev_priv->ggtt.base.total,
+					       0, ggtt->base.total,
 					       0);
 		if (ret)
 			goto err_out;

@@ -2032,7 +2035,7 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 		goto err_out;

-	if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
+	if (ppgtt->node.start < ggtt->mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");

 	return 0;
@@ -2060,10 +2063,11 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 {
 	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;

-	ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
+	ppgtt->base.pte_encode = ggtt->base.pte_encode;
 	if (IS_GEN6(dev)) {
 		ppgtt->switch_mm = gen6_mm_switch;
 	} else if (IS_HASWELL(dev)) {

@@ -2093,7 +2097,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	ppgtt->pd.base.ggtt_offset =
 		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

-	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
+	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
 		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

 	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2261,9 +2265,10 @@ static bool needs_idle_maps(struct drm_device *dev)

 static bool do_idling(struct drm_i915_private *dev_priv)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	bool ret = dev_priv->mm.interruptible;

-	if (unlikely(dev_priv->ggtt.do_idle_maps)) {
+	if (unlikely(ggtt->do_idle_maps)) {
 		dev_priv->mm.interruptible = false;
 		if (i915_gpu_idle(dev_priv->dev)) {
 			DRM_ERROR("Couldn't idle GPU\n");

@@ -2277,7 +2282,9 @@ static bool do_idling(struct drm_i915_private *dev_priv)

 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
 {
-	if (unlikely(dev_priv->ggtt.do_idle_maps))
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
+	if (unlikely(ggtt->do_idle_maps))
 		dev_priv->mm.interruptible = interruptible;
 }
@@ -2321,7 +2328,8 @@ static void i915_ggtt_flush(struct drm_i915_private *dev_priv)

 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;

 	/* Don't bother messing with faults pre GEN6 as we have little
 	 * documentation supporting that it's a good idea.

@@ -2331,10 +2339,8 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)

 	i915_check_and_clear_faults(dev);

-	dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
-					dev_priv->ggtt.base.start,
-					dev_priv->ggtt.base.total,
-					true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+			       true);

 	i915_ggtt_flush(dev_priv);
 }
@@ -2364,10 +2370,11 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 				     uint64_t start,
 				     enum i915_cache_level level, u32 unused)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen8_pte_t __iomem *gtt_entries =
-		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
+		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0; /* shut up gcc */
@@ -2441,10 +2448,11 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
 				     uint64_t start,
 				     enum i915_cache_level level, u32 flags)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	gen6_pte_t __iomem *gtt_entries =
-		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
+		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
 	int i = 0;
 	struct sg_page_iter sg_iter;
 	dma_addr_t addr = 0;
@@ -2484,12 +2492,13 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen8_pte_t scratch_pte, __iomem *gtt_base =
-		(gen8_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
+		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
 	int rpm_atomic_seq;
@@ -2515,12 +2524,13 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
 				  uint64_t length,
 				  bool use_scratch)
 {
-	struct drm_i915_private *dev_priv = vm->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	unsigned first_entry = start >> PAGE_SHIFT;
 	unsigned num_entries = length >> PAGE_SHIFT;
 	gen6_pte_t scratch_pte, __iomem *gtt_base =
-		(gen6_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
-	const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
+		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
+	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
 	int i;
 	int rpm_atomic_seq;
@@ -2713,8 +2723,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	 * aperture. One page should be enough to keep any prefetching inside
 	 * of the aperture.
 	 */
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_mm_node *entry;
 	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
@@ -2722,13 +2732,13 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,

 	BUG_ON(mappable_end > end);

-	ggtt_vm->start = start;
+	ggtt->base.start = start;

 	/* Subtract the guard page before address space initialization to
 	 * shrink the range used by drm_mm */
-	ggtt_vm->total = end - start - PAGE_SIZE;
-	i915_address_space_init(ggtt_vm, dev_priv);
-	ggtt_vm->total += PAGE_SIZE;
+	ggtt->base.total = end - start - PAGE_SIZE;
+	i915_address_space_init(&ggtt->base, dev_priv);
+	ggtt->base.total += PAGE_SIZE;

 	if (intel_vgpu_active(dev)) {
 		ret = intel_vgt_balloon(dev);
@@ -2737,36 +2747,36 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	}

 	if (!HAS_LLC(dev))
-		ggtt_vm->mm.color_adjust = i915_gtt_color_adjust;
+		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;

 	/* Mark any preallocated objects as occupied */
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
+		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);

 		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
 			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

 		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
+		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
 		if (ret) {
 			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
 			return ret;
 		}
 		vma->bound |= GLOBAL_BIND;
 		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt_vm->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	}

 	/* Clear any non-preallocated blocks */
-	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
+	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
 			      hole_start, hole_end);
-		ggtt_vm->clear_range(ggtt_vm, hole_start,
-				     hole_end - hole_start, true);
+		ggtt->base.clear_range(&ggtt->base, hole_start,
+				       hole_end - hole_start, true);
 	}

 	/* And finally clear the reserved guard page */
-	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
+	ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);

 	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
 		struct i915_hw_ppgtt *ppgtt;
@@ -2797,8 +2807,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 				 true);

 	dev_priv->mm.aliasing_ppgtt = ppgtt;
-	WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
-	dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
+	WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
+	ggtt->base.bind_vma = aliasing_gtt_bind_vma;
 	}

 	return 0;
@@ -2810,13 +2820,10 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 */
void i915_gem_init_ggtt(struct drm_device *dev)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u64 gtt_size, mappable_size;
-
-	gtt_size = dev_priv->ggtt.base.total;
-	mappable_size = dev_priv->ggtt.mappable_end;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;

-	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
+	i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
}

/**
/** /**
...@@ -2825,8 +2832,8 @@ void i915_gem_init_ggtt(struct drm_device *dev) ...@@ -2825,8 +2832,8 @@ void i915_gem_init_ggtt(struct drm_device *dev)
*/ */
void i915_ggtt_cleanup_hw(struct drm_device *dev) void i915_ggtt_cleanup_hw(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_address_space *vm = &dev_priv->ggtt.base; struct i915_ggtt *ggtt = &dev_priv->ggtt;
if (dev_priv->mm.aliasing_ppgtt) { if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
...@@ -2836,15 +2843,15 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev) ...@@ -2836,15 +2843,15 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
i915_gem_cleanup_stolen(dev); i915_gem_cleanup_stolen(dev);
if (drm_mm_initialized(&vm->mm)) { if (drm_mm_initialized(&ggtt->base.mm)) {
if (intel_vgpu_active(dev)) if (intel_vgpu_active(dev))
intel_vgt_deballoon(); intel_vgt_deballoon();
drm_mm_takedown(&vm->mm); drm_mm_takedown(&ggtt->base.mm);
list_del(&vm->global_link); list_del(&ggtt->base.global_link);
} }
vm->cleanup(vm); ggtt->base.cleanup(&ggtt->base);
} }
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -2928,13 +2935,14 @@ static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
 static int ggtt_probe_common(struct drm_device *dev,
 			     size_t gtt_size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_page_scratch *scratch_page;
-	phys_addr_t gtt_phys_addr;
+	phys_addr_t ggtt_phys_addr;

 	/* For Modern GENs the PTEs and register space are split in the BAR */
-	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
-		(pci_resource_len(dev->pdev, 0) / 2);
+	ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
+			 (pci_resource_len(dev->pdev, 0) / 2);

 	/*
 	 * On BXT writes larger than 64 bit to the GTT pagetable range will be

@@ -2944,10 +2952,10 @@ static int ggtt_probe_common(struct drm_device *dev,
 	 * readback check when writing GTT PTE entries.
 	 */
 	if (IS_BROXTON(dev))
-		dev_priv->ggtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+		ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
 	else
-		dev_priv->ggtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
-	if (!dev_priv->ggtt.gsm) {
+		ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
+	if (!ggtt->gsm) {
 		DRM_ERROR("Failed to map the gtt page table\n");
 		return -ENOMEM;
 	}

@@ -2956,11 +2964,11 @@ static int ggtt_probe_common(struct drm_device *dev,
 	if (IS_ERR(scratch_page)) {
 		DRM_ERROR("Scratch setup failed\n");
 		/* iounmap will also get called at remove, but meh */
-		iounmap(dev_priv->ggtt.gsm);
+		iounmap(ggtt->gsm);
 		return PTR_ERR(scratch_page);
 	}

-	dev_priv->ggtt.base.scratch_page = scratch_page;
+	ggtt->base.scratch_page = scratch_page;

 	return 0;
}
@@ -3041,7 +3049,7 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
 static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 {
 	struct drm_device *dev = ggtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	u16 snb_gmch_ctl;
 	int ret;
@@ -3082,7 +3090,6 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
 	ggtt->base.bind_vma = ggtt_bind_vma;
 	ggtt->base.unbind_vma = ggtt_unbind_vma;

-
 	return ret;
 }
@@ -3132,7 +3139,7 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 static int i915_gmch_probe(struct i915_ggtt *ggtt)
 {
 	struct drm_device *dev = ggtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	int ret;

 	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
@@ -3167,7 +3174,7 @@ static void i915_gmch_remove(struct i915_address_space *vm)
 */
int i915_ggtt_init_hw(struct drm_device *dev)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
@@ -3236,33 +3243,30 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 	return 0;

out_gtt_cleanup:
-	ggtt->base.cleanup(&dev_priv->ggtt.base);
+	ggtt->base.cleanup(&ggtt->base);

 	return ret;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct drm_i915_gem_object *obj;
-	struct i915_address_space *vm;
 	struct i915_vma *vma;
 	bool flush;

 	i915_check_and_clear_faults(dev);

 	/* First fill our portion of the GTT with scratch pages */
-	dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
-					dev_priv->ggtt.base.start,
-					dev_priv->ggtt.base.total,
-					true);
+	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
+			       true);

 	/* Cache flush objects bound into GGTT and rebind them. */
-	vm = &dev_priv->ggtt.base;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		flush = false;
 		list_for_each_entry(vma, &obj->vma_list, obj_link) {
-			if (vma->vm != vm)
+			if (vma->vm != &ggtt->base)
 				continue;

 			WARN_ON(i915_vma_bind(vma, obj->cache_level,
@@ -3285,6 +3289,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	}

 	if (USES_PPGTT(dev)) {
+		struct i915_address_space *vm;
+
 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 			/* TODO: Perhaps it shouldn't be gen6 specific */
@@ -3352,11 +3358,13 @@ struct i915_vma *
 i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 				       const struct i915_ggtt_view *view)
 {
-	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

 	if (!vma)
-		vma = __i915_gem_vma_create(obj, ggtt, view);
+		vma = __i915_gem_vma_create(obj, &ggtt->base, view);

 	return vma;
......
@@ -42,7 +42,7 @@ typedef uint64_t gen8_pde_t;
 typedef uint64_t gen8_ppgtt_pdpe_t;
 typedef uint64_t gen8_ppgtt_pml4e_t;

-#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
+#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

 /* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
 #define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
......
@@ -72,9 +72,11 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
 				struct drm_mm_node *node, u64 size,
 				unsigned alignment)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+
 	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
 						    alignment, 0,
-						    dev_priv->ggtt.stolen_usable_size);
+						    ggtt->stolen_usable_size);
 }

 void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -87,7 +89,8 @@ void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,

 static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct resource *r;
 	u32 base;
@@ -134,7 +137,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 					 I85X_DRB3, &tmp);
 		tom = tmp * MB(32);

-		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	} else if (IS_845G(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -158,7 +161,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);

-		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	} else if (IS_I830(dev)) {
 		u32 tseg_size = 0;
 		u32 tom;
@@ -178,7 +181,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 					 I830_DRB3, &tmp);
 		tom = tmp * MB(32);

-		base = tom - tseg_size - dev_priv->ggtt.stolen_size;
+		base = tom - tseg_size - ggtt->stolen_size;
 	}

 	if (base == 0)
@@ -189,41 +192,41 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		struct {
 			u32 start, end;
 		} stolen[2] = {
-			{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
-			{ .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+			{ .start = base, .end = base + ggtt->stolen_size, },
+			{ .start = base, .end = base + ggtt->stolen_size, },
 		};
-		u64 gtt_start, gtt_end;
+		u64 ggtt_start, ggtt_end;

-		gtt_start = I915_READ(PGTBL_CTL);
+		ggtt_start = I915_READ(PGTBL_CTL);
 		if (IS_GEN4(dev))
-			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
-				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
+				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
 		else
-			gtt_start &= PGTBL_ADDRESS_LO_MASK;
-		gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
+			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
+		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

-		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
-			stolen[0].end = gtt_start;
-		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
-			stolen[1].start = gtt_end;
+		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
+			stolen[0].end = ggtt_start;
+		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
+			stolen[1].start = ggtt_end;

 		/* pick the larger of the two chunks */
 		if (stolen[0].end - stolen[0].start >
 		    stolen[1].end - stolen[1].start) {
 			base = stolen[0].start;
-			dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
+			ggtt->stolen_size = stolen[0].end - stolen[0].start;
 		} else {
 			base = stolen[1].start;
-			dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
+			ggtt->stolen_size = stolen[1].end - stolen[1].start;
 		}

 		if (stolen[0].start != stolen[1].start ||
 		    stolen[0].end != stolen[1].end) {
 			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
-				      (unsigned long long) gtt_start,
-				      (unsigned long long) gtt_end - 1);
+				      (unsigned long long)ggtt_start,
+				      (unsigned long long)ggtt_end - 1);
 			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
-				      base, base + (u32) dev_priv->ggtt.stolen_size - 1);
+				      base, base + (u32)ggtt->stolen_size - 1);
 		}
 	}
@@ -233,7 +236,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	 * kernel. So if the region is already marked as busy, something
 	 * is seriously wrong.
 	 */
-	r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
+	r = devm_request_mem_region(dev->dev, base, ggtt->stolen_size,
 				    "Graphics Stolen Memory");
 	if (r == NULL) {
 		/*
@@ -245,7 +248,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		 * reservation starting from 1 instead of 0.
 		 */
 		r = devm_request_mem_region(dev->dev, base + 1,
-					    dev_priv->ggtt.stolen_size - 1,
+					    ggtt->stolen_size - 1,
 					    "Graphics Stolen Memory");
 		/*
 		 * GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +256,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 		 */
 		if (r == NULL && !IS_GEN3(dev)) {
 			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
-				  base, base + (uint32_t)dev_priv->ggtt.stolen_size);
+				  base, base + (uint32_t)ggtt->stolen_size);
 			base = 0;
 		}
 	}
@@ -274,11 +277,12 @@ void i915_gem_cleanup_stolen(struct drm_device *dev)

 static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
 				    unsigned long *base, unsigned long *size)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
 				     CTG_STOLEN_RESERVED :
 				     ELK_STOLEN_RESERVED);
 	unsigned long stolen_top = dev_priv->mm.stolen_base +
-		dev_priv->ggtt.stolen_size;
+				   ggtt->stolen_size;

 	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
...@@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv, ...@@ -369,10 +373,11 @@ static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
unsigned long *base, unsigned long *size) unsigned long *base, unsigned long *size)
{ {
struct i915_ggtt *ggtt = &dev_priv->ggtt;
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED); uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top; unsigned long stolen_top;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size; stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK; *base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
...@@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv, ...@@ -388,7 +393,8 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
int i915_gem_init_stolen(struct drm_device *dev) int i915_gem_init_stolen(struct drm_device *dev)
{ {
struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_private *dev_priv = to_i915(dev);
struct i915_ggtt *ggtt = &dev_priv->ggtt;
unsigned long reserved_total, reserved_base = 0, reserved_size; unsigned long reserved_total, reserved_base = 0, reserved_size;
unsigned long stolen_top; unsigned long stolen_top;
...@@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -401,14 +407,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
} }
#endif #endif
if (dev_priv->ggtt.stolen_size == 0) if (ggtt->stolen_size == 0)
return 0; return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0) if (dev_priv->mm.stolen_base == 0)
return 0; return 0;
stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size; stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
switch (INTEL_INFO(dev_priv)->gen) { switch (INTEL_INFO(dev_priv)->gen) {
case 2: case 2:
...@@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -458,19 +464,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0; return 0;
} }
dev_priv->ggtt.stolen_reserved_base = reserved_base; ggtt->stolen_reserved_base = reserved_base;
dev_priv->ggtt.stolen_reserved_size = reserved_size; ggtt->stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen /* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */ * memory, so just consider the start. */
reserved_total = stolen_top - reserved_base; reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n", DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
dev_priv->ggtt.stolen_size >> 10, ggtt->stolen_size >> 10,
(dev_priv->ggtt.stolen_size - reserved_total) >> 10); (ggtt->stolen_size - reserved_total) >> 10);
dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size - ggtt->stolen_usable_size = ggtt->stolen_size - reserved_total;
reserved_total;
/* /*
* Basic memrange allocator for stolen space. * Basic memrange allocator for stolen space.
...@@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev) ...@@ -483,7 +488,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later. * problem later.
*/ */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size); drm_mm_init(&dev_priv->mm.stolen, 0, ggtt->stolen_usable_size);
return 0; return 0;
} }
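The accounting above reduces to three quantities: the top of stolen, the portion reserved at its tail, and what remains for the drm_mm allocator. A worked example with made-up numbers (a 64 MiB stolen region whose final 8 MiB are firmware-reserved):

	/* Illustrative arithmetic only; the values are invented. */
	static size_t example_stolen_usable(void)
	{
		unsigned long stolen_base    = 0x80000000UL;
		size_t        stolen_size    = 64 << 20;
		unsigned long stolen_top     = stolen_base + stolen_size; /* 0x84000000 */
		unsigned long reserved_base  = stolen_top - (8 << 20);
		unsigned long reserved_total = stolen_top - reserved_base; /* 8 MiB */

		return stolen_size - reserved_total; /* 56 MiB handed to drm_mm */
	}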
@@ -492,12 +497,13 @@ static struct sg_table *
 i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
-	BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
+	BUG_ON(offset > ggtt->stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
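As the comment above says, stolen memory has no struct page backing, so the function wraps the physically contiguous range in a single-entry sg_table that carries only a DMA address and length. A sketch of how such a table is built (not the verbatim function body; names are illustrative):

	#include <linux/scatterlist.h>
	#include <linux/slab.h>

	/* Sketch: wrap the contiguous range [phys_base + offset,
	 * phys_base + offset + size) in a one-entry sg_table. */
	static struct sg_table *wrap_stolen_range(dma_addr_t phys_base,
						  u32 offset, u32 size)
	{
		struct sg_table *st;

		st = kmalloc(sizeof(*st), GFP_KERNEL);
		if (!st)
			return NULL;
		if (sg_alloc_table(st, 1, GFP_KERNEL)) {
			kfree(st);
			return NULL;
		}

		st->sgl->offset = 0;
		st->sgl->length = size;
		sg_dma_address(st->sgl) = phys_base + offset;
		sg_dma_len(st->sgl) = size;

		return st;
	}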
@@ -628,8 +634,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 gtt_offset,
					       u32 size)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct i915_address_space *ggtt = &dev_priv->ggtt.base;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
@@ -675,7 +681,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

-	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
+	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
@@ -688,8 +694,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->mm)) {
-		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
+	if (drm_mm_initialized(&ggtt->base.mm)) {
+		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err;
@@ -697,7 +703,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->inactive_list);
+		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
	}

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
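Note the allocation style in the hunk above: because the BIOS placed this object at a fixed GGTT offset, the code pre-fills vma->node.start/size and asks drm_mm to claim exactly that range rather than search for free space. A sketch of the idiom with hypothetical names:

	/* Sketch: claim a fixed, pre-determined range in a drm_mm, as opposed
	 * to letting drm_mm search for a free hole. */
	static int reserve_fixed_range(struct drm_mm *mm, struct drm_mm_node *node,
				       u64 start, u64 size)
	{
		node->start = start;
		node->size = size;
		/* Fails (e.g. -ENOSPC) if the range overlaps an existing node. */
		return drm_mm_reserve_node(mm, node);
	}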
...

@@ -627,6 +627,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
			 struct drm_i915_gem_object *src,
			 struct i915_address_space *vm)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_error_object *dst;
	struct i915_vma *vma = NULL;
	int num_pages;
@@ -653,7 +654,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
	vma = i915_gem_obj_to_ggtt(src);
	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
		    vma && (vma->bound & GLOBAL_BIND) &&
-		    reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);
+		    reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);

	/* Cannot access stolen address directly, try to use the aperture */
	if (src->stolen) {
@@ -663,7 +664,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
			goto unwind;

		reloc_offset = i915_gem_obj_ggtt_offset(src);
-		if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
+		if (reloc_offset + num_pages * PAGE_SIZE > ggtt->mappable_end)
			goto unwind;
	}
@@ -689,7 +690,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
			 * captures what the GPU read.
			 */
-			s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+			s = io_mapping_map_atomic_wc(ggtt->mappable,
						     reloc_offset);
			memcpy_fromio(d, s, PAGE_SIZE);
			io_mapping_unmap_atomic(s);
@@ -1015,7 +1016,8 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 static void i915_gem_record_rings(struct drm_device *dev,
				  struct drm_i915_error_state *error)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_request *request;
	int i, count;
@@ -1038,7 +1040,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
		vm = request->ctx && request->ctx->ppgtt ?
			&request->ctx->ppgtt->base :
-			&dev_priv->ggtt.base;
+			&ggtt->base;

		/* We need to copy these to an anonymous buffer
		 * as the simplest method to avoid being overwritten
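The error-capture path above reads GPU-visible pages through the mappable aperture when the CPU cannot reach them directly (stolen memory has no kernel mapping). A minimal sketch of one page copied that way, assuming 'aperture' stands in for ggtt->mappable and 'ggtt_offset' for the object's GGTT offset:

	#include <linux/io.h>
	#include <linux/io-mapping.h>

	/* Sketch: snapshot one page of GPU-visible memory via the aperture. */
	static void capture_page(struct io_mapping *aperture,
				 unsigned long ggtt_offset, void *dst)
	{
		void __iomem *s;

		s = io_mapping_map_atomic_wc(aperture, ggtt_offset);
		memcpy_fromio(dst, s, PAGE_SIZE);
		io_mapping_unmap_atomic(s);
	}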
...

@@ -181,8 +181,8 @@ static int vgt_balloon_space(struct drm_mm *mm,
 int intel_vgt_balloon(struct drm_device *dev)
 {
	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
-	unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
	unsigned long mappable_base, mappable_size, mappable_end;
	unsigned long unmappable_base, unmappable_size, unmappable_end;
@@ -202,19 +202,19 @@ int intel_vgt_balloon(struct drm_device *dev)
	DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
		 unmappable_base, unmappable_size / 1024);

-	if (mappable_base < ggtt_vm->start ||
-	    mappable_end > dev_priv->ggtt.mappable_end ||
-	    unmappable_base < dev_priv->ggtt.mappable_end ||
-	    unmappable_end > ggtt_vm_end) {
+	if (mappable_base < ggtt->base.start ||
+	    mappable_end > ggtt->mappable_end ||
+	    unmappable_base < ggtt->mappable_end ||
+	    unmappable_end > ggtt_end) {
		DRM_ERROR("Invalid ballooning configuration!\n");
		return -EINVAL;
	}

	/* Unmappable graphic memory ballooning */
-	if (unmappable_base > dev_priv->ggtt.mappable_end) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (unmappable_base > ggtt->mappable_end) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[2],
-					dev_priv->ggtt.mappable_end,
+					ggtt->mappable_end,
					unmappable_base);

		if (ret)
@@ -225,30 +225,30 @@ int intel_vgt_balloon(struct drm_device *dev)
	 * No need to partition out the last physical page,
	 * because it is reserved to the guard page.
	 */
-	if (unmappable_end < ggtt_vm_end - PAGE_SIZE) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (unmappable_end < ggtt_end - PAGE_SIZE) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[3],
					unmappable_end,
-					ggtt_vm_end - PAGE_SIZE);
+					ggtt_end - PAGE_SIZE);
		if (ret)
			goto err;
	}

	/* Mappable graphic memory ballooning */
-	if (mappable_base > ggtt_vm->start) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (mappable_base > ggtt->base.start) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[0],
-					ggtt_vm->start, mappable_base);
+					ggtt->base.start, mappable_base);
		if (ret)
			goto err;
	}

-	if (mappable_end < dev_priv->ggtt.mappable_end) {
-		ret = vgt_balloon_space(&ggtt_vm->mm,
+	if (mappable_end < ggtt->mappable_end) {
+		ret = vgt_balloon_space(&ggtt->base.mm,
					&bl_info.space[1],
					mappable_end,
-					dev_priv->ggtt.mappable_end);
+					ggtt->mappable_end);
		if (ret)
			goto err;
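For orientation, the four balloon nodes above carve out the parts of the guest's GGTT that the host did not assign to this vGPU; the ranges can be read straight off the vgt_balloon_space() calls (the guard page at the very top is left alone):

	/*
	 * Guest GGTT layout after ballooning (addresses grow to the right):
	 *
	 * ggtt->base.start              ggtt->mappable_end               ggtt_end
	 *  |-space[0]-|- mappable -|-space[1]-|-space[2]-|- unmappable -|-space[3]-|guard|
	 *             ^mappable_base          ^unmappable_base
	 *
	 * space[0]: [ggtt->base.start,   mappable_base)
	 * space[1]: [mappable_end,       ggtt->mappable_end)
	 * space[2]: [ggtt->mappable_end, unmappable_base)
	 * space[3]: [unmappable_end,     ggtt_end - PAGE_SIZE)
	 */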
...

@@ -2444,6 +2444,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
 {
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
@@ -2459,7 +2460,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
-	if (size_aligned * 2 > dev_priv->ggtt.stolen_usable_size)
+	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);
@@ -15282,7 +15283,8 @@ static void sanitize_watermarks(struct drm_device *dev)
 void intel_modeset_init(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;
@@ -15346,7 +15348,7 @@ void intel_modeset_init(struct drm_device *dev)
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

-	dev->mode_config.fb_base = dev_priv->ggtt.mappable_base;
+	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,

...

@@ -506,6 +506,7 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
					      int size,
					      int fb_cpp)
 {
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int compression_threshold = 1;
	int ret;
	u64 end;
@@ -516,9 +517,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
	 * underruns, even if that range is not reserved by the BIOS. */
	if (IS_BROADWELL(dev_priv) ||
	    IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
+		end = ggtt->stolen_size - 8 * 1024 * 1024;
	else
-		end = dev_priv->ggtt.stolen_usable_size;
+		end = ggtt->stolen_usable_size;

	/* HACK: This code depends on what we will do in *_enable_fbc. If that
	 * code changes, this code needs to change as well.

...

@@ -122,6 +122,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
	struct drm_framebuffer *fb;
	struct drm_device *dev = helper->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	struct drm_i915_gem_object *obj = NULL;
	int size, ret;
@@ -146,7 +147,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
-	if (size * 2 < dev_priv->ggtt.stolen_usable_size)
+	if (size * 2 < ggtt->stolen_usable_size)
		obj = i915_gem_object_create_stolen(dev, size);
	if (obj == NULL)
		obj = i915_gem_alloc_object(dev, size);
@@ -181,7 +182,8 @@ static int intelfb_create(struct drm_fb_helper *helper,
		container_of(helper, struct intel_fbdev, helper);
	struct intel_framebuffer *intel_fb = ifbdev->fb;
	struct drm_device *dev = helper->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
@@ -244,13 +246,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;
+	info->apertures->ranges[0].size = ggtt->mappable_end;

	info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
	info->fix.smem_len = size;

	info->screen_base =
-		ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+		ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj),
			   size);
	if (!info->screen_base) {
		DRM_ERROR("Failed to remap framebuffer into virtual memory\n");

...

@@ -190,13 +190,14 @@ struct intel_overlay {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct overlay_registers __iomem *regs;

	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
		regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
	else
-		regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_wc(ggtt->mappable,
					 i915_gem_obj_ggtt_offset(overlay->reg_bo));

	return regs;
@@ -1481,7 +1482,8 @@ struct intel_overlay_error_state {
 static struct overlay_registers __iomem *
 intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
 {
-	struct drm_i915_private *dev_priv = overlay->dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(overlay->dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct overlay_registers __iomem *regs;

	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -1490,7 +1492,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
		regs = (struct overlay_registers __iomem *)
			overlay->reg_bo->phys_handle->vaddr;
	else
-		regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+		regs = io_mapping_map_atomic_wc(ggtt->mappable,
						i915_gem_obj_ggtt_offset(overlay->reg_bo));

	return regs;

...

@@ -4630,7 +4630,8 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool enable_rc6 = true;
	unsigned long rc6_ctx_base;
@@ -4644,9 +4645,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
	 * for this check.
	 */
	rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
-	if (!((rc6_ctx_base >= dev_priv->ggtt.stolen_reserved_base) &&
-	      (rc6_ctx_base + PAGE_SIZE <= dev_priv->ggtt.stolen_reserved_base +
-					dev_priv->ggtt.stolen_reserved_size))) {
+	if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
+	      (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
+					ggtt->stolen_reserved_size))) {
		DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
		enable_rc6 = false;
	}
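The RC6 condition above is plain interval containment: the one-page context image must lie entirely inside [stolen_reserved_base, stolen_reserved_base + stolen_reserved_size). Written as a reusable predicate (a sketch; the driver open-codes it):

	#include <linux/types.h>

	/* Sketch: does [addr, addr + len) lie entirely within
	 * [base, base + size)? Mirrors the open-coded RC6 context check. */
	static bool range_contains(unsigned long base, unsigned long size,
				   unsigned long addr, unsigned long len)
	{
		return addr >= base && addr + len <= base + size;
	}

With this helper, the test above would read !range_contains(ggtt->stolen_reserved_base, ggtt->stolen_reserved_size, rc6_ctx_base, PAGE_SIZE).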
@@ -5287,9 +5288,9 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
 static void cherryview_setup_pctx(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long pctx_paddr, paddr;
+	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	unsigned long pctx_paddr, paddr;
	u32 pcbr;
	int pctx_size = 32*1024;

...

@@ -2111,6 +2111,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
				     struct intel_ringbuffer *ringbuf)
 {
	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = ringbuf->obj;
	int ret;
@@ -2144,7 +2145,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
	/* Access through the GTT requires the device to be awake. */
	assert_rpm_wakelock_held(dev_priv);

-	ringbuf->virtual_start = ioremap_wc(dev_priv->ggtt.mappable_base +
+	ringbuf->virtual_start = ioremap_wc(ggtt->mappable_base +
					    i915_gem_obj_ggtt_offset(obj), ringbuf->size);
	if (ringbuf->virtual_start == NULL) {
		i915_gem_object_ggtt_unpin(obj);

...