Commit d315fe8b authored by Chris Wilson

drm/i915/gt: Trim gen6 ppgtt updates to PD cachelines

Now that we have the ring TLB invalidation in place, it appears we need
only update the page directory cachelines that we have altered: a great
reduction from rewriting the whole 2MiB ppgtt on every update.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191205234059.1010030-1-chris@chris-wilson.co.uk
Parent bbca083d
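
The heart of the patch is the new (start, end) range taken by gen6_flush_pd() and its rounding out to 64KiB granularity before the PDE walk. Below is a minimal, self-contained sketch of just that arithmetic; the round_down()/round_up() macros mirror the kernel's power-of-two variants, and the addresses are hypothetical:

#include <stdio.h>
#include <stdint.h>

#define SZ_64K (64ull << 10)

/* Power-of-two rounding, in the spirit of the kernel's helpers. */
#define round_down(x, y) ((x) & ~((y) - 1))
#define round_up(x, y)   round_down((x) + (y) - 1, (y))

int main(void)
{
        uint64_t start = 0x12345;       /* hypothetical GTT offsets */
        uint64_t end   = 0x56789;

        start = round_down(start, SZ_64K);      /* 0x10000 */
        end   = round_up(end, SZ_64K) - start;  /* 0x50000: now a length */

        /* gen6_flush_pd() walks only the PDEs covering this span. */
        printf("start=%#llx length=%#llx\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}

Note that after the second rounding step `end` holds a length, not an address, matching the (start, length) pair that gen6_for_each_pde() iterates over.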
@@ -1693,15 +1693,18 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
        vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
 }

-static void gen6_flush_pd(struct gen6_ppgtt *ppgtt)
+static void gen6_flush_pd(struct gen6_ppgtt *ppgtt, u64 start, u64 end)
 {
        struct i915_page_directory * const pd = ppgtt->base.pd;
        struct i915_page_table *pt;
        unsigned int pde;

+       start = round_down(start, SZ_64K);
+       end = round_up(end, SZ_64K) - start;
+
        mutex_lock(&ppgtt->flush);

-       gen6_for_all_pdes(pt, pd, pde)
+       gen6_for_each_pde(pt, pd, start, end, pde)
                gen6_write_pde(ppgtt, pde, pt);

        ioread32(ppgtt->pd_addr + pde - 1);
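
For orientation, here is a hedged sketch of the walk that replaces the old rewrite-everything loop: compute the first and last page-directory entries spanned by the rounded range and rewrite only those. It assumes the usual gen6 geometry (4MiB of address space per PDE, 512 PDEs); the real gen6_for_each_pde() macro in the i915 GTT headers additionally hands back the page table for each slot:

#include <stdint.h>
#include <stdio.h>

#define GEN6_PDE_SHIFT  22      /* assumed: 4MiB of VA per PDE */
#define I915_PDES       512     /* assumed: 512 PDEs per directory */

/* Stand-in for gen6_write_pde(); just records which entry is dirty. */
static void write_pde(unsigned int pde)
{
        printf("rewrite PDE %u\n", pde);
}

static void flush_pd_range(uint64_t start, uint64_t length)
{
        unsigned int first = start >> GEN6_PDE_SHIFT;
        unsigned int last = (start + length - 1) >> GEN6_PDE_SHIFT;
        unsigned int pde;

        for (pde = first; pde <= last && pde < I915_PDES; pde++)
                write_pde(pde);
}

int main(void)
{
        /* The 64KiB-rounded range from above fits in a single PDE. */
        flush_pd_range(0x10000, 0x50000);
        return 0;
}

Compare with the removed gen6_for_all_pdes(), which visited every entry in the directory regardless of what had changed.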
@@ -1754,8 +1757,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
        spin_unlock(&pd->lock);

        if (i915_vma_is_bound(ppgtt->vma, I915_VMA_GLOBAL_BIND))
-               /* Rewrite them all! Anything less misses an invalidate. */
-               gen6_flush_pd(ppgtt);
+               gen6_flush_pd(ppgtt, from, start);

        goto out;
@@ -1844,7 +1846,7 @@ static int pd_vma_bind(struct i915_vma *vma,
        px_base(ppgtt->base.pd)->ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
        ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;

-       gen6_flush_pd(ppgtt);
+       gen6_flush_pd(ppgtt, 0, ppgtt->base.vm.total);

        return 0;
 }
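
To gauge the saving at the two call sites, a back-of-the-envelope comparison; the figures (four-byte PDEs, 64-byte CPU cachelines, 512 entries) are assumptions for illustration, not taken from the patch:

/*
 * pd_vma_bind:          gen6_flush_pd(ppgtt, 0, vm.total)
 *                       -> all 512 PDEs, i.e. 512 * 4 / 64 = 32
 *                          cachelines; unavoidable, as the directory
 *                          was just (re)mapped into the GGTT.
 *
 * gen6_alloc_va_range:  gen6_flush_pd(ppgtt, from, start)
 *                       -> only the PDEs the new allocation spans;
 *                          growing a single 4MiB page table dirties
 *                          one entry, one cacheline instead of 32.
 */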