Commit 69876bed authored by Michel Thierry, committed by Daniel Vetter

drm/i915/gen8: page directories rework allocation

Start using the gen8_for_each_pdpe macro to allocate the page directories.

Similar to PTs, while setting up a page directory, make all entries of
the pd point to the scratch pd before mapping (and make all its entries
point to the scratch page); this is to be safe in case of out-of-bound
access or proactive prefetch. Systems without LLC require an explicit
flush.
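
For context, the gen8_for_each_pdpe walker named above steps through a PDP
range in PDPE-sized (1GB) chunks. It comes from i915_gem_gtt.h in this same
patch series; a sketch of its shape (shown for reference, not part of this
diff):

#define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     pd = (pdp)->page_directory[iter],				\
	     length > 0 && iter < GEN8_LEGACY_PDPES;			\
	     iter++,							\
	     temp = ALIGN(start + 1, 1 << GEN8_PDPE_SHIFT) - start,	\
	     temp = min(temp, length),					\
	     start += temp, length -= temp)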

v2: Rebased after s/free_pt_*/unmap_and_free_pt/ change.
v3: Rebased after teardown va range logic was removed.
v4: Keep setting up all page directories for systems with less than 4GB
of memory.
v5: Initialize PDs. (Mika)
v6: Also initialize the extra PDs for systems with less than 4GB of
memory. (Mika)

Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 9271d959
@@ -606,6 +606,36 @@ static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
 	}
 }
 
+static void __gen8_do_map_pt(gen8_pde_t * const pde,
+			     struct i915_page_table *pt,
+			     struct drm_device *dev)
+{
+	gen8_pde_t entry =
+		gen8_pde_encode(dev, pt->daddr, I915_CACHE_LLC);
+	*pde = entry;
+}
+
+static void gen8_initialize_pd(struct i915_address_space *vm,
+			       struct i915_page_directory *pd)
+{
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(vm, struct i915_hw_ppgtt, base);
+	gen8_pde_t *page_directory;
+	struct i915_page_table *pt;
+	int i;
+
+	page_directory = kmap_atomic(pd->page);
+	pt = ppgtt->scratch_pt;
+	for (i = 0; i < I915_PDES; i++)
+		/* Map the PDE to the page table */
+		__gen8_do_map_pt(page_directory + i, pt, vm->dev);
+
+	if (!HAS_LLC(vm->dev))
+		drm_clflush_virt_range(page_directory, PAGE_SIZE);
+
+	kunmap_atomic(page_directory);
+}
+
 static void gen8_free_page_tables(struct i915_page_directory *pd, struct drm_device *dev)
 {
 	int i;
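
Both new helpers write PDEs built by gen8_pde_encode(). For reference, that
encoder (already present elsewhere in this file) looks roughly like this:

static gen8_pde_t gen8_pde_encode(struct drm_device *dev,
				  dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;	/* page-aligned bus address of the page table */
	return pde;
}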
@@ -633,6 +663,8 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
 		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
 	}
+
+	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
 }
 
 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
@@ -663,25 +695,55 @@ static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
 	return -ENOMEM;
 }
 
-static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
-						const int max_pdp)
+static int gen8_ppgtt_alloc_page_directories(struct i915_page_directory_pointer *pdp,
+					     uint64_t start,
+					     uint64_t length)
 {
-	int i;
+	struct i915_hw_ppgtt *ppgtt =
+		container_of(pdp, struct i915_hw_ppgtt, pdp);
+	struct i915_page_directory *unused;
+	uint64_t temp;
+	uint32_t pdpe;
+
+	/* FIXME: PPGTT container_of won't work for 64b */
+	WARN_ON((start + length) > 0x800000000ULL);
+
+	gen8_for_each_pdpe(unused, pdp, start, length, temp, pdpe) {
+		WARN_ON(unused);
+		pdp->page_directory[pdpe] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[pdpe]))
+			goto unwind_out;
+
+		gen8_initialize_pd(&ppgtt->base,
+				   ppgtt->pdp.page_directory[pdpe]);
+		ppgtt->num_pd_pages++;
+	}
 
-	for (i = 0; i < max_pdp; i++) {
-		ppgtt->pdp.page_directory[i] = alloc_pd_single();
-		if (IS_ERR(ppgtt->pdp.page_directory[i]))
+	/* XXX: Still alloc all page directories in systems with less than
+	 * 4GB of memory. This won't be needed after a subsequent patch.
+	 */
+	while (ppgtt->num_pd_pages < GEN8_LEGACY_PDPES) {
+		ppgtt->pdp.page_directory[ppgtt->num_pd_pages] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[ppgtt->num_pd_pages]))
 			goto unwind_out;
+
+		gen8_initialize_pd(&ppgtt->base,
+				   ppgtt->pdp.page_directory[ppgtt->num_pd_pages]);
+		pdpe++;
+		ppgtt->num_pd_pages++;
 	}
 
-	ppgtt->num_pd_pages = max_pdp;
 	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
 
 	return 0;
 
 unwind_out:
-	while (i--)
-		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
+	while (pdpe--) {
+		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe]);
+		ppgtt->num_pd_pages--;
+	}
+
+	WARN_ON(ppgtt->num_pd_pages);
 
 	return -ENOMEM;
 }
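
A standalone check of the index math the allocation loop relies on (a
sketch; the GEN8_PDPE_SHIFT/GEN8_PDPE_MASK values are assumed from
i915_gem_gtt.h of this era, where each PDPE covers 1GB and the legacy
layout has four of them):

#include <assert.h>
#include <stdint.h>

#define GEN8_PDPE_SHIFT	30	/* one PDPE spans 1GB */
#define GEN8_PDPE_MASK	0x3	/* GEN8_LEGACY_PDPES - 1 */

static uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

int main(void)
{
	assert(gen8_pdpe_index(0) == 0);		/* first GB -> PDPE 0 */
	assert(gen8_pdpe_index(3ULL << 30) == 3);	/* fourth GB -> PDPE 3 */
	return 0;
}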
@@ -691,7 +753,8 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 {
 	int ret;
 
-	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
+	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->pdp, ppgtt->base.start,
+						ppgtt->base.total);
 	if (ret)
 		return ret;
@@ -769,6 +832,17 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	if (size % (1<<30))
 		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
 
+	ppgtt->base.start = 0;
+	/* This is the area that we advertise as usable for the caller */
+	ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
+	WARN_ON(ppgtt->base.total == 0);
+
+	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
+	if (IS_ERR(ppgtt->scratch_pt))
+		return PTR_ERR(ppgtt->scratch_pt);
+
+	gen8_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);
+
 	/* 1. Do all our allocations for page directories and page tables.
 	 * We allocate more than was asked so that we can point the unused parts
 	 * to valid entries that point to scratch page. Dynamic page tables
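
Worked numbers behind the new ppgtt->base.total assignment above (a
standalone sketch; the constants match gen8 at the time of this patch:
512 PDEs per PD, 512 PTEs per PT, 4KB pages, max_pdp at most 4):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t max_pdp = 4;	/* GEN8_LEGACY_PDPES */
	const uint64_t pdes = 512;	/* I915_PDES */
	const uint64_t ptes = 512;	/* GEN8_PTES */
	const uint64_t page = 4096;	/* PAGE_SIZE */

	/* 4 * 512 * 512 * 4096 = 4294967296 bytes = 4GB of PPGTT space */
	printf("total = %llu bytes\n",
	       (unsigned long long)(max_pdp * pdes * ptes * page));
	return 0;
}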
@@ -794,7 +868,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	}
 
 	/*
-	 * 3. Map all the page directory entires to point to the page tables
+	 * 3. Map all the page directory entries to point to the page tables
 	 * we've allocated.
 	 *
 	 * For now, the PPGTT helper functions all require that the PDEs are
@@ -820,10 +894,6 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
-	ppgtt->base.start = 0;
-
-	/* This is the area that we advertise as usable for the caller */
-	ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;
 
 	/* Set all ptes to a valid scratch page. Also above requested space */
 	ppgtt->base.clear_range(&ppgtt->base, 0,
......