Commit 09942c65, authored by Michel Thierry, committed by Daniel Vetter

drm/i915: num_pd_pages/num_pd_entries isn't useful

These values are of no real use once the page tables are allocated dynamically.
Getting rid of them will help prevent later confusion.

v2: Updated to use unmap_and_free_pd functions.
v3: Updated gen8_ppgtt_free after teardown logic was removed.
v4: Rebase after s/page_tables/page_table/.
v5: Keep allocating all page directories in GEN8+ systems with less
than 4GB of memory. Updated gen6_for_all_pdes.
v6: Prevent (harmless) out of range access in gen6_for_all_pdes.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent: 7cb6d7ac
No related merge requests.
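The point of the change can be seen with a bit of arithmetic: everything the two removed counters recorded follows from fixed capacity constants and the VM size. Below is a rough, self-contained sketch in plain C (the constants stand in for GEN8_LEGACY_PDPES, I915_PDES, GEN8_PTES and PAGE_SIZE as I understand the gen8 layout; it is not driver code):

/* Rough standalone sketch (not i915 code): the values that num_pd_pages and
 * num_pd_entries used to cache are derivable from fixed capacity constants
 * and the requested VM size, so the fields can go away.  Constants below are
 * stand-ins chosen to mirror the gen8 layout (512 PDEs per page directory,
 * 512 PTEs per page table, 4 KiB pages => 1 GiB covered per directory). */
#include <stdint.h>
#include <stdio.h>

#define GEN8_LEGACY_PDPES 4      /* page directory pointer entries */
#define I915_PDES         512    /* page directory entries per PD */
#define GEN8_PTES         512    /* page table entries per PT */
#define PAGE_SIZE         4096

int main(void)
{
        uint64_t size = 2ULL << 30;                      /* a 2 GiB ppgtt */
        uint64_t per_pd = (uint64_t)I915_PDES * GEN8_PTES * PAGE_SIZE;

        /* what ppgtt->num_pd_pages used to track; at this point in the
         * series gen8 still allocates all GEN8_LEGACY_PDPES directories */
        unsigned pd_pages = GEN8_LEGACY_PDPES;

        /* what ppgtt->num_pd_entries used to track: page tables backing 'size' */
        unsigned pd_entries = (unsigned)((size + per_pd - 1) / per_pd) * I915_PDES;

        printf("page directories: %u\n", pd_pages);
        printf("page tables for %llu bytes: %u\n",
               (unsigned long long)size, pd_entries);
        return 0;
}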
@@ -2188,8 +2188,6 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
         if (!ppgtt)
                 return;

-        seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
-        seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
         for_each_ring(ring, dev_priv, unused) {
                 seq_printf(m, "%s\n", ring->name);
                 for (i = 0; i < 4; i++) {
@@ -657,7 +657,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
         int i;

-        for (i = 0; i < ppgtt->num_pd_pages; i++) {
+        for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
                 if (WARN_ON(!ppgtt->pdp.page_directory[i]))
                         continue;
@@ -737,34 +737,26 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_page_directory_pointer
                 gen8_initialize_pd(&ppgtt->base,
                                    ppgtt->pdp.page_directory[pdpe]);
-
-                ppgtt->num_pd_pages++;
         }

         /* XXX: Still alloc all page directories in systems with less than
          * 4GB of memory. This won't be needed after a subsequent patch.
          */
-        while (ppgtt->num_pd_pages < GEN8_LEGACY_PDPES) {
-                ppgtt->pdp.page_directory[ppgtt->num_pd_pages] = alloc_pd_single();
-                if (IS_ERR(ppgtt->pdp.page_directory[ppgtt->num_pd_pages]))
+        while (pdpe < GEN8_LEGACY_PDPES) {
+                ppgtt->pdp.page_directory[pdpe] = alloc_pd_single();
+                if (IS_ERR(ppgtt->pdp.page_directory[pdpe]))
                         goto unwind_out;

                 gen8_initialize_pd(&ppgtt->base,
-                                   ppgtt->pdp.page_directory[ppgtt->num_pd_pages]);
-                ppgtt->num_pd_pages++;
+                                   ppgtt->pdp.page_directory[pdpe]);
+                pdpe++;
         }

-        BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
-
         return 0;

 unwind_out:
-        while (pdpe--) {
+        while (pdpe--)
                 unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe]);
-                ppgtt->num_pd_pages--;
-        }
-
-        WARN_ON(ppgtt->num_pd_pages);

         return -ENOMEM;
 }
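With num_pd_pages gone, the local pdpe index doubles as the count of directories set up so far, which is all the unwind path needs. A toy, self-contained sketch of that allocate-then-unwind pattern follows (simplified types and helpers, not the real i915 structures; error handling reduced to NULL / -1):

/* Toy sketch of the allocate-then-unwind pattern above: the loop index itself
 * records how many page directories were set up, so no separate counter field
 * is needed to back out on failure.  Types and helpers are simplified
 * stand-ins, not the real i915 ones. */
#include <stdlib.h>

#define GEN8_LEGACY_PDPES 4

struct toy_pd { int dummy; };

static struct toy_pd *alloc_pd_single(void)
{
        return calloc(1, sizeof(struct toy_pd));
}

static void unmap_and_free_pd(struct toy_pd *pd)
{
        free(pd);
}

static int alloc_page_directories(struct toy_pd *pdp[GEN8_LEGACY_PDPES])
{
        unsigned int pdpe = 0;

        while (pdpe < GEN8_LEGACY_PDPES) {
                pdp[pdpe] = alloc_pd_single();
                if (!pdp[pdpe])
                        goto unwind_out;
                pdpe++;
        }

        return 0;

unwind_out:
        /* exactly pdpe directories exist; free them in reverse order */
        while (pdpe--)
                unmap_and_free_pd(pdp[pdpe]);

        return -1;        /* stands in for -ENOMEM */
}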
@@ -787,8 +779,6 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
                                      &ppgtt->base);
                 if (ret)
                         goto err_out;
-
-                ppgtt->num_pd_entries += I915_PDES;
         }

         /* XXX: We allocated all page directories in systems with less than
@@ -801,12 +791,9 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
                 if (ret)
                         goto err_out;

-                ppgtt->num_pd_entries += I915_PDES;
                 pdpe++;
         }

-        WARN_ON(pdpe > ppgtt->num_pd_pages);
-
         return 0;

 err_out:
@@ -868,8 +855,6 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 {
-        const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-        const int min_pt_pages = I915_PDES * max_pdp;
         int i, j, ret;

         if (size % (1<<30))
@@ -940,16 +925,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
         ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
         ppgtt->base.cleanup = gen8_ppgtt_cleanup;

         /* Set all ptes to a valid scratch page. Also above requested space */
-        ppgtt->base.clear_range(&ppgtt->base, 0,
-                                ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
-                                true);
-
-        DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
-                         ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
-        DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
-                         ppgtt->num_pd_entries,
-                         (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
+        ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

         return 0;

 bail:
@@ -959,26 +935,20 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-        struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
         struct i915_address_space *vm = &ppgtt->base;
-        gen6_pte_t __iomem *pd_addr;
+        struct i915_page_table *unused;
         gen6_pte_t scratch_pte;
         uint32_t pd_entry;
-        int pte, pde;
+        uint32_t pte, pde, temp;
+        uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

         scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

-        pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-                ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
-
-        seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
-                   ppgtt->pd.pd_offset,
-                   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
-        for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+        gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
                 u32 expected;
                 gen6_pte_t *pt_vaddr;
                 dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
-                pd_entry = readl(pd_addr + pde);
+                pd_entry = readl(ppgtt->pd_addr + pde);
                 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

                 if (pd_entry != expected)
@@ -1376,13 +1346,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
-        int i;
-
-        for (i = 0; i < ppgtt->num_pd_entries; i++) {
-                struct i915_page_table *pt = ppgtt->pd.page_table[i];
+        struct i915_page_table *pt;
+        uint32_t pde;

+        gen6_for_all_pdes(pt, ppgtt, pde) {
                 if (pt != ppgtt->scratch_pt)
-                        unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+                        unmap_and_free_pt(pt, ppgtt->base.dev);
         }

         unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
@@ -1443,7 +1412,6 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
         if (ppgtt->node.start < dev_priv->gtt.mappable_end)
                 DRM_DEBUG("Forced to use aperture for PDEs\n");

-        ppgtt->num_pd_entries = I915_PDES;
         return 0;

 err_out:
@@ -1491,7 +1459,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
         if (aliasing) {
                 /* preallocate all pts */
-                ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+                ret = alloc_pt_range(&ppgtt->pd, 0, I915_PDES,
                                      ppgtt->base.dev);

                 if (ret) {
@@ -1505,7 +1473,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
         ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
         ppgtt->base.cleanup = gen6_ppgtt_cleanup;
         ppgtt->base.start = 0;
-        ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
+        ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
         ppgtt->debug_dump = gen6_dump_ppgtt;

         ppgtt->pd.pd_offset =
@@ -314,8 +314,6 @@ struct i915_hw_ppgtt {
         struct kref ref;
         struct drm_mm_node node;
         unsigned long pd_dirty_rings;
-        unsigned num_pd_entries;
-        unsigned num_pd_pages; /* gen8+ */
         union {
                 struct i915_page_directory_pointer pdp;
                 struct i915_page_directory pd;
@@ -350,6 +348,11 @@ struct i915_hw_ppgtt {
              temp = min_t(unsigned, temp, length), \
              start += temp, length -= temp)

+#define gen6_for_all_pdes(pt, ppgtt, iter) \
+        for (iter = 0; \
+             pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
+             iter++)
+
 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
         const uint32_t mask = NUM_PTE(pde_shift) - 1;