Commit 09942c65 authored by Michel Thierry, committed by Daniel Vetter

drm/i915: num_pd_pages/num_pd_entries isn't useful

These values are never quite useful for dynamic allocations of the page
tables. Getting rid of them will help prevent later confusion.

v2: Updated to use unmap_and_free_pd functions.
v3: Updated gen8_ppgtt_free after teardown logic was removed.
v4: Rebase after s/page_tables/page_table/.
v5: Keep allocating all page directories in GEN8+ systems with less
than 4GB of memory. Updated gen6_for_all_pdes.
v6: Prevent (harmless) out of range access in gen6_for_all_pdes.
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Michel Thierry <michel.thierry@intel.com> (v2+)
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Parent 7cb6d7ac
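The change replaces bookkeeping counters with fixed bounds and iteration macros: loops that used to run to ppgtt->num_pd_pages or ppgtt->num_pd_entries now run to GEN8_LEGACY_PDPES / I915_PDES (or use gen6_for_all_pdes) and simply skip slots that were never allocated. A minimal standalone sketch of the before/after shape, using simplified stand-in types rather than the real driver structures:

#include <stdio.h>

/* Simplified stand-ins for the driver structures; the real definitions
 * live in i915_gem_gtt.h. */
#define GEN8_LEGACY_PDPES 4

struct fake_ppgtt {
	unsigned num_pd_pages;	/* the counter this patch removes */
	void *page_directory[GEN8_LEGACY_PDPES];
};

/* Before: the loop bound is a counter that every allocation and
 * teardown path has to keep in sync. */
static void free_directories_old(struct fake_ppgtt *ppgtt)
{
	unsigned i;

	for (i = 0; i < ppgtt->num_pd_pages; i++)
		printf("free pd %u\n", i);
}

/* After: the loop bound is the fixed size of the backing array and
 * never-allocated slots are simply skipped. */
static void free_directories_new(struct fake_ppgtt *ppgtt)
{
	unsigned i;

	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
		if (!ppgtt->page_directory[i])
			continue;
		printf("free pd %u\n", i);
	}
}

int main(void)
{
	static int pd0, pd1;	/* dummy objects standing in for page directories */
	struct fake_ppgtt ppgtt = {
		.num_pd_pages = 2,
		.page_directory = { &pd0, &pd1, NULL, NULL },
	};

	free_directories_old(&ppgtt);
	free_directories_new(&ppgtt);
	return 0;
}

With a fixed bound there is no counter to keep in sync across the allocation, error-unwind and teardown paths, which is what the commit message means by preventing later confusion.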
@@ -2188,8 +2188,6 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
 	if (!ppgtt)
 		return;
-	seq_printf(m, "Page directories: %d\n", ppgtt->num_pd_pages);
-	seq_printf(m, "Page tables: %d\n", ppgtt->num_pd_entries);
 	for_each_ring(ring, dev_priv, unused) {
 		seq_printf(m, "%s\n", ring->name);
 		for (i = 0; i < 4; i++) {
@@ -657,7 +657,7 @@ static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
 	int i;
-	for (i = 0; i < ppgtt->num_pd_pages; i++) {
+	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
 		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
 			continue;
@@ -737,34 +737,26 @@ static int gen8_ppgtt_alloc_page_directories(struct i915_page_directory_pointer
 		gen8_initialize_pd(&ppgtt->base,
 				   ppgtt->pdp.page_directory[pdpe]);
-		ppgtt->num_pd_pages++;
 	}
 	/* XXX: Still alloc all page directories in systems with less than
 	 * 4GB of memory. This won't be needed after a subsequent patch.
 	 */
-	while (ppgtt->num_pd_pages < GEN8_LEGACY_PDPES) {
-		ppgtt->pdp.page_directory[ppgtt->num_pd_pages] = alloc_pd_single();
-		if (IS_ERR(ppgtt->pdp.page_directory[ppgtt->num_pd_pages]))
+	while (pdpe < GEN8_LEGACY_PDPES) {
+		ppgtt->pdp.page_directory[pdpe] = alloc_pd_single();
+		if (IS_ERR(ppgtt->pdp.page_directory[pdpe]))
 			goto unwind_out;
 		gen8_initialize_pd(&ppgtt->base,
-				   ppgtt->pdp.page_directory[ppgtt->num_pd_pages]);
-		ppgtt->num_pd_pages++;
+				   ppgtt->pdp.page_directory[pdpe]);
+		pdpe++;
 	}
-	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);
 	return 0;
 unwind_out:
-	while (pdpe--) {
+	while (pdpe--)
 		unmap_and_free_pd(ppgtt->pdp.page_directory[pdpe]);
-		ppgtt->num_pd_pages--;
-	}
-	WARN_ON(ppgtt->num_pd_pages);
 	return -ENOMEM;
 }
@@ -787,8 +779,6 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 					     &ppgtt->base);
 		if (ret)
 			goto err_out;
-		ppgtt->num_pd_entries += I915_PDES;
 	}
 	/* XXX: We allocated all page directories in systems with less than
@@ -801,12 +791,9 @@ static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
 		if (ret)
 			goto err_out;
-		ppgtt->num_pd_entries += I915_PDES;
 		pdpe++;
 	}
-	WARN_ON(pdpe > ppgtt->num_pd_pages);
 	return 0;
 err_out:
@@ -868,8 +855,6 @@ static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
  */
 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 {
-	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
-	const int min_pt_pages = I915_PDES * max_pdp;
 	int i, j, ret;
 	if (size % (1<<30))
@@ -940,16 +925,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
 	/* Set all ptes to a valid scratch page. Also above requested space */
-	ppgtt->base.clear_range(&ppgtt->base, 0,
-				ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
-				true);
-	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
-			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
-	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
-			 ppgtt->num_pd_entries,
-			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
+	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
 	return 0;
 bail:
@@ -959,26 +935,20 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
 {
-	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
 	struct i915_address_space *vm = &ppgtt->base;
-	gen6_pte_t __iomem *pd_addr;
+	struct i915_page_table *unused;
 	gen6_pte_t scratch_pte;
 	uint32_t pd_entry;
-	int pte, pde;
+	uint32_t pte, pde, temp;
+	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
 	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
-	pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
-		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);
-	seq_printf(m, " VM %p (pd_offset %x-%x):\n", vm,
-		   ppgtt->pd.pd_offset,
-		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
-	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
+	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) {
 		u32 expected;
 		gen6_pte_t *pt_vaddr;
 		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
-		pd_entry = readl(pd_addr + pde);
+		pd_entry = readl(ppgtt->pd_addr + pde);
 		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
 		if (pd_entry != expected)
@@ -1376,13 +1346,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
 {
-	int i;
-	for (i = 0; i < ppgtt->num_pd_entries; i++) {
-		struct i915_page_table *pt = ppgtt->pd.page_table[i];
+	struct i915_page_table *pt;
+	uint32_t pde;
+	gen6_for_all_pdes(pt, ppgtt, pde) {
 		if (pt != ppgtt->scratch_pt)
-			unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
+			unmap_and_free_pt(pt, ppgtt->base.dev);
 	}
 	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
@@ -1443,7 +1412,6 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
 	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
 		DRM_DEBUG("Forced to use aperture for PDEs\n");
-	ppgtt->num_pd_entries = I915_PDES;
 	return 0;
 err_out:
@@ -1491,7 +1459,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	if (aliasing) {
 		/* preallocate all pts */
-		ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
+		ret = alloc_pt_range(&ppgtt->pd, 0, I915_PDES,
 				ppgtt->base.dev);
 		if (ret) {
@@ -1505,7 +1473,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
 	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
 	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
 	ppgtt->base.start = 0;
-	ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
+	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
 	ppgtt->debug_dump = gen6_dump_ppgtt;
 	ppgtt->pd.pd_offset =
@@ -314,8 +314,6 @@ struct i915_hw_ppgtt {
 	struct kref ref;
 	struct drm_mm_node node;
 	unsigned long pd_dirty_rings;
-	unsigned num_pd_entries;
-	unsigned num_pd_pages; /* gen8+ */
 	union {
 		struct i915_page_directory_pointer pdp;
 		struct i915_page_directory pd;
@@ -350,6 +348,11 @@ struct i915_hw_ppgtt {
 	     temp = min_t(unsigned, temp, length), \
 	     start += temp, length -= temp)
+#define gen6_for_all_pdes(pt, ppgtt, iter) \
+	for (iter = 0; \
+	     pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \
+	     iter++)
 static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
 {
 	const uint32_t mask = NUM_PTE(pde_shift) - 1;
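For reference, the condition of the new gen6_for_all_pdes macro above uses the comma operator: the table lookup is evaluated first, and the bound check on iter is what actually terminates the loop. A small self-contained sketch of the same pattern (names and sizes here are illustrative, not the driver's); because the lookup also runs on the final, failing iteration, the demo array carries one spare slot:

#include <stdio.h>

#define N_ENTRIES 4

/* Same shape as gen6_for_all_pdes: the assignment runs first, the
 * bound check second, and only the bound check controls the loop. */
#define for_all_entries(item, table, iter) \
	for ((iter) = 0; \
	     (item) = (table)[(iter)], (iter) < N_ENTRIES; \
	     (iter)++)

int main(void)
{
	/* One spare slot so the lookup at iter == N_ENTRIES stays in bounds. */
	const char *table[N_ENTRIES + 1] = { "pt0", "pt1", NULL, "pt3", NULL };
	const char *item;
	unsigned iter;

	for_all_entries(item, table, iter) {
		if (!item)	/* unallocated slot, comparable to pt == scratch_pt */
			continue;
		printf("entry %u: %s\n", iter, item);
	}
	return 0;
}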