Commit d7813bc9 authored by Paul Mundt

sh: Build PMB entry links for existing contiguous multi-page mappings.

This plugs in entry sizing support for existing mappings and then builds
on top of that for linking together entries that are mapping contiguous
areas. This will ultimately permit us to coalesce mappings and promote
head pages while reclaiming PMB slots for dynamic remapping.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent 9edef286
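To make the intent concrete, here is a minimal standalone sketch of the contiguity test this patch adds in pmb_synchronize_mappings(). The struct below is a simplified stand-in for the kernel's struct pmb_entry, and the helper name entries_are_contiguous() is hypothetical, not part of the patch:

/* Simplified stand-in for the kernel's struct pmb_entry. */
struct pmb_entry_sketch {
	unsigned long vpn;		/* virtual address of the mapping */
	unsigned long ppn;		/* physical address of the mapping */
	unsigned long size;		/* mapping size in bytes */
	struct pmb_entry_sketch *link;	/* next entry of a contiguous run */
};

/*
 * Hypothetical helper: two entries span one contiguous mapping when
 * the current entry begins exactly where the previous one ends, in
 * both the virtual and the physical address spaces. The patch
 * open-codes this same test while scanning the boot mappings.
 */
static int entries_are_contiguous(const struct pmb_entry_sketch *prev,
				  const struct pmb_entry_sketch *cur)
{
	return prev != NULL &&
	       cur->vpn == prev->vpn + prev->size &&
	       cur->ppn == prev->ppn + prev->size;
}

Linking adjacent entries this way lets a multi-entry mapping be torn down from its head entry, and is what will later allow contiguous runs to be coalesced into a single larger entry so the freed PMB slots can be reused for dynamic remapping.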
@@ -59,6 +59,7 @@ struct pmb_entry {
 	unsigned long vpn;
 	unsigned long ppn;
 	unsigned long flags;
+	unsigned long size;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -66,7 +67,6 @@ struct pmb_entry {
 	 */
 	int entry;
 
-	struct pmb_entry *next;
 	/* Adjacent entry link for contiguous multi-entry mappings */
 	struct pmb_entry *link;
 };
@@ -90,20 +90,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn	= ppn;
 	pmbe->flags	= flags;
 	pmbe->entry	= pos;
+	pmbe->size	= 0;
 
 	return pmbe;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	int pos = pmbe->entry;
-
-	pmbe->vpn	= 0;
-	pmbe->ppn	= 0;
-	pmbe->flags	= 0;
-	pmbe->entry	= 0;
-
-	clear_bit(pos, pmb_map);
+	clear_bit(pmbe->entry, pmb_map);
+	pmbe->entry = PMB_NO_ENTRY;
 }
 
 /*
@@ -198,6 +193,8 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 		vaddr	+= pmb_sizes[i].size;
 		size	-= pmb_sizes[i].size;
 
+		pmbe->size = pmb_sizes[i].size;
+
 		/*
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
@@ -273,25 +270,7 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-static inline void
-pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
-{
-	unsigned int size;
-	const char *sz_str;
-
-	size = data_val & PMB_SZ_MASK;
-
-	sz_str = (size == PMB_SZ_16M) ? " 16MB":
-		 (size == PMB_SZ_64M) ? " 64MB":
-		 (size == PMB_SZ_128M) ? "128MB":
-					 "512MB";
-
-	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-		(data_val & PMB_C) ? "" : "un");
-}
-
-static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 {
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
@@ -299,7 +278,8 @@ static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 static int pmb_synchronize_mappings(void)
 {
 	unsigned int applied = 0;
-	int i;
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
 
 	pr_info("PMB: boot mappings:\n");
@@ -323,6 +303,7 @@ static int pmb_synchronize_mappings(void)
 		unsigned long addr, data;
 		unsigned long addr_val, data_val;
 		unsigned long ppn, vpn, flags;
+		unsigned int size;
 		struct pmb_entry *pmbe;
 
 		addr = mk_pmb_addr(i);
@@ -366,7 +347,8 @@ static int pmb_synchronize_mappings(void)
 			__raw_writel(data_val, data);
 		}
 
-		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);
+		size = data_val & PMB_SZ_MASK;
+		flags = size | (data_val & PMB_CACHE_MASK);
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		if (IS_ERR(pmbe)) {
@@ -374,7 +356,24 @@ static int pmb_synchronize_mappings(void)
 			continue;
 		}
 
-		pmb_log_mapping(data_val, vpn, ppn);
+		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+			if (pmb_sizes[j].flag == size)
+				pmbe->size = pmb_sizes[j].size;
+
+		/*
+		 * Compare the previous entry against the current one to
+		 * see if the entries span a contiguous mapping. If so,
+		 * setup the entry links accordingly.
+		 */
+		if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
+			     (pmbe->ppn == (pmbp->ppn + pmbp->size))))
+			pmbp->link = pmbe;
+
+		pmbp = pmbe;
+
+		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
+			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
+			(data_val & PMB_C) ? "" : "un");
 
 		applied++;
 	}