diff --git a/arch/sh/include/asm/mmu.h b/arch/sh/include/asm/mmu.h
index 44c904341414d0791a76f9c166d972566204c13f..5453169bf0529a259bd3197557f478307e0946fa 100644
--- a/arch/sh/include/asm/mmu.h
+++ b/arch/sh/include/asm/mmu.h
@@ -59,6 +59,7 @@ struct pmb_entry {
 	unsigned long vpn;
 	unsigned long ppn;
 	unsigned long flags;
+	unsigned long size;
 
 	/*
 	 * 0 .. NR_PMB_ENTRIES for specific entry selection, or
@@ -66,7 +67,6 @@ struct pmb_entry {
 	 */
 	int entry;
 
-	struct pmb_entry *next;
 	/* Adjacent entry link for contiguous multi-entry mappings */
 	struct pmb_entry *link;
 };
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index 924f3e4b3a82f145859bf4379f1b1c5d297179ed..f2ad6e374b648eaec2d3a9f329078add82396a12 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -90,20 +90,15 @@ static struct pmb_entry *pmb_alloc(unsigned long vpn, unsigned long ppn,
 	pmbe->ppn = ppn;
 	pmbe->flags = flags;
 	pmbe->entry = pos;
+	pmbe->size = 0;
 
 	return pmbe;
 }
 
 static void pmb_free(struct pmb_entry *pmbe)
 {
-	int pos = pmbe->entry;
-
-	pmbe->vpn = 0;
-	pmbe->ppn = 0;
-	pmbe->flags = 0;
-	pmbe->entry = 0;
-
-	clear_bit(pos, pmb_map);
+	clear_bit(pmbe->entry, pmb_map);
+	pmbe->entry = PMB_NO_ENTRY;
 }
 
 /*
@@ -198,6 +193,8 @@ long pmb_remap(unsigned long vaddr, unsigned long phys,
 		vaddr += pmb_sizes[i].size;
 		size -= pmb_sizes[i].size;
 
+		pmbe->size = pmb_sizes[i].size;
+
 		/*
 		 * Link adjacent entries that span multiple PMB entries
 		 * for easier tear-down.
@@ -273,25 +270,7 @@ static void pmb_unmap_entry(struct pmb_entry *pmbe)
 	} while (pmbe);
 }
 
-static inline void
-pmb_log_mapping(unsigned long data_val, unsigned long vpn, unsigned long ppn)
-{
-	unsigned int size;
-	const char *sz_str;
-
-	size = data_val & PMB_SZ_MASK;
-
-	sz_str = (size == PMB_SZ_16M) ? " 16MB":
-		 (size == PMB_SZ_64M) ? " 64MB":
-		 (size == PMB_SZ_128M) ? "128MB":
-					 "512MB";
-
-	pr_info("\t0x%08lx -> 0x%08lx [ %s %scached ]\n",
-		vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, sz_str,
-		(data_val & PMB_C) ? "" : "un");
-}
-
-static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
+static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 {
 	return ppn >= __pa(memory_start) && ppn < __pa(memory_end);
 }
@@ -299,7 +278,8 @@ static inline unsigned int pmb_ppn_in_range(unsigned long ppn)
 static int pmb_synchronize_mappings(void)
 {
 	unsigned int applied = 0;
-	int i;
+	struct pmb_entry *pmbp = NULL;
+	int i, j;
 
 	pr_info("PMB: boot mappings:\n");
 
@@ -323,6 +303,7 @@ static int pmb_synchronize_mappings(void)
 		unsigned long addr, data;
 		unsigned long addr_val, data_val;
 		unsigned long ppn, vpn, flags;
+		unsigned int size;
 		struct pmb_entry *pmbe;
 
 		addr = mk_pmb_addr(i);
@@ -366,7 +347,8 @@ static int pmb_synchronize_mappings(void)
 			__raw_writel(data_val, data);
 		}
 
-		flags = data_val & (PMB_SZ_MASK | PMB_CACHE_MASK);
+		size = data_val & PMB_SZ_MASK;
+		flags = size | (data_val & PMB_CACHE_MASK);
 
 		pmbe = pmb_alloc(vpn, ppn, flags, i);
 		if (IS_ERR(pmbe)) {
@@ -374,7 +356,24 @@ static int pmb_synchronize_mappings(void)
 			continue;
 		}
 
-		pmb_log_mapping(data_val, vpn, ppn);
+		for (j = 0; j < ARRAY_SIZE(pmb_sizes); j++)
+			if (pmb_sizes[j].flag == size)
+				pmbe->size = pmb_sizes[j].size;
+
+		/*
+		 * Compare the previous entry against the current one to
+		 * see if the entries span a contiguous mapping. If so,
+		 * setup the entry links accordingly.
+		 */
+		if (pmbp && ((pmbe->vpn == (pmbp->vpn + pmbp->size)) &&
+			     (pmbe->ppn == (pmbp->ppn + pmbp->size))))
+			pmbp->link = pmbe;
+
+		pmbp = pmbe;
+
+		pr_info("\t0x%08lx -> 0x%08lx [ %ldMB %scached ]\n",
+			vpn >> PAGE_SHIFT, ppn >> PAGE_SHIFT, pmbe->size >> 20,
+			(data_val & PMB_C) ? "" : "un");
 
 		applied++;
 	}
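
The linking scheme is easiest to see outside of the kernel. What follows is a
minimal userspace sketch of the same technique, not kernel code: only the
vpn/ppn/size/link fields mirror struct pmb_entry, and the demo_* helpers plus
the sample addresses are invented for illustration. Two slots belong to the
same mapping only when both the virtual and the physical ranges continue with
no hole, which is the comparison the pmb_synchronize_mappings() hunk adds;
tear-down then needs only the head entry and can walk link, in the spirit of
the do { ... } while (pmbe) loop visible in the pmb_unmap_entry() hunk.

/*
 * Userspace sketch of PMB entry linking -- NOT kernel code. Only
 * vpn/ppn/size/link mirror struct pmb_entry; everything else here
 * is invented for illustration.
 */
#include <stdio.h>
#include <stddef.h>

struct demo_entry {
	unsigned long vpn;		/* virtual base of this slot */
	unsigned long ppn;		/* physical base of this slot */
	unsigned long size;		/* span covered by this slot */
	struct demo_entry *link;	/* next slot of the same mapping */
};

/* Same test the patch adds: link only if both address spaces continue. */
static void demo_link(struct demo_entry *prev, struct demo_entry *cur)
{
	if (prev && cur->vpn == prev->vpn + prev->size &&
		    cur->ppn == prev->ppn + prev->size)
		prev->link = cur;
}

/* Tear-down walks the chain from the head entry, clearing each slot. */
static void demo_unmap(struct demo_entry *head)
{
	while (head) {
		printf("clearing 0x%08lx (%luMB)\n",
		       head->vpn, head->size >> 20);
		head = head->link;
	}
}

int main(void)
{
	/* A 48MB boot mapping split over three 16MB slots. */
	struct demo_entry e[3] = {
		{ 0x80000000UL, 0x0c000000UL, 16 << 20, NULL },
		{ 0x81000000UL, 0x0d000000UL, 16 << 20, NULL },
		{ 0x82000000UL, 0x0e000000UL, 16 << 20, NULL },
	};

	for (int i = 1; i < 3; i++)
		demo_link(&e[i - 1], &e[i]);

	demo_unmap(&e[0]);	/* frees all three linked slots */
	return 0;
}

Note that matching in one address space alone is not enough: a single mapping
must be contiguous both virtually and physically, otherwise walking the chain
at tear-down time could clear slots belonging to an unrelated mapping.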