Commit 2dc2f8e0 authored by Paul Mundt

sh: Kill off the special uncached section and fixmap.

Now that cached_to_uncached works as advertised in 32-bit mode and we're
never going to be able to map < 16MB anyway, there's no need for the
special uncached section. Kill it off.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Parent 3125ee72
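What replaces the dedicated section is the cached_to_uncached offset named in the commit message: on 32-bit sh the kernel's cached (P1) text has a fixed-offset uncached (P2) alias, so any code can run uncached by branching to its aliased address at run time rather than being linked into .uncached.text. A minimal sketch of that address relationship, assuming the usual segment layout (cached_to_uncached is the kernel's real offset variable; to_uncached_alias() is a hypothetical helper made up here for illustration):

    /*
     * Illustration only: compute the uncached (P2) alias of a cached (P1)
     * kernel address.  Only cached_to_uncached is a real kernel symbol;
     * this helper is hypothetical.
     */
    static inline unsigned long to_uncached_alias(unsigned long cached_addr)
    {
            return cached_addr + cached_to_uncached;
    }

jump_to_uncached() effectively applies this offset to the program counter, which is why per-function section placement is no longer needed. The diff below removes the section, its fixmap entry, and the per-function attribute accordingly.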
@@ -55,11 +55,12 @@ enum fixed_addresses {
 #define FIX_N_COLOURS 8
        FIX_CMAP_BEGIN,
        FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
-       FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
        FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
        FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
 #endif
        /*
         * FIX_IOREMAP entries are useful for mapping physical address
         * space before ioremap() is useable, e.g. really early in boot
@@ -68,6 +69,7 @@ enum fixed_addresses {
 #define FIX_N_IOREMAPS 32
        FIX_IOREMAP_BEGIN,
        FIX_IOREMAP_END = FIX_IOREMAP_BEGIN + FIX_N_IOREMAPS,
        __end_of_fixed_addresses
 };
......
@@ -145,9 +145,6 @@ do { \
        __restore_dsp(prev); \
 } while (0)

-#define __uses_jump_to_uncached \
-       noinline __attribute__ ((__section__ (".uncached.text")))

 /*
  * Jump to uncached area.
  * When handling TLB or caches, we need to do it from an uncached area.
......
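With the section attribute gone, code that pokes the caches or TLB relies purely on the run-time pattern already used throughout the files below: switch to the uncached alias, do the critical register access, switch back. A rough sketch of that caller pattern, modelled on the cache_init()/flush_icache_all() style functions touched by this commit (jump_to_uncached(), back_to_cached(), ctrl_inl()/ctrl_outl() and CCR are taken from this tree; frob_ccr() and the CCR_CACHE_ENABLE mask are assumptions for illustration):

    /*
     * Hypothetical caller, illustration only: the function stays in normal
     * .text and only executes the CCR manipulation from the P2 alias.
     */
    static void frob_ccr(void)
    {
            unsigned long ccr, flags;

            local_irq_save(flags);
            jump_to_uncached();             /* branch to the P2 (uncached) alias */

            ccr = ctrl_inl(CCR);
            ccr |= CCR_CACHE_ENABLE;        /* assumed bit mask, for illustration */
            ctrl_outl(ccr, CCR);

            back_to_cached();               /* resume from the cached P1 mapping */
            local_irq_restore(flags);
    }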
@@ -33,8 +33,6 @@ do { \
                 &next->thread); \
 } while (0)

-#define __uses_jump_to_uncached

 #define jump_to_uncached()     do { } while (0)
 #define back_to_cached()       do { } while (0)
......
@@ -98,7 +98,7 @@ static void __init expmask_init(void)
 #endif

 /* 2nd-level cache init */
-void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
+void __attribute__ ((weak)) l2_cache_init(void)
 {
 }
@@ -106,7 +106,7 @@ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __uses_jump_to_uncached cache_init(void)
+static void cache_init(void)
 {
        unsigned long ccr, flags;
......
@@ -16,7 +16,7 @@
 #include <asm/cache.h>
 #include <asm/io.h>

-int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
+int detect_cpu_and_cache_system(void)
 {
        unsigned long addr0, addr1, data0, data1, data2, data3;
......
@@ -592,7 +592,8 @@ void __init plat_early_device_setup(void)
 #define RAMCR_CACHE_L2FC 0x0002
 #define RAMCR_CACHE_L2E 0x0001
 #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+void l2_cache_init(void)
 {
        /* Enable L2 cache */
        ctrl_outl(L2_CACHE_ENABLE, RAMCR);
......
@@ -714,7 +714,8 @@ void __init plat_early_device_setup(void)
 #define RAMCR_CACHE_L2FC 0x0002
 #define RAMCR_CACHE_L2E 0x0001
 #define L2_CACHE_ENABLE (RAMCR_CACHE_L2E|RAMCR_CACHE_L2FC)
-void __uses_jump_to_uncached l2_cache_init(void)
+void l2_cache_init(void)
 {
        /* Enable L2 cache */
        ctrl_outl(L2_CACHE_ENABLE, RAMCR);
......
@@ -53,18 +53,6 @@ SECTIONS
        NOTES
        RO_DATA(PAGE_SIZE)

-       /*
-        * Code which must be executed uncached and the associated data
-        */
-       . = ALIGN(PAGE_SIZE);
-       .uncached : AT(ADDR(.uncached) - LOAD_OFFSET) {
-               __uncached_start = .;
-               *(.uncached.text)
-               *(.uncached.data)
-               __uncached_end = .;
-       }

        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)

        _edata = .;             /* End of data section */
......
@@ -22,8 +22,7 @@ enum cache_type {
        CACHE_TYPE_UNIFIED,
 };

-static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
-                                    void *iter)
+static int cache_seq_show(struct seq_file *file, void *iter)
 {
        unsigned int cache_type = (unsigned int)file->private;
        struct cache_info *cache;
......
@@ -36,7 +36,7 @@ static void __flush_cache_one(unsigned long addr, unsigned long phys,
  * Called from kernel/module.c:sys_init_module and routine for a.out format,
  * signal handler code and kprobes code
  */
-static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
+static void sh4_flush_icache_range(void *args)
 {
        struct flusher_data *data = args;
        unsigned long start, end;
@@ -124,7 +124,7 @@ static void sh4_flush_dcache_page(void *arg)
 }

 /* TODO: Selective icache invalidation through IC address array.. */
-static void __uses_jump_to_uncached flush_icache_all(void)
+static void flush_icache_all(void)
 {
        unsigned long flags, ccr;
......
@@ -78,7 +78,7 @@ static void sh7705_flush_icache_range(void *args)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
+static void __flush_dcache_page(unsigned long phys)
 {
        unsigned long ways, waysize, addrstart;
        unsigned long flags;
@@ -144,7 +144,7 @@ static void sh7705_flush_dcache_page(void *arg)
        __flush_dcache_page(__pa(page_address(page)));
 }

-static void __uses_jump_to_uncached sh7705_flush_cache_all(void *args)
+static void sh7705_flush_cache_all(void *args)
 {
        unsigned long flags;
......
@@ -211,9 +211,6 @@ void __init paging_init(void)
        }

        free_area_init_nodes(max_zone_pfns);

-       /* Set up the uncached fixmap */
-       set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
 }

 /*
......
@@ -127,14 +127,14 @@ static void __set_pmb_entry(unsigned long vpn, unsigned long ppn,
        ctrl_outl(ppn | flags | PMB_V, mk_pmb_data(pos));
 }

-static void __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
+static void set_pmb_entry(struct pmb_entry *pmbe)
 {
        jump_to_uncached();
        __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, pmbe->entry);
        back_to_cached();
 }

-static void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
+static void clear_pmb_entry(struct pmb_entry *pmbe)
 {
        unsigned int entry = pmbe->entry;
        unsigned long addr;
@@ -364,7 +364,7 @@ static inline int pmb_apply_legacy_mappings(void)
 }
 #endif

-int __uses_jump_to_uncached pmb_init(void)
+int pmb_init(void)
 {
        int i;
        unsigned long addr, data;
......
@@ -68,8 +68,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
  * in extended mode, the legacy 8-bit ASID field in address array 1 has
  * undefined behaviour.
  */
-void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
-                                unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
        jump_to_uncached();
        __raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
......
@@ -64,8 +64,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
        local_irq_restore(flags);
 }

-void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
-                                unsigned long page)
+void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
        unsigned long addr, data;
......