/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>

/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

/* MIPS specific cache operations */
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

EXPORT_SYMBOL(_dma_cache_wback_inv);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE, but
 * that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok(VERIFY_WRITE, (void __user *) addr, bytes))
		return -EFAULT;

	flush_icache_range(addr, addr + bytes);

	return 0;
}
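
/*
 * For illustration, a sketch of the user-space side (assuming the usual
 * MIPS libc wrapper declared in <sys/cachectl.h>; buf/len are placeholder
 * names for freshly written instructions and their size):
 *
 *	if (cacheflush(buf, len, BCACHE) < 0)
 *		perror("cacheflush");
 *
 * BCACHE (ICACHE | DCACHE) requests both caches; as noted above, this
 * implementation flushes the range via flush_icache_range() either way.
 */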

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	unsigned long addr;

	if (PageHighMem(page))
		return;
	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the tlb (and thus flushed) anyway.
	 */
	addr = (unsigned long) page_address(page);
	flush_data_cache_page(addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapped(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

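/*
 * Counterpart to the lazy scheme in __flush_dcache_page() above: when a
 * PTE is installed (the update_mmu_cache() path) the deferred writeback
 * is performed here if the kernel mapping may alias the new user address
 * or the page is about to be executed.
 */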
void __update_cache(struct vm_area_struct *vma, unsigned long address,
	pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (page_mapping(page) && Page_dcache_dirty(page)) {
		addr = (unsigned long) page_address(page);
		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);
		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

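/*
 * protection_map[] is indexed by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED
 * bits of vma->vm_flags (entries 0-7 private, 8-15 shared).  With the
 * SmartMIPS RI/XI bits available the no-exec/no-read protections can be
 * encoded directly; otherwise the generic PAGE_* protections are used.
 */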
static inline void setup_protection_map(void)
{
	if (kernel_uses_smartmips_rixi) {
		protection_map[0]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[5]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[7]  = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9]  = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE  | _PAGE_NO_READ);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

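/*
 * The cpu_has_*_cache predicates below are normally compile-time
 * constants from <asm/cpu-features.h> for a given platform, so only the
 * matching *_cache_init() call survives; the __weak declarations keep
 * the references linkable when the other cache variants are not built.
 */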
void __cpuinit cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}