/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2010  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

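/*
 * Per-CPU cache flush primitives. These all default to cache_noop and
 * are expected to be repointed at the real implementations by the
 * family-specific *_cache_init() hooks (sh4_cache_init() and friends)
 * called from cpu_cache_init() below. Callers go through the
 * cacheop_on_each_cpu() wrapper so the operation reaches every online CPU.
 */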
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

static inline void noop__flush_region(void *start, int size)
{
}

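/*
 * Run a cache operation on all online CPUs: broadcast it via IPI first,
 * then run it locally, with preemption disabled so the set of online
 * CPUs cannot change mid-operation.
 */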
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

	/*
	 * It's possible that this gets called early on when IRQs are
	 * still disabled due to ioremapping by the boot CPU, so don't
	 * even attempt IPIs unless there are other CPUs online.
	 */
	if (num_online_cpus() > 1)
		smp_call_function(func, info, wait);

	func(info);

	preempt_enable();
}

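/*
 * copy_to_user_page() may write to a page that userspace has mapped at
 * a different cache colour. If the kernel's view is known clean
 * (PG_dcache_clean), copy through a coherent kmap at the user colour;
 * otherwise copy through the kernel alias and mark the page's dcache
 * state dirty so a later __update_cache() purges it. Executable
 * mappings additionally get a cache/icache flush.
 */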
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    test_bit(PG_dcache_clean, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			clear_bit(PG_dcache_clean, &page->flags);
	}
}

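/*
 * Anon/COW copy path. The copy runs through kernel mappings, so if the
 * destination's kernel alias can collide with the user address's cache
 * colour, or the vma is executable, purge the freshly written lines
 * before the user mapping ever sees the page.
 */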
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    test_bit(PG_dcache_clean, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
	    (vma->vm_flags & VM_EXEC))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto);
	/* Make sure this page is cleared on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr);
}
EXPORT_SYMBOL(clear_user_highpage);

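/*
 * Called when a PTE is installed: if the kernel alias of the mapped
 * page still holds dirty dcache lines (PG_dcache_clean unset), purge
 * them so userspace cannot see stale data through a differently
 * coloured mapping. No-op on non-aliasing caches.
 */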
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}

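/*
 * Resolve a dcache alias on an anonymous page before the kernel
 * accesses it through page_address(), but only when the kernel and
 * user addresses can actually collide in the cache.
 */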
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    test_bit(PG_dcache_clean, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

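/*
 * The entry points below are thin SMP wrappers: each packs its
 * arguments into a struct flusher_data where needed and broadcasts the
 * matching local_flush_*() operation with cacheop_on_each_cpu().
 */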
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

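/* With no dcache aliases, mm-wide flushes have nothing to do. */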
void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_icache_range);

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

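/*
 * One cache way spans sets << entry_shift bytes; any index bits above
 * PAGE_SHIFT select distinct page colours. Worked example (assumed
 * figures, not a probed CPU): 256 sets with 32-byte lines
 * (entry_shift = 5) give an 8 KiB way; with 4 KiB pages that yields
 * alias_mask = 0x1000 and n_aliases = 2.
 */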
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

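/*
 * Boot-time setup: compute the alias parameters for each probed cache,
 * then hand control to the matching CPU family's cache init code. If
 * the CCR indicates the cache is disabled, every operation is left as
 * a no-op.
 */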
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef SH_CCR
	cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();

		if ((boot_cpu_data.type == CPU_SH7786) ||
		    (boot_cpu_data.type == CPU_SHX3)) {
			extern void __weak shx3_cache_init(void);

			shx3_cache_init();
		}
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}