/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Per-CPU cache operation entry points. These all start out as no-ops
 * and are pointed at the real implementations by the CPU-specific
 * cache_init() called from cpu_cache_init() at boot.
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

/*
 * Region flush entry points, likewise filled in by the CPU-specific
 * cache setup; exported for use by drivers/modules.
 */
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);

/*
 * Default no-op region flush; installed into the __flush_*_region
 * pointers by cpu_cache_init() and left in place when the cache is
 * disabled or no CPU-specific implementation takes over.
 */
static inline void noop__flush_region(void *start, int size)
{
}

/*
 * Run a cache operation on all other CPUs via smp_call_function() and
 * then on the local CPU, with preemption disabled so we don't migrate
 * between the cross-call and the local invocation.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);	/* all other CPUs */
	func(info);				/* this CPU */
	preempt_enable();
}

49 50 51
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
L
Linus Torvalds 已提交
52
{
53 54
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
55 56
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
57
		kunmap_coherent(vto);
58 59
	} else {
		memcpy(dst, src, len);
60 61
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
62
	}
63 64 65 66 67 68 69 70 71

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
72 73
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
74 75
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
76
		kunmap_coherent(vfrom);
77 78
	} else {
		memcpy(dst, src, len);
79 80
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
81
	}
L
Linus Torvalds 已提交
82
}
83

P
Paul Mundt 已提交
84 85 86 87 88 89 90
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

91 92
	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
93 94
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
95
		kunmap_coherent(vfrom);
96 97 98 99 100 101 102
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
103
		__flush_purge_region(vto, PAGE_SIZE);
P
Paul Mundt 已提交
104 105 106 107 108 109

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
110 111 112 113 114 115 116 117

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
118
		__flush_purge_region(kaddr, PAGE_SIZE);
119 120 121 122

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
123 124 125 126 127 128 129 130 131 132 133

/*
 * Called on a PTE update: if the page was marked dirty in the D-cache
 * while unmapped, purge its kernel-address cache lines when they alias
 * the new user mapping.
 *
 * Note: the original code called pfn_to_page() before checking
 * pfn_valid(); the lookup is now done only for valid pfns.
 */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	/* Nothing to do without D-cache aliases */
	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long addr = (unsigned long)page_address(page);

		if (pages_do_alias(addr, address & PAGE_MASK))
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
155 156
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
157
			kunmap_coherent(kaddr);
P
Paul Mundt 已提交
158
		} else
159
			__flush_purge_region((void *)addr, PAGE_SIZE);
P
Paul Mundt 已提交
160 161
	}
}
162

P
Paul Mundt 已提交
163 164
void flush_cache_all(void)
{
165
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
P
Paul Mundt 已提交
166
}
P
Paul Mundt 已提交
167
EXPORT_SYMBOL(flush_cache_all);
P
Paul Mundt 已提交
168 169 170

void flush_cache_mm(struct mm_struct *mm)
{
171 172 173
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

174
	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
P
Paul Mundt 已提交
175 176 177 178
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
179 180 181
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

182
	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
P
Paul Mundt 已提交
183 184 185 186 187 188 189 190 191 192 193
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

194
	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
P
Paul Mundt 已提交
195 196 197 198 199 200 201 202 203 204 205
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

206
	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
P
Paul Mundt 已提交
207
}
P
Paul Mundt 已提交
208
EXPORT_SYMBOL(flush_cache_range);
P
Paul Mundt 已提交
209 210 211

void flush_dcache_page(struct page *page)
{
212
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
P
Paul Mundt 已提交
213
}
P
Paul Mundt 已提交
214
EXPORT_SYMBOL(flush_dcache_page);
P
Paul Mundt 已提交
215 216 217 218 219 220 221 222 223

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

224
	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
P
Paul Mundt 已提交
225 226 227 228 229
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
230
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
P
Paul Mundt 已提交
231 232 233 234
}

void flush_cache_sigtramp(unsigned long address)
{
235
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
P
Paul Mundt 已提交
236 237
}

238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277
/*
 * Derive the cache alias mask and the number of page-colour aliases
 * from the probed set count and entry shift.
 */
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);

	if (c->alias_mask)
		c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
	else
		c->n_aliases = 0;
}

/*
 * Dump the probed I-cache/D-cache (and, when present, L2) geometry to
 * the kernel log at boot.
 */
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

278 279
void __init cpu_cache_init(void)
{
P
Paul Mundt 已提交
280 281 282 283 284
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif
M
Magnus Damm 已提交
285

286 287 288 289
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

290 291 292 293
	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

M
Magnus Damm 已提交
294 295 296 297 298 299 300
	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

301 302 303 304 305 306
	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

307 308 309 310 311 312
	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

313 314 315 316
	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();
317 318 319 320 321 322 323

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
324 325
	}

326 327 328 329 330 331 332
	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}
333

P
Paul Mundt 已提交
334 335 336 337 338 339
	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

M
Magnus Damm 已提交
340
skip:
341
	emit_cache_params();
342
}