/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

P
Paul Mundt 已提交
19 20 21 22 23 24 25 26 27 28
/*
 * Per-CPU cache flush hooks.  All default to cache_noop so that any
 * call made before initialization (or with the cache disabled) is
 * harmless; they are presumably repointed by the family-specific
 * cache init code invoked from cpu_cache_init() — confirm in the
 * respective shN_cache_init() implementations.
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

29
/*
 * Region flush primitives (writeback, purge, invalidate), selected at
 * boot by cpu_cache_init().  Exported so modules can perform explicit
 * cache maintenance on arbitrary kernel ranges.
 */
void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
35 36 37 38 39

/* No-op region flush, installed while the cache is disabled or unprobed. */
static inline void noop__flush_region(void *start, int size)
{
}

40 41 42 43 44 45 46 47 48
/*
 * Run a cache operation on every CPU: IPI the remote CPUs via
 * smp_call_function() and then run the op locally.  Preemption is
 * disabled around both so the local invocation cannot migrate to
 * another CPU mid-operation.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}

49 50 51
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
L
Linus Torvalds 已提交
52
{
53 54
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
55 56
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
57
		kunmap_coherent(vto);
58 59
	} else {
		memcpy(dst, src, len);
60 61
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
62
	}
63 64 65 66 67 68 69 70 71

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}

void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
72 73
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
74 75
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
76
		kunmap_coherent(vfrom);
77 78
	} else {
		memcpy(dst, src, len);
79 80
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
81
	}
L
Linus Torvalds 已提交
82
}
83

P
Paul Mundt 已提交
84 85 86 87 88 89 90
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

91 92
	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
93
		vfrom = kmap_coherent(from, vaddr);
94
		copy_page(vto, vfrom);
95 96 97 98 99 100
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}
P
Paul Mundt 已提交
101

102 103
	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);
104

105
	kunmap_atomic(vto, KM_USER1);
P
Paul Mundt 已提交
106 107 108 109
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
110 111 112 113 114

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

115
	clear_page(kaddr);
116

117 118
	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);
119 120 121 122

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
123 124 125 126 127 128 129 130 131 132 133

/*
 * __update_cache - PTE update hook: perform any deferred D-cache flush
 *
 * If the page backing @pte was written through its kernel mapping while
 * unmapped (PG_dcache_dirty set), purge those lines now that a user
 * mapping is being established.  No-op on non-aliasing D-caches.
 *
 * Note: pfn_to_page() is only evaluated after pfn_valid() — calling it
 * on an invalid pfn is unsafe (e.g. with sparsemem).
 */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (pfn_valid(pfn)) {
		struct page *page = pfn_to_page(pfn);
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}
P
Paul Mundt 已提交
140 141 142 143 144 145 146 147 148 149 150

void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
151 152
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
153
			kunmap_coherent(kaddr);
P
Paul Mundt 已提交
154
		} else
155
			__flush_purge_region((void *)addr, PAGE_SIZE);
P
Paul Mundt 已提交
156 157
	}
}
158

P
Paul Mundt 已提交
159 160
void flush_cache_all(void)
{
161
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
P
Paul Mundt 已提交
162
}
P
Paul Mundt 已提交
163
EXPORT_SYMBOL(flush_cache_all);
P
Paul Mundt 已提交
164 165 166

void flush_cache_mm(struct mm_struct *mm)
{
167 168 169
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

170
	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
P
Paul Mundt 已提交
171 172 173 174
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
175 176 177
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

178
	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
P
Paul Mundt 已提交
179 180 181 182 183 184 185 186 187 188 189
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

190
	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
P
Paul Mundt 已提交
191 192 193 194 195 196 197 198 199 200 201
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

202
	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
P
Paul Mundt 已提交
203
}
P
Paul Mundt 已提交
204
EXPORT_SYMBOL(flush_cache_range);
P
Paul Mundt 已提交
205 206 207

void flush_dcache_page(struct page *page)
{
208
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
P
Paul Mundt 已提交
209
}
P
Paul Mundt 已提交
210
EXPORT_SYMBOL(flush_dcache_page);
P
Paul Mundt 已提交
211 212 213 214 215 216 217 218 219

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

220
	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
P
Paul Mundt 已提交
221 222 223 224 225
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
226
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
P
Paul Mundt 已提交
227 228 229 230
}

void flush_cache_sigtramp(unsigned long address)
{
231
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
P
Paul Mundt 已提交
232 233
}

234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273
/*
 * Derive the aliasing parameters for a probed cache: alias_mask holds
 * the set-index bits that lie above the page offset, and n_aliases is
 * the number of distinct page colours (0 when no aliasing is possible).
 */
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);

	if (c->alias_mask)
		c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
	else
		c->n_aliases = 0;
}

/*
 * emit_cache_params - log the probed cache geometry at boot
 *
 * Prints way/set/alias information for the I- and D-caches, and for
 * the secondary (L2) cache when the CPU probe flagged one as present.
 */
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}

274 275
/*
 * cpu_cache_init - boot-time cache subsystem bring-up
 *
 * Computes alias information for each probed cache, installs no-op
 * region flush primitives, then hands off to the family-specific
 * cache init (which is expected to repoint the flush hooks).  If the
 * cache is disabled in CCR the no-ops are left in place, since no
 * flushing is ever required.  Finishes by logging the cache geometry.
 */
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		/* SH7705 with a 32KB (512-set) D-cache needs its own handling */
		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}