/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Per-CPU cache flush primitives.  They all default to cache_noop and
 * are overridden at boot by the CPU-family-specific *_cache_init()
 * routines invoked from cpu_cache_init(), so calls made before (or
 * without) probing are safe no-ops.
 */
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
/*
 * Region-based flush operations; likewise installed by the CPU probe
 * code in cpu_cache_init(), which points them at noop__flush_region
 * until (and unless) a real implementation is selected.
 */
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

/* Default region op: nothing to do (cache disabled or not probed). */
static inline void noop__flush_region(void *start, int size)
{
}
/*
 * Run @func(@info) on every online CPU, including the local one.
 *
 * Preemption is disabled so that we cannot migrate between issuing
 * the cross-CPU calls and performing the local invocation; @wait is
 * passed through to smp_call_function() to optionally block until
 * the remote CPUs have completed.
 */
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}
/*
 * Copy @len bytes from @src into @page so that user space, which maps
 * the page at @vaddr, observes the new data even on a VIPT-aliasing
 * D-cache.  @dst is the kernel-side address within the page.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	/*
	 * On aliasing caches, write through a kernel mapping that is
	 * cache-congruent with the user address (kmap_coherent()) so
	 * the data lands in the cache lines user space will read.
	 */
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		/* Remember that the kernel copy needs flushing later. */
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	/* Executable mappings also need the I-cache brought up to date. */
	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
/*
 * Copy @len bytes out of @page (mapped by user space at @vaddr) into
 * @dst, reading the user's view of the data on aliasing D-caches.
 * @src is the kernel-side address within the page.
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	/*
	 * Read through a cache-congruent kernel mapping so we observe
	 * exactly what user space wrote, not a stale kernel alias.
	 */
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		/* Kernel alias was touched; flag the page for a later flush. */
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
/*
 * Copy the contents of @from into @to for a user mapping at @vaddr
 * (e.g. COW break), keeping an aliasing D-cache coherent.
 */
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	/* Read the source through a cache-congruent mapping if aliasing. */
	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	/* Purge the kernel alias if it can collide with the user address. */
	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);
	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
/*
 * Zero @page for a user mapping at @vaddr, purging any kernel cache
 * lines that alias the user address on VIPT-aliasing D-caches.
 */
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	/* Purge the kernel alias if it can collide with @vaddr. */
	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
/*
 * PTE update hook: when a user PTE for @address is (re)established,
 * write back any deferred-dirty kernel-side cache lines for the page
 * so the new user mapping sees current data.
 *
 * Fix: validate the pfn *before* converting it with pfn_to_page().
 * The original converted first and checked pfn_valid() afterwards;
 * with sparse memory layouts pfn_to_page() on an invalid pfn is not
 * a safe operation, so the order matters even though the resulting
 * pointer was only dereferenced inside the valid branch.
 */
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);
	struct page *page;

	/* Nothing to do when the D-cache has no aliases. */
	if (!boot_cpu_data.dcache.n_aliases)
		return;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long addr = (unsigned long)page_address(page);

		/* Only flush if the kernel alias collides with @address. */
		if (pages_do_alias(addr, address & PAGE_MASK))
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
/*
 * Make the kernel's view of an anonymous @page coherent with the user
 * mapping at @vmaddr before the kernel reads it (e.g. get_user_pages).
 * Only needed when the kernel and user addresses alias in the D-cache.
 */
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
/* Flush the entire cache on all CPUs. */
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}

/* Flush all cache lines associated with @mm on all CPUs. */
void flush_cache_mm(struct mm_struct *mm)
{
	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

/* Like flush_cache_mm(), invoked when duplicating an address space. */
void flush_cache_dup_mm(struct mm_struct *mm)
{
	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

184
	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
P
Paul Mundt 已提交
185 186 187 188 189 190 191 192 193 194 195
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

196
	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
P
Paul Mundt 已提交
197 198 199 200
}

/* Flush @page from the D-cache on all CPUs. */
void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

212
	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
P
Paul Mundt 已提交
213 214 215 216 217
}

/* Flush @page from the I-cache on all CPUs. */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}
/* Flush the cache for the signal trampoline at @address on all CPUs. */
void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
/*
 * Derive the cache's alias mask and alias count from its probed
 * geometry.  A way larger than PAGE_SIZE produces virtual-index bits
 * above the page offset, and each combination of those bits is one
 * alias; caches without such bits get a zero mask and zero aliases.
 */
static void compute_alias(struct cache_info *c)
{
	unsigned int mask;

	mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->alias_mask = mask;
	c->n_aliases = mask ? (mask >> PAGE_SHIFT) + 1 : 0;
}
/* Report the probed cache geometry to the kernel log at boot. */
static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
/*
 * Boot-time cache setup: compute alias geometry for each cache level,
 * install no-op flush primitives, then dispatch to the CPU-family-
 * specific initializer (declared __weak so unbuilt families resolve
 * to nothing) to install the real implementations.
 */
void __init cpu_cache_init(void)
{
	/* Cache is considered disabled when the CCR enable bit is clear. */
	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		/* SH7705 with 512 D-cache sets gets a dedicated setup. */
		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	/* Always report the probed geometry, even with the cache off. */
	emit_cache_params();
}