/*
 * Blackfin CPLB exception handling for when MPU is on
 *
 * Copyright 2008-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */

int page_mask_nelts;
int page_mask_order;
28
unsigned long *current_rwx_mask[NR_CPUS];
29

30 31 32
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
33

34 35 36 37 38 39
#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
#define MGR_ATTR __attribute__((l1_text))
#else
#define MGR_ATTR
#endif

40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62
/*
 * Given the contents of the status register, return the index of the
 * CPLB that caused the fault.
 */
static inline int faulting_cplb_index(int status)
{
	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
	return 30 - signbits;
}

/*
 * Given the contents of the status register and the DCPLB_DATA contents,
 * return true if a write access should be permitted.
 */
static inline int write_permitted(int status, unsigned long data)
{
	if (status & FAULT_USERSUPV)
		return !!(data & CPLB_SUPV_WR);
	else
		return !!(data & CPLB_USER_WR);
}

/* Counters to implement round-robin replacement.  */
63
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
64 65 66 67

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
68
MGR_ATTR static int evict_one_icplb(unsigned int cpu)
69 70 71
{
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
72
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
73
			return i;
74
	i = first_switched_icplb + icplb_rr_index[cpu];
75 76
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
77
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
78
	}
79
	icplb_rr_index[cpu]++;
80 81 82
	return i;
}

83
MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
84 85 86
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
87
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
88
			return i;
89
	i = first_switched_dcplb + dcplb_rr_index[cpu];
90 91
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
92
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
93
	}
94
	dcplb_rr_index[cpu]++;
95 96 97
	return i;
}

98
MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
99 100 101 102 103 104 105
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

106
	nr_dcplb_miss[cpu]++;
107 108

	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
109
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
110
	if (bfin_addr_dcacheable(addr)) {
111
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
112
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
113
		d_data |= CPLB_L1_AOW | CPLB_WT;
114
# endif
115
	}
116
#endif
117 118 119 120 121

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
122
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
123 124 125 126 127 128 129 130 131
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
132 133 134 135
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
136
			d_data |= PAGE_SIZE_1MB;
137 138
		} else
			return CPLB_PROT_VIOL;
139
	} else if (addr >= _ramend) {
140 141 142
		d_data |= CPLB_USER_RD | CPLB_USER_WR;
		if (reserved_mem_dcache_on)
			d_data |= CPLB_L1_CHBL;
143
	} else {
144
		mask = current_rwx_mask[cpu];
145 146
		if (mask) {
			int page = addr >> PAGE_SHIFT;
147
			int idx = page >> 5;
148 149
			int bit = 1 << (page & 31);

150
			if (mask[idx] & bit)
151
				d_data |= CPLB_USER_RD;
152

153
			mask += page_mask_nelts;
154
			if (mask[idx] & bit)
155 156 157
				d_data |= CPLB_USER_WR;
		}
	}
158
	idx = evict_one_dcplb(cpu);
159 160

	addr &= PAGE_MASK;
161 162
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;
163

164
	_disable_dcplb();
165 166
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
167
	_enable_dcplb();
168 169 170 171

	return 0;
}

172
MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
173 174 175 176 177 178
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

179
	nr_icplb_miss[cpu]++;
180

181 182
	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
183 184
		return CPLB_PROT_VIOL;

185
	if (status & FAULT_USERSUPV)
186
		nr_icplb_supv_miss[cpu]++;
187

188 189 190 191 192 193
	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
194 195
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
196 197 198 199 200 201 202 203 204
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

205
#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
206
	/*
207 208
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
209
	 */
210 211 212 213
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif
214

215 216 217 218
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
219
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
220 221 222 223 224 225 226 227 228 229 230 231 232
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
233
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
234 235 236 237 238 239
		    && (status & FAULT_USERSUPV)) {
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
		    return CPLB_PROT_VIOL;
240 241
	} else if (addr >= _ramend) {
		i_data |= CPLB_USER_RD;
242 243
		if (reserved_mem_icache_on)
			i_data |= CPLB_L1_CHBL;
244 245 246 247 248 249 250 251
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
252
			unsigned long *mask = current_rwx_mask[cpu];
253 254 255

			if (mask) {
				int page = addr >> PAGE_SHIFT;
256
				int idx = page >> 5;
257 258 259
				int bit = 1 << (page & 31);

				mask += 2 * page_mask_nelts;
260
				if (mask[idx] & bit)
261 262
					i_data |= CPLB_USER_RD;
			}
263 264
		}
	}
265
	idx = evict_one_icplb(cpu);
266
	addr &= PAGE_MASK;
267 268
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;
269

270
	_disable_icplb();
271 272
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
273
	_enable_icplb();
274 275 276 277

	return 0;
}

278
MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
279 280 281
{
	int status = bfin_read_DCPLB_STATUS();

282
	nr_dcplb_prot[cpu]++;
283 284 285

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
286
		unsigned long data = dcplb_tbl[cpu][idx].data;
287 288 289
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
290
			dcplb_tbl[cpu][idx].data = data;
291 292 293 294 295 296 297
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

298
MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
299 300
{
	int cause = seqstat & 0x3f;
301
	unsigned int cpu = raw_smp_processor_id();
302 303
	switch (cause) {
	case 0x23:
304
		return dcplb_protection_fault(cpu);
305
	case 0x2C:
306
		return icplb_miss(cpu);
307
	case 0x26:
308
		return dcplb_miss(cpu);
309
	default:
310
		return 1;
311 312 313
	}
}

314
void flush_switched_cplbs(unsigned int cpu)
315 316
{
	int i;
317
	unsigned long flags;
318

319
	nr_cplb_flush[cpu]++;
320

321
	flags = hard_local_irq_save();
322
	_disable_icplb();
323
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
324
		icplb_tbl[cpu][i].data = 0;
325 326
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
327
	_enable_icplb();
328

329
	_disable_dcplb();
330
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
331
		dcplb_tbl[cpu][i].data = 0;
332 333
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
334
	_enable_dcplb();
335
	hard_local_irq_restore(flags);
336

337 338
}

339
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
340 341 342 343
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
344
	unsigned long flags;
345

346
	if (!masks) {
347
		current_rwx_mask[cpu] = masks;
348
		return;
349 350
	}

351
	flags = hard_local_irq_save();
352
	current_rwx_mask[cpu] = masks;
353

354 355 356 357 358 359 360 361 362 363
	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
364
#endif
365
	}
366

367
	_disable_dcplb();
368
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
369 370
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
371 372 373 374
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
375
	_enable_dcplb();
376
	hard_local_irq_restore(flags);
377
}