cplbmgr.c 8.9 KB
Newer Older
1
/*
2
 * Blackfin CPLB exception handling for when the MPU is on
3
 *
4
 * Copyright 2008-2009 Analog Devices Inc.
5
 *
6
 * Licensed under the GPL-2 or later.
7
 */
8

9 10 11 12
#include <linux/module.h>
#include <linux/mm.h>

#include <asm/blackfin.h>
13
#include <asm/cacheflush.h>
14
#include <asm/cplb.h>
15 16 17
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>

18 19 20 21 22 23 24
/*
 * WARNING
 *
 * This file is compiled with certain -ffixed-reg options.  We have to
 * make sure not to call any functions here that could clobber these
 * registers.
 */
25 26 27

/*
 * Number of 32-bit words in one page-permission bitmap; the miss
 * handlers step by this amount to reach the write (1x) and execute
 * (2x) bitmaps that follow the read bitmap.  page_mask_order is
 * presumably the allocation order for the bitmap set — set up
 * elsewhere during CPLB initialization (not visible in this file).
 */
int page_mask_nelts;
int page_mask_order;

/* Per-CPU pointer to the current process's read/write/execute
 * permission bitmaps (NULL when no mask is installed). */
unsigned long *current_rwx_mask[NR_CPUS];

/* Per-CPU event counters: D/I CPLB misses, supervisor ICPLB misses,
 * DCPLB protection faults, and switched-CPLB flushes. */
int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56

/*
 * Given the contents of the status register, return the index of the
 * CPLB entry that caused the fault.  The low 16 status bits encode
 * the entry; norm_fr1x32 locates the set bit by counting sign bits.
 */
static inline int faulting_cplb_index(int status)
{
	return 30 - __builtin_bfin_norm_fr1x32(status & 0xFFFF);
}

/*
 * Given the contents of the status register and the DCPLB_DATA
 * contents, return nonzero if the faulting write access should be
 * permitted: supervisor-mode faults are checked against the
 * supervisor-write bit, user-mode faults against the user-write bit.
 */
static inline int write_permitted(int status, unsigned long data)
{
	unsigned long wr_bit;

	wr_bit = (status & FAULT_USERSUPV) ? CPLB_SUPV_WR : CPLB_USER_WR;
	return (data & wr_bit) != 0;
}

/*
 * Counters to implement round-robin replacement, one per CPU for each
 * of the instruction and data CPLB tables.
 */
static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];

/*
 * Find an ICPLB entry to be evicted and return its index.
 */
62
static int evict_one_icplb(unsigned int cpu)
63 64 65
{
	int i;
	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
66
		if ((icplb_tbl[cpu][i].data & CPLB_VALID) == 0)
67
			return i;
68
	i = first_switched_icplb + icplb_rr_index[cpu];
69 70
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_icplb;
71
		icplb_rr_index[cpu] -= MAX_CPLBS - first_switched_icplb;
72
	}
73
	icplb_rr_index[cpu]++;
74 75 76
	return i;
}

77
static int evict_one_dcplb(unsigned int cpu)
78 79 80
{
	int i;
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
81
		if ((dcplb_tbl[cpu][i].data & CPLB_VALID) == 0)
82
			return i;
83
	i = first_switched_dcplb + dcplb_rr_index[cpu];
84 85
	if (i >= MAX_CPLBS) {
		i -= MAX_CPLBS - first_switched_dcplb;
86
		dcplb_rr_index[cpu] -= MAX_CPLBS - first_switched_dcplb;
87
	}
88
	dcplb_rr_index[cpu]++;
89 90 91
	return i;
}

92
/*
 * Handle a DCPLB miss: build a CPLB descriptor for the faulting data
 * address, evict a victim slot, and install the new entry in both the
 * software table and the hardware registers.  Returns 0 on success or
 * CPLB_PROT_VIOL if the access must not be mapped.
 */
static noinline int dcplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
	int status = bfin_read_DCPLB_STATUS();
	unsigned long *mask;
	int idx;
	unsigned long d_data;

	nr_dcplb_miss[cpu]++;

	/* Default attributes: supervisor-writable, valid, dirty, 4K page. */
	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
	if (bfin_addr_dcacheable(addr)) {
		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
	}
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* On-chip L2 is covered by one fixed descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async memory banks: user read access is granted only
			 * if the current process's read bitmap allows it. */
			mask = current_rwx_mask[cpu];
			if (mask) {
				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				if (mask[idx] & bit)
					d_data |= CPLB_USER_RD;
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
			/* Supervisor read of the boot ROM: cover it with a
			 * single 1MB page instead of a 4K one. */
			addr &= ~(1 * 1024 * 1024 - 1);
			d_data &= ~PAGE_SIZE_4KB;
			d_data |= PAGE_SIZE_1MB;
		} else
			return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Between _ramend and physical_mem_end (reserved memory):
		 * grant full user read/write access. */
	    d_data |= CPLB_USER_RD | CPLB_USER_WR;
	} else {
		/* Normal RAM: consult the current process's read and write
		 * permission bitmaps for the user-access bits. */
		mask = current_rwx_mask[cpu];
		if (mask) {
			int page = addr >> PAGE_SHIFT;
			int idx = page >> 5;
			int bit = 1 << (page & 31);

			if (mask[idx] & bit)
				d_data |= CPLB_USER_RD;

			/* The write bitmap follows the read bitmap. */
			mask += page_mask_nelts;
			if (mask[idx] & bit)
				d_data |= CPLB_USER_WR;
		}
	}
	idx = evict_one_dcplb(cpu);

	addr &= PAGE_MASK;
	dcplb_tbl[cpu][idx].addr = addr;
	dcplb_tbl[cpu][idx].data = d_data;

	/* DCPLBs are turned off while the hardware entry is rewritten. */
	_disable_dcplb();
	bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
	bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
	_enable_dcplb();

	return 0;
}

164
/*
 * Handle an ICPLB miss: build a CPLB descriptor for the faulting
 * instruction address, evict a victim slot, and install the new entry
 * in both the software table and the hardware registers.  Returns 0
 * on success or CPLB_PROT_VIOL if execution from this address must
 * not be mapped.
 */
static noinline int icplb_miss(unsigned int cpu)
{
	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
	int status = bfin_read_ICPLB_STATUS();
	int idx;
	unsigned long i_data;

	nr_icplb_miss[cpu]++;

	/* If inside the uncached DMA region, fault.  */
	if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend)
		return CPLB_PROT_VIOL;

	if (status & FAULT_USERSUPV)
		nr_icplb_supv_miss[cpu]++;

	/*
	 * First, try to find a CPLB that matches this address.  If we
	 * find one, then the fact that we're in the miss handler means
	 * that the instruction crosses a page boundary.
	 */
	for (idx = first_switched_icplb; idx < MAX_CPLBS; idx++) {
		if (icplb_tbl[cpu][idx].data & CPLB_VALID) {
			unsigned long this_addr = icplb_tbl[cpu][idx].addr;
			if (this_addr <= addr && this_addr + PAGE_SIZE > addr) {
				/* Map the following page instead. */
				addr += PAGE_SIZE;
				break;
			}
		}
	}

	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;

#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
	/*
	 * Normal RAM, and possibly the reserved memory area, are
	 * cacheable.
	 */
	if (addr < _ramend ||
	    (addr < physical_mem_end && reserved_mem_icache_on))
		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
#endif

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* On-chip L2 is covered by one fixed descriptor. */
		addr = L2_START;
		i_data = L2_IMEMORY;
	} else if (addr >= physical_mem_end) {
		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
			/* Async memory banks: user execution is granted only
			 * if the current process's execute bitmap allows it. */
			if (!(status & FAULT_USERSUPV)) {
				unsigned long *mask = current_rwx_mask[cpu];

				if (mask) {
					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
					int idx = page >> 5;
					int bit = 1 << (page & 31);

					/* The execute bitmap is the third of the
					 * three per-process bitmaps. */
					mask += 2 * page_mask_nelts;
					if (mask[idx] & bit)
						i_data |= CPLB_USER_RD;
				}
			}
		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
		    && (status & FAULT_USERSUPV)) {
			/* Supervisor execution from the boot ROM: cover it
			 * with a single 1MB page instead of a 4K one. */
			addr &= ~(1 * 1024 * 1024 - 1);
			i_data &= ~PAGE_SIZE_4KB;
			i_data |= PAGE_SIZE_1MB;
		} else
		    return CPLB_PROT_VIOL;
	} else if (addr >= _ramend) {
		/* Reserved memory above _ramend: user-readable. */
		i_data |= CPLB_USER_RD;
	} else {
		/*
		 * Two cases to distinguish - a supervisor access must
		 * necessarily be for a module page; we grant it
		 * unconditionally (could do better here in the future).
		 * Otherwise, check the x bitmap of the current process.
		 */
		if (!(status & FAULT_USERSUPV)) {
			unsigned long *mask = current_rwx_mask[cpu];

			if (mask) {
				int page = addr >> PAGE_SHIFT;
				int idx = page >> 5;
				int bit = 1 << (page & 31);

				/* Skip to the execute bitmap. */
				mask += 2 * page_mask_nelts;
				if (mask[idx] & bit)
					i_data |= CPLB_USER_RD;
			}
		}
	}
	idx = evict_one_icplb(cpu);
	addr &= PAGE_MASK;
	icplb_tbl[cpu][idx].addr = addr;
	icplb_tbl[cpu][idx].data = i_data;

	/* ICPLBs are turned off while the hardware entry is rewritten. */
	_disable_icplb();
	bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
	bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
	_enable_icplb();

	return 0;
}

268
static noinline int dcplb_protection_fault(unsigned int cpu)
269 270 271
{
	int status = bfin_read_DCPLB_STATUS();

272
	nr_dcplb_prot[cpu]++;
273 274 275

	if (status & FAULT_RW) {
		int idx = faulting_cplb_index(status);
276
		unsigned long data = dcplb_tbl[cpu][idx].data;
277 278 279
		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
		    write_permitted(status, data)) {
			data |= CPLB_DIRTY;
280
			dcplb_tbl[cpu][idx].data = data;
281 282 283 284 285 286 287 288 289 290
			bfin_write32(DCPLB_DATA0 + idx * 4, data);
			return 0;
		}
	}
	return CPLB_PROT_VIOL;
}

/*
 * Top-level CPLB exception dispatcher.  The low six bits of SEQSTAT
 * select the handler; returns that handler's result, or 1 for causes
 * this code does not recognize.
 */
int cplb_hdr(int seqstat, struct pt_regs *regs)
{
	unsigned int cpu = raw_smp_processor_id();

	switch (seqstat & 0x3f) {
	case 0x23:		/* data access protection violation */
		return dcplb_protection_fault(cpu);
	case 0x26:		/* data CPLB miss */
		return dcplb_miss(cpu);
	case 0x2C:		/* instruction CPLB miss */
		return icplb_miss(cpu);
	default:
		return 1;
	}
}

304
void flush_switched_cplbs(unsigned int cpu)
305 306
{
	int i;
307
	unsigned long flags;
308

309
	nr_cplb_flush[cpu]++;
310

311
	local_irq_save_hw(flags);
312
	_disable_icplb();
313
	for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
314
		icplb_tbl[cpu][i].data = 0;
315 316
		bfin_write32(ICPLB_DATA0 + i * 4, 0);
	}
317
	_enable_icplb();
318

319
	_disable_dcplb();
320
	for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
321
		dcplb_tbl[cpu][i].data = 0;
322 323
		bfin_write32(DCPLB_DATA0 + i * 4, 0);
	}
324
	_enable_dcplb();
325
	local_irq_restore_hw(flags);
326

327 328
}

329
/*
 * Install DCPLB entries covering the pages that hold the current
 * process's permission bitmaps (starting at 'masks'), one page per
 * reserved mask slot, and record the pointer in current_rwx_mask so
 * the miss handlers can consult it.  A NULL 'masks' only clears the
 * per-CPU pointer.
 */
void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
{
	int i;
	unsigned long addr = (unsigned long)masks;
	unsigned long d_data;
	unsigned long flags;

	if (!masks) {
		current_rwx_mask[cpu] = masks;
		return;
	}

	local_irq_save_hw(flags);
	current_rwx_mask[cpu] = masks;

	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
		/* Bitmaps living in on-chip L2 use the fixed L2 descriptor. */
		addr = L2_START;
		d_data = L2_DMEMORY;
	} else {
		/* External memory: supervisor-writable, valid, dirty 4K pages,
		 * with cacheability matching the kernel configuration. */
		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
		d_data |= CPLB_L1_CHBL;
# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
		d_data |= CPLB_L1_AOW | CPLB_WT;
# endif
#endif
	}

	/* DCPLBs are turned off while the mask slots are reprogrammed. */
	_disable_dcplb();
	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
		dcplb_tbl[cpu][i].addr = addr;
		dcplb_tbl[cpu][i].data = d_data;
		bfin_write32(DCPLB_DATA0 + i * 4, d_data);
		bfin_write32(DCPLB_ADDR0 + i * 4, addr);
		addr += PAGE_SIZE;
	}
	_enable_dcplb();
	local_irq_restore_hw(flags);
}