/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/mm_types.h>

#include <asm/udbg.h>
#include <asm/code-patching.h>

enum slb_index {
	LINEAR_INDEX	= 0, /* Kernel linear map  (0xc000000000000000) */
	VMALLOC_INDEX	= 1, /* Kernel virtual map (0xd000000000000000) */
	KSTACK_INDEX	= 2, /* Kernel stack map */
};

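/* Allocate and install an SLB entry for @ea; implemented in assembly. */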
extern void slb_allocate(unsigned long ea);

#define slb_esid_mask(ssize)	\
	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)

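/*
 * Build the ESID word of an SLB entry: the effective address masked to
 * its segment boundary, the valid bit and the slot index.
 */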
static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
					 enum slb_index index)
{
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | index;
}

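/*
 * Build the VSID word of a kernel SLB entry: the kernel VSID for @ea
 * shifted into place, the protection/page-size flags and the segment
 * size field.
 */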
static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
					 unsigned long flags)
{
	return (get_kernel_vsid(ea, ssize) << slb_vsid_shift(ssize)) | flags |
		((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
}

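/*
 * Update one bolted entry in this CPU's SLB shadow buffer, which the
 * hypervisor uses to re-establish bolted entries (e.g. after preempting
 * the partition).
 */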
static inline void slb_shadow_update(unsigned long ea, int ssize,
				     unsigned long flags,
				     enum slb_index index)
{
	struct slb_shadow *p = get_slb_shadow();

	/*
	 * Clear the ESID first so the entry is not valid while we are
	 * updating it.  No write barriers are needed here, provided
	 * we only update the current CPU's SLB shadow buffer.
	 */
	WRITE_ONCE(p->save_area[index].esid, 0);
	WRITE_ONCE(p->save_area[index].vsid, cpu_to_be64(mk_vsid_data(ea, ssize, flags)));
	WRITE_ONCE(p->save_area[index].esid, cpu_to_be64(mk_esid_data(ea, ssize, index)));
}

static inline void slb_shadow_clear(enum slb_index index)
{
	WRITE_ONCE(get_slb_shadow()->save_area[index].esid, 0);
}

static inline void create_shadowed_slbe(unsigned long ea, int ssize,
					unsigned long flags,
					enum slb_index index)
{
	/*
	 * Updating the shadow buffer before writing the SLB ensures
	 * we don't get a stale entry here if we get preempted by PHYP
	 * between these two statements.
	 */
	slb_shadow_update(ea, ssize, flags, index);

	asm volatile("slbmte  %0,%1" :
		     : "r" (mk_vsid_data(ea, ssize, flags)),
		       "r" (mk_esid_data(ea, ssize, index))
		     : "memory" );
}

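/*
 * Flush the whole SLB and re-create the bolted vmalloc and kernel stack
 * entries.  Callers must have interrupts hard-disabled.
 */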
static void __slb_flush_and_rebolt(void)
{
	/* If you change this, make sure you change SLB_NUM_BOLTED
	 * and PR KVM appropriately too. */
	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
	unsigned long ksp_esid_data, ksp_vsid_data;

	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	ksp_esid_data = mk_esid_data(get_paca()->kstack, mmu_kernel_ssize, KSTACK_INDEX);
	if ((ksp_esid_data & ~0xfffffffUL) <= PAGE_OFFSET) {
		ksp_esid_data &= ~SLB_ESID_V;
		ksp_vsid_data = 0;
		slb_shadow_clear(KSTACK_INDEX);
	} else {
		/* Update stack entry; others don't change */
		slb_shadow_update(get_paca()->kstack, mmu_kernel_ssize, lflags, KSTACK_INDEX);
		ksp_vsid_data =
			be64_to_cpu(get_slb_shadow()->save_area[KSTACK_INDEX].vsid);
	}

	/* We need to do this all in asm, so we're sure we don't touch
	 * the stack between the slbia and rebolting it. */
	asm volatile("isync\n"
		     "slbia\n"
		     /* Slot 1 - first VMALLOC segment */
		     "slbmte	%0,%1\n"
		     /* Slot 2 - kernel stack */
		     "slbmte	%2,%3\n"
		     "isync"
		     :: "r"(mk_vsid_data(VMALLOC_START, mmu_kernel_ssize, vflags)),
		        "r"(mk_esid_data(VMALLOC_START, mmu_kernel_ssize, VMALLOC_INDEX)),
		        "r"(ksp_vsid_data),
		        "r"(ksp_esid_data)
		     : "memory");
}

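/*
 * Flush and rebolt the kernel SLB entries and reset the PACA SLB cache.
 * Must be called with interrupts disabled.
 */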
void slb_flush_and_rebolt(void)
{

	WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	__slb_flush_and_rebolt();
	get_paca()->slb_cache_ptr = 0;
}

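/*
 * Re-create the bolted VMALLOC entry with the current vmalloc page size
 * encoding, then flush and rebolt the SLB so the hardware copy is
 * refreshed.
 */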
void slb_vmalloc_update(void)
{
	unsigned long vflags;

	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
	slb_shadow_update(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);
	slb_flush_and_rebolt();
}

/* Helper function to compare esids.  There are four cases to handle.
 * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
 * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
 * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
 * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
 */
static inline int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count;

	/* System is not 1T segment size capable. */
	if (!mmu_has_feature(MMU_FTR_1T_SEGMENT))
		return (GET_ESID(addr1) == GET_ESID(addr2));

	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
				((addr2 >> SID_SHIFT_1T) != 0));

	/* both addresses are < 1T */
	if (esid_1t_count == 0)
		return (GET_ESID(addr1) == GET_ESID(addr2));

	/* One address < 1T, the other > 1T.  Not a match */
	if (esid_1t_count == 1)
		return 0;

	/* Both addresses are > 1T. */
	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long offset;
	unsigned long slbie_data = 0;
	unsigned long pc = KSTK_EIP(tsk);
	unsigned long stack = KSTK_ESP(tsk);
	unsigned long exec_base;

	/*
	 * We need interrupts hard-disabled here, not just soft-disabled,
	 * so that a PMU interrupt can't occur, which might try to access
	 * user memory (to get a stack trace) and possibly cause an SLB miss
	 * which would update the slb_cache/slb_cache_ptr fields in the PACA.
	 */
	hard_irq_disable();
	offset = get_paca()->slb_cache_ptr;
	if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) &&
	    offset <= SLB_CACHE_ENTRIES) {
		int i;
		asm volatile("isync" : : : "memory");
		for (i = 0; i < offset; i++) {
			slbie_data = (unsigned long)get_paca()->slb_cache[i]
				<< SID_SHIFT; /* EA */
			slbie_data |= user_segment_size(slbie_data)
				<< SLBIE_SSIZE_SHIFT;
			slbie_data |= SLBIE_C; /* C set for user addresses */
			asm volatile("slbie %0" : : "r" (slbie_data));
		}
		asm volatile("isync" : : : "memory");
	} else {
		__slb_flush_and_rebolt();
	}

	/* Workaround POWER5 < DD2.1 issue */
	if (offset == 1 || offset > SLB_CACHE_ENTRIES)
		asm volatile("slbie %0" : : "r" (slbie_data));

	get_paca()->slb_cache_ptr = 0;
	copy_mm_to_paca(mm);

	/*
	 * preload some userspace segments into the SLB.
	 * Almost all 32 and 64bit PowerPC executables are linked at
	 * 0x10000000 so it makes sense to preload this segment.
	 */
	exec_base = 0x10000000;

	if (is_kernel_addr(pc) || is_kernel_addr(stack) ||
	    is_kernel_addr(exec_base))
		return;

	slb_allocate(pc);

	if (!esids_match(pc, stack))
		slb_allocate(stack);

	if (!esids_match(pc, exec_base) &&
	    !esids_match(stack, exec_base))
		slb_allocate(exec_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
				      unsigned int immed)
{

	/*
	 * This function patches either an li or a cmpldi instruction with
	 * a new immediate value. This relies on the fact that both li
	 * (which is actually addi) and cmpldi both take a 16-bit immediate
	 * value, and it is situated in the same location in the instruction,
	 * ie. bits 16-31 (Big endian bit order) or the lower 16 bits.
	 * The signedness of the immediate operand differs between the two
	 * instructions however this code is only ever patching a small value,
	 * much less than 1 << 15, so we can get away with it.
	 * To patch the value we read the existing instruction, clear the
	 * immediate value, and or in our new value, then write the instruction
	 * back.
	 */
	unsigned int insn = (*insn_addr & 0xffff0000) | immed;
	patch_instruction(insn_addr, insn);
}

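/*
 * Patch sites in the assembly SLB miss handler; each marks an li/cmpldi
 * instruction whose immediate operand is rewritten by
 * patch_slb_encoding().
 */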
extern u32 slb_miss_kernel_load_linear[];
extern u32 slb_miss_kernel_load_io[];
extern u32 slb_compare_rr_to_size[];
extern u32 slb_miss_kernel_load_vmemmap[];

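/*
 * Change the number of usable SLB entries at runtime and repatch the
 * round-robin limit used by the SLB miss handler.
 */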
void slb_set_size(u16 size)
{
	if (mmu_slb_size == size)
		return;

	mmu_slb_size = size;
	patch_slb_encoding(slb_compare_rr_to_size, mmu_slb_size);
}

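/*
 * Boot-time SLB setup: patch the miss handler encodings for the
 * detected page sizes, invalidate the whole SLB and bolt the linear,
 * vmalloc and (on secondary CPUs) kernel stack entries.
 */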
void slb_initialize(void)
{
	unsigned long linear_llp, vmalloc_llp, io_llp;
	unsigned long lflags, vflags;
	static int slb_encoding_inited;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	unsigned long vmemmap_llp;
#endif

	/* Prepare our SLB miss handler based on our page size */
	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
#endif
	if (!slb_encoding_inited) {
		slb_encoding_inited = 1;
		patch_slb_encoding(slb_miss_kernel_load_linear,
				   SLB_VSID_KERNEL | linear_llp);
		patch_slb_encoding(slb_miss_kernel_load_io,
				   SLB_VSID_KERNEL | io_llp);
		patch_slb_encoding(slb_compare_rr_to_size,
				   mmu_slb_size);

		pr_devel("SLB: linear  LLP = %04lx\n", linear_llp);
		pr_devel("SLB: io      LLP = %04lx\n", io_llp);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
				   SLB_VSID_KERNEL | vmemmap_llp);
		pr_devel("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
#endif
	}

	get_paca()->stab_rr = SLB_NUM_BOLTED;

	lflags = SLB_VSID_KERNEL | linear_llp;
	vflags = SLB_VSID_KERNEL | vmalloc_llp;

	/* Invalidate the entire SLB (even entry 0) & all the ERATS */
	asm volatile("isync":::"memory");
	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
	asm volatile("isync; slbia; isync":::"memory");
	create_shadowed_slbe(PAGE_OFFSET, mmu_kernel_ssize, lflags, LINEAR_INDEX);
	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, VMALLOC_INDEX);

	/* For the boot cpu, we're running on the stack in init_thread_union,
	 * which is in the first segment of the linear mapping, and also
	 * get_paca()->kstack hasn't been initialized yet.
	 * For secondary cpus, we need to bolt the kernel stack entry now.
	 */
	slb_shadow_clear(KSTACK_INDEX);
	if (raw_smp_processor_id() != boot_cpuid &&
	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
		create_shadowed_slbe(get_paca()->kstack,
				     mmu_kernel_ssize, lflags, KSTACK_INDEX);

	asm volatile("isync":::"memory");
}

static void insert_slb_entry(unsigned long vsid, unsigned long ea,
			     int bpsize, int ssize)
{
	unsigned long flags, vsid_data, esid_data;
	enum slb_index index;
	int slb_cache_index;

	/*
	 * We are called with interrupts disabled, so it is safe to access the PACA.
	 */
	VM_WARN_ON(!irqs_disabled());

	/*
	 * We can't take a PMU exception in the following code, so hard
	 * disable interrupts.
	 */
	hard_irq_disable();

	index = get_paca()->stab_rr;

	/*
	 * Simple round-robin replacement of the SLB, starting at SLB_NUM_BOLTED.
	 */
	if (index < (mmu_slb_size - 1))
		index++;
	else
		index = SLB_NUM_BOLTED;

	get_paca()->stab_rr = index;

	flags = SLB_VSID_USER | mmu_psize_defs[bpsize].sllp;
	vsid_data = (vsid << slb_vsid_shift(ssize)) | flags |
		    ((unsigned long) ssize << SLB_VSID_SSIZE_SHIFT);
	esid_data = mk_esid_data(ea, ssize, index);

	/*
	 * No need for an isync before or after this slbmte. The exception
	 * we enter with and the rfid we exit with are context synchronizing.
	 * Also we only handle user segments here.
	 */
	asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)
		     : "memory");

	/*
	 * Now update slb cache entries
	 */
	slb_cache_index = get_paca()->slb_cache_ptr;
	if (slb_cache_index < SLB_CACHE_ENTRIES) {
		/*
		 * We have space in the slb cache for the optimized switch_slb().
		 * Store the top 36 bits of esid_data, as per the ISA.
		 */
		get_paca()->slb_cache[slb_cache_index++] = esid_data >> 28;
		get_paca()->slb_cache_ptr++;
	} else {
		/*
		 * Our cache is full and the current cache content strictly
		 * doesn't indicate the active SLB contents. Bump the ptr
		 * so that switch_slb() will ignore the cache.
		 */
		get_paca()->slb_cache_ptr = SLB_CACHE_ENTRIES + 1;
	}
}

static void handle_multi_context_slb_miss(int context_id, unsigned long ea)
{
	struct mm_struct *mm = current->mm;
	unsigned long vsid;
	int bpsize;

	/*
	 * We are always above 1TB, hence use the high user segment size.
	 */
	vsid = get_vsid(context_id, ea, mmu_highuser_ssize);
	bpsize = get_slice_psize(mm, ea);
	insert_slb_entry(vsid, ea, bpsize, mmu_highuser_ssize);
}

void slb_miss_large_addr(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();
	unsigned long ea = regs->dar;
	int context;

	if (REGION_ID(ea) != USER_REGION_ID)
		goto slb_bad_addr;

	/*
	 * Are we beyond what the page table layout supports?
	 */
	if ((ea & ~REGION_MASK) >= H_PGTABLE_RANGE)
		goto slb_bad_addr;

	/* Lower address should have been handled by asm code */
	if (ea < (1UL << MAX_EA_BITS_PER_CONTEXT))
		goto slb_bad_addr;

	/*
	 * Consider this a bad access if we take an SLB miss
	 * on an address above the addr limit.
	 */
	if (ea >= current->mm->context.slb_addr_limit)
		goto slb_bad_addr;

	context = get_ea_context(&current->mm->context, ea);
	if (!context)
		goto slb_bad_addr;

	handle_multi_context_slb_miss(context, ea);
	exception_exit(prev_state);
	return;

slb_bad_addr:
	if (user_mode(regs))
		_exception(SIGSEGV, regs, SEGV_BNDERR, ea);
	else
		bad_page_fault(regs, ea, SIGSEGV);
	exception_exit(prev_state);
}