#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/memory.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
#include <asm/fixmap.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int __initdata_or_module debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int __initdata_or_module noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#if defined(GENERIC_NOP1) && !defined(CONFIG_X86_64)
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8
    "\t.previous");
extern const unsigned char intelnops[];
static const unsigned char *const __initconst_or_module
intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8
    "\t.previous");
extern const unsigned char k8nops[];
static const unsigned char *const __initconst_or_module
k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#if defined(K7_NOP1) && !defined(CONFIG_X86_64)
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8
    "\t.previous");
extern const unsigned char k7nops[];
static const unsigned char *const __initconst_or_module
k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef P6_NOP1
asm("\t" __stringify(__INITRODATA_OR_MODULE) "\np6nops: "
	P6_NOP1 P6_NOP2 P6_NOP3 P6_NOP4 P6_NOP5 P6_NOP6
	P6_NOP7 P6_NOP8
    "\t.previous");
extern const unsigned char p6nops[];
static const unsigned char *const __initconst_or_module
p6_nops[ASM_NOP_MAX+1] = {
	NULL,
	p6nops,
	p6nops + 1,
	p6nops + 1 + 2,
	p6nops + 1 + 2 + 3,
	p6nops + 1 + 2 + 3 + 4,
	p6nops + 1 + 2 + 3 + 4 + 5,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6,
	p6nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

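/*
 * find_nop_table() picks the NOP table best suited to the boot CPU: the
 * 64-bit variant prefers P6-style NOPL on capable Intel CPUs and falls back
 * to K8 NOPs, while the 32-bit variant also knows about K7 and generic NOPs.
 */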
#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
	    boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return k8_nops;
}

#else /* CONFIG_X86_64 */

static const unsigned char *const *__init_or_module find_nop_table(void)
{
	if (boot_cpu_has(X86_FEATURE_K8))
		return k8_nops;
	else if (boot_cpu_has(X86_FEATURE_K7))
		return k7_nops;
	else if (boot_cpu_has(X86_FEATURE_NOPL))
		return p6_nops;
	else
		return intel_nops;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void __init_or_module add_nops(void *insns, unsigned int len)
{
	const unsigned char *const *noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len);

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void __init_or_module apply_alternatives(struct alt_instr *start,
					 struct alt_instr *end)
{
	struct alt_instr *a;
	u8 insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __func__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__func__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
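		/* Replacement that starts with a 5-byte relative call (0xe8):
		   adjust its displacement, since the bytes were assembled at
		   a->replacement but will run at a->instr. */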
		if (*insnbuf == 0xe8 && a->replacementlen == 5)
		    *(s32 *)(insnbuf + 1) += a->replacement - a->instr;
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke_early(instr, insnbuf, a->instrlen);
	}
}

#ifdef CONFIG_SMP

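/*
 * Turn the DS segment-override bytes recorded in the smp_locks table back
 * into LOCK prefixes, but only for sites inside [text, text_end).
 */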
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		/* turn DS segment override prefix into lock prefix */
		if (**ptr == 0x3e)
			text_poke(*ptr, ((unsigned char []){0xf0}), 1);
	};
	mutex_unlock(&text_mutex);
}

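/*
 * The reverse operation: rewrite LOCK prefixes as DS segment overrides so the
 * locked instructions become plain (cheaper) ones while only one CPU runs.
 */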
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	mutex_lock(&text_mutex);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		/* turn lock prefix into DS segment override prefix */
		if (**ptr == 0xf0)
			text_poke(*ptr, ((unsigned char []){0x3E}), 1);
	};
	mutex_unlock(&text_mutex);
}

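/*
 * Per-module bookkeeping so the lock-prefix sites of a module can be
 * switched between SMP and UP variants after the module has been loaded.
 */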
struct smp_alt_module {
	/* module that owns these lock-prefix fixups (NULL for the core kernel) */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_MUTEX(smp_alt);
static int smp_mode = 1;	/* protected by smp_alt */

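/*
 * Register a module's text range and lock-prefix table so its LOCK prefixes
 * can be toggled along with the rest of the kernel; if we are currently
 * running uniprocessor, the prefixes are removed right away.
 */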
void __init_or_module alternatives_smp_module_add(struct module *mod,
						  char *name,
						  void *locks, void *locks_end,
						  void *text,  void *text_end)
{
	struct smp_alt_module *smp;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__func__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	mutex_lock(&smp_alt);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	mutex_unlock(&smp_alt);
}

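/* Drop a module's entry from smp_alt_modules when the module is unloaded. */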
void __init_or_module alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;

	if (smp_alt_once || noreplace_smp)
		return;

	mutex_lock(&smp_alt);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		mutex_unlock(&smp_alt);
		DPRINTK("%s: %s\n", __func__, item->name);
		kfree(item);
		return;
	}
	mutex_unlock(&smp_alt);
}

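/*
 * Switch every registered text range between the SMP (LOCK-prefixed) and UP
 * variants, e.g. when a second CPU is brought up or the last extra CPU goes
 * away.
 */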
void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;

#ifdef CONFIG_LOCKDEP
	/*
	 * Older binutils section handling bug prevented
	 * alternatives-replacement from working reliably.
	 *
	 * If this still occurs then you should see a hang
	 * or crash shortly after this line:
	 */
	printk("lockdep: fixing up alternatives.\n");
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	mutex_lock(&smp_alt);

	/*
	 * Avoid unnecessary switches because it forces JIT based VMs to
	 * throw away all cached translations, which can be quite costly.
	 */
	if (smp == smp_mode) {
		/* nothing */
	} else if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
		set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	smp_mode = smp;
	mutex_unlock(&smp_alt);
}

#endif

#ifdef CONFIG_PARAVIRT
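/*
 * Rewrite each recorded paravirt call site with whatever code
 * pv_init_ops.patch() emits for it and pad the remainder with NOPs.
 */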
void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
				     struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
					 (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke_early(p->instr, insnbuf, p->len);
	}
}
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

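/*
 * Boot-time driver for all of the above: applies the alternative instruction
 * table, the SMP lock-prefix fixups and the paravirt patches while only the
 * boot CPU is running.
 */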
void __init alternative_instructions(void)
{
	/* The patching is not fully atomic, so try to avoid local interruptions
	   that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();

	/*
	 * Don't stop machine check exceptions while patching.
	 * MCEs only happen when something got corrupted and in this
	 * case we must do something about the corruption.
	 * Ignoring it is worse than an unlikely patching race.
	 * Also machine checks tend to be broadcast and if one CPU
	 * goes into machine check the others follow quickly, so we don't
	 * expect a machine check to cause undue problems during code
	 * patching.
	 */

	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP);
			set_cpu_cap(&cpu_data(0), X86_FEATURE_UP);

			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);

		/* Only switch to UP mode if we don't immediately boot others */
		if (num_present_cpus() == 1 || setup_max_cpus <= 1)
			alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);

	if (smp_alt_once)
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);

	restart_nmi();
}

/**
 * text_poke_early - Update instructions on a live kernel at boot time
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also no thread must be currently preempted in the middle of these
 * instructions. And on the local CPU you need to be protected against NMI
 * or MCE handlers seeing an inconsistent instruction while you patch.
 */
static void *__init_or_module text_poke_early(void *addr, const void *opcode,
					      size_t len)
{
	unsigned long flags;
	local_irq_save(flags);
	memcpy(addr, opcode, len);
	sync_core();
	local_irq_restore(flags);
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	return addr;
}

/**
 * text_poke - Update instructions on a live kernel
 * @addr: address to modify
 * @opcode: source of the copy
 * @len: length to copy
 *
 * Only atomic text poke/set should be allowed when not doing early patching.
 * It means the size must be writable atomically and the address must be aligned
 * in a way that permits an atomic write. It also makes sure we fit on a single
 * page.
 *
 * Note: Must be called under text_mutex.
 */
void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
{
	unsigned long flags;
	char *vaddr;
	struct page *pages[2];
	int i;

	if (!core_kernel_text((unsigned long)addr)) {
		pages[0] = vmalloc_to_page(addr);
		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
	} else {
		pages[0] = virt_to_page(addr);
		WARN_ON(!PageReserved(pages[0]));
		pages[1] = virt_to_page(addr + PAGE_SIZE);
	}
	BUG_ON(!pages[0]);
	local_irq_save(flags);
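	/* Map the target page(s) through the FIX_TEXT_POKE fixmap slots so the
	   bytes can be written even if the kernel text mapping is read-only. */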
	set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
	if (pages[1])
		set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
	vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
	memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
	clear_fixmap(FIX_TEXT_POKE0);
	if (pages[1])
		clear_fixmap(FIX_TEXT_POKE1);
	local_flush_tlb();
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
	for (i = 0; i < len; i++)
		BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
	local_irq_restore(flags);
	return addr;
}