#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>
#include <asm/vsyscall.h>

#define MAX_PATCH_LEN (255-1)

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

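/* Patch SMP alternatives only once, at boot, even on a CPU hotplug
   kernel: enabled with "smp-alt-boot" on the kernel command line. */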
static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) do {				\
	if (debug_alternative)					\
		printk(KERN_DEBUG fmt, args);			\
} while (0)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

/* Use this to add nops to a buffer, then text_poke the whole buffer. */
static void add_nops(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		memcpy(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
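
/*
 * Illustrative example: padding an 11-byte hole consumes the largest
 * table entries first, i.e. one 8-byte nop followed by one 3-byte nop
 * (ASM_NOP_MAX is 8):
 *
 *	unsigned char buf[11];
 *	add_nops(buf, sizeof(buf));
 */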

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not
   handled. Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	char insnbuf[MAX_PATCH_LEN];

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		u8 *instr = a->instr;
		BUG_ON(a->replacementlen > a->instrlen);
		BUG_ON(a->instrlen > sizeof(insnbuf));
		if (!boot_cpu_has(a->cpuid))
			continue;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet; resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(insnbuf, a->replacement, a->replacementlen);
		add_nops(insnbuf + a->replacementlen,
			 a->instrlen - a->replacementlen);
		text_poke(instr, insnbuf, a->instrlen);
	}
}
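
/*
 * Illustrative example (the exact macro expansion lives in
 * <asm/alternative.h>): on i386, mb() is defined roughly as
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 *
 * The first sequence is emitted into .text and an alt_instr entry
 * describing both sequences goes into .altinstructions; on CPUs with
 * the XMM2 feature bit, the loop above copies "mfence" over the
 * original and pads the leftover bytes with nops.
 */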

#ifdef CONFIG_SMP
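
/*
 * The pointers in __smp_locks[] are collected at build time by the
 * LOCK_PREFIX macro, which (sketched; see <asm/alternative.h> for the
 * real definition) wraps every lock prefix roughly like this:
 *
 *	.section .smp_locks,"a"
 *	  .align 4
 *	  .long 661f		# address of the prefix
 *	.previous
 *	661: lock; ...
 *
 * Each entry therefore points at a single 0xf0 byte that the helpers
 * below can toggle between "lock" and a one-byte nop.
 */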

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;
	char insn[1];

	if (noreplace_smp)
		return;

	add_nops(insn, 1);
	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, insn, 1);
	}
}

struct smp_alt_module {
	/* module that owns these ranges; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);
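
/*
 * Note: alternatives_smp_module_add() is called by the module loader
 * for each module that carries a .smp_locks section (the core kernel
 * registers itself below with mod == NULL and name "core kernel"), so
 * module lock prefixes get switched together with everything else.
 */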

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk(KERN_INFO "lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
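
/*
 * Illustrative callers: the CPU hotplug path is expected to call
 * alternatives_smp_switch(1) when a second CPU comes online and
 * alternatives_smp_switch(0) when the machine drops back to one CPU,
 * so a kernel that booted uniprocessor only pays for lock prefixes
 * while they are actually needed.
 */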

#endif

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;
	char insnbuf[MAX_PATCH_LEN];

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		BUG_ON(p->len > MAX_PATCH_LEN);
		/* prep the buffer with the original instructions */
		memcpy(insnbuf, p->instr, p->len);
		used = paravirt_ops.patch(p->instrtype, p->clobbers, insnbuf,
					  (unsigned long)p->instr, p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		add_nops(insnbuf + used, p->len - used);
		text_poke(p->instr, insnbuf, p->len);
	}
}
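
/*
 * Illustrative example: on native hardware, paravirt_ops.patch can
 * replace an indirect call such as
 *
 *	call *paravirt_ops.irq_disable
 *
 * with the single "cli" instruction and return the number of bytes it
 * wrote; the loop above then fills the remainder of the original call
 * site with nops.
 */
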
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	unsigned long flags;

	/* The patching is not fully atomic, so try to avoid local
	   interruptions that might execute the code being patched.
	   Other CPUs are not running. */
	stop_nmi();
#ifdef CONFIG_X86_MCE
	stop_mce();
#endif

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);

	restart_nmi();
#ifdef CONFIG_X86_MCE
	restart_mce();
#endif
}

/*
 * Warning:
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also, no thread may be preempted in the middle of these instructions.
 * And on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
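	/* sync_core() executes a serializing instruction (CPUID) so the
	   CPU discards any speculatively fetched copies of the old code. */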
	sync_core();
	/* Could also do a CLFLUSH here to speed up CPU recovery; but
	   that causes hangs on some VIA CPUs. */
}