#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/kprobes.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/mce.h>
#include <asm/nmi.h>

#ifdef CONFIG_HOTPLUG_CPU
static int smp_alt_once;

static int __init bootonly(char *str)
{
	smp_alt_once = 1;
	return 1;
}
__setup("smp-alt-boot", bootonly);
#else
#define smp_alt_once 1
#endif

static int debug_alternative;

static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);

static int noreplace_smp;

static int __init setup_noreplace_smp(char *str)
{
	noreplace_smp = 1;
	return 1;
}
__setup("noreplace-smp", setup_noreplace_smp);

#ifdef CONFIG_PARAVIRT
static int noreplace_paravirt = 0;

static int __init setup_noreplace_paravirt(char *str)
{
	noreplace_paravirt = 1;
	return 1;
}
__setup("noreplace-paravirt", setup_noreplace_paravirt);
#endif

#define DPRINTK(fmt, args...) if (debug_alternative) \
	printk(KERN_DEBUG fmt, args)

#ifdef GENERIC_NOP1
/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   get them easily into strings. */
asm("\t.data\nintelnops: "
	GENERIC_NOP1 GENERIC_NOP2 GENERIC_NOP3 GENERIC_NOP4 GENERIC_NOP5 GENERIC_NOP6
	GENERIC_NOP7 GENERIC_NOP8);
extern unsigned char intelnops[];
static unsigned char *intel_nops[ASM_NOP_MAX+1] = {
	NULL,
	intelnops,
	intelnops + 1,
	intelnops + 1 + 2,
	intelnops + 1 + 2 + 3,
	intelnops + 1 + 2 + 3 + 4,
	intelnops + 1 + 2 + 3 + 4 + 5,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6,
	intelnops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K8_NOP1
asm("\t.data\nk8nops: "
	K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
	K8_NOP7 K8_NOP8);
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
	NULL,
	k8nops,
	k8nops + 1,
	k8nops + 1 + 2,
	k8nops + 1 + 2 + 3,
	k8nops + 1 + 2 + 3 + 4,
	k8nops + 1 + 2 + 3 + 4 + 5,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6,
	k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef K7_NOP1
asm("\t.data\nk7nops: "
	K7_NOP1 K7_NOP2 K7_NOP3 K7_NOP4 K7_NOP5 K7_NOP6
	K7_NOP7 K7_NOP8);
extern unsigned char k7nops[];
static unsigned char *k7_nops[ASM_NOP_MAX+1] = {
	NULL,
	k7nops,
	k7nops + 1,
	k7nops + 1 + 2,
	k7nops + 1 + 2 + 3,
	k7nops + 1 + 2 + 3 + 4,
	k7nops + 1 + 2 + 3 + 4 + 5,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6,
	k7nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};
#endif

#ifdef CONFIG_X86_64

extern char __vsyscall_0;
static inline unsigned char** find_nop_table(void)
{
	return k8_nops;
}

#else /* CONFIG_X86_64 */

static struct nop {
	int cpuid;
	unsigned char **noptable;
} noptypes[] = {
	{ X86_FEATURE_K8, k8_nops },
	{ X86_FEATURE_K7, k7_nops },
	{ -1, NULL }
};

static unsigned char** find_nop_table(void)
{
	unsigned char **noptable = intel_nops;
	int i;

	for (i = 0; noptypes[i].cpuid >= 0; i++) {
		if (boot_cpu_has(noptypes[i].cpuid)) {
			noptable = noptypes[i].noptable;
			break;
		}
	}
	return noptable;
}

#endif /* CONFIG_X86_64 */

static void nop_out(void *insns, unsigned int len)
{
	unsigned char **noptable = find_nop_table();

	while (len > 0) {
		unsigned int noplen = len;
		if (noplen > ASM_NOP_MAX)
			noplen = ASM_NOP_MAX;
		text_poke(insns, noptable[noplen], noplen);
		insns += noplen;
		len -= noplen;
	}
}
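
/*
 * Illustrative note (derived from the tables above): noptable[n] points at an
 * n-byte NOP sequence, so regions longer than ASM_NOP_MAX (8 bytes, matching
 * the eight NOPs defined above) are padded in chunks.  Clearing 10 bytes, for
 * example, becomes:
 *
 *	nop_out(insns, 10);
 *	  -> text_poke(insns,     noptable[8], 8);
 *	  -> text_poke(insns + 8, noptable[2], 2);
 */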

extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[];

/* Replace instructions with better alternatives for this CPU type.
   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   Tough. Make sure you disable such features by hand. */

void apply_alternatives(struct alt_instr *start, struct alt_instr *end)
{
	struct alt_instr *a;
	u8 *instr;
	int diff;

	DPRINTK("%s: alt table %p -> %p\n", __FUNCTION__, start, end);
	for (a = start; a < end; a++) {
		BUG_ON(a->replacementlen > a->instrlen);
		if (!boot_cpu_has(a->cpuid))
			continue;
		instr = a->instr;
#ifdef CONFIG_X86_64
		/* vsyscall code is not mapped yet. resolve it manually. */
		if (instr >= (u8 *)VSYSCALL_START && instr < (u8*)VSYSCALL_END) {
			instr = __va(instr - (u8*)VSYSCALL_START + (u8*)__pa_symbol(&__vsyscall_0));
			DPRINTK("%s: vsyscall fixup: %p => %p\n",
				__FUNCTION__, a->instr, instr);
		}
#endif
		memcpy(instr, a->replacement, a->replacementlen);
		diff = a->instrlen - a->replacementlen;
		nop_out(instr + a->replacementlen, diff);
	}
}
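
/*
 * Rough sketch of how entries reach this table (hedged; see <asm/alternative.h>
 * for the real macros): a use along the lines of
 *
 *	alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
 *
 * leaves the old instruction in .text, stashes the replacement in a separate
 * section, and records a struct alt_instr (instr, replacement, cpuid,
 * instrlen, replacementlen) in the __alt_instructions table walked above.
 * If the boot CPU has the feature bit, the replacement is copied over the
 * original and any leftover bytes are NOP-padded via nop_out().
 */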

#ifdef CONFIG_SMP

static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		text_poke(*ptr, ((unsigned char []){0xf0}), 1); /* add lock prefix */
	}
}

static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end)
{
	u8 **ptr;

	if (noreplace_smp)
		return;

	for (ptr = start; ptr < end; ptr++) {
		if (*ptr < text)
			continue;
		if (*ptr > text_end)
			continue;
		nop_out(*ptr, 1);
	}
}
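
/*
 * Background sketch (not authoritative; see LOCK_PREFIX in <asm/alternative.h>):
 * locking primitives built with LOCK_PREFIX emit the 0xf0 lock byte in front
 * of the instruction and record that byte's address in the .smp_locks section
 * (the __smp_locks[] table declared earlier).  alternatives_smp_unlock()
 * overwrites the byte with a one-byte NOP for UP operation;
 * alternatives_smp_lock() puts the 0xf0 back when more CPUs come online.
 */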

struct smp_alt_module {
	/* the module that owns these ranges; NULL for the core kernel */
	struct module	*mod;
	char		*name;

	/* ptrs to lock prefixes */
	u8		**locks;
	u8		**locks_end;

	/* .text segment, needed to avoid patching init code ;) */
	u8		*text;
	u8		*text_end;

	struct list_head next;
};
static LIST_HEAD(smp_alt_modules);
static DEFINE_SPINLOCK(smp_alt);

void alternatives_smp_module_add(struct module *mod, char *name,
				 void *locks, void *locks_end,
				 void *text,  void *text_end)
{
	struct smp_alt_module *smp;
	unsigned long flags;

	if (noreplace_smp)
		return;

	if (smp_alt_once) {
		if (boot_cpu_has(X86_FEATURE_UP))
			alternatives_smp_unlock(locks, locks_end,
						text, text_end);
		return;
	}

	smp = kzalloc(sizeof(*smp), GFP_KERNEL);
	if (NULL == smp)
		return; /* we'll run the (safe but slow) SMP code then ... */

	smp->mod	= mod;
	smp->name	= name;
	smp->locks	= locks;
	smp->locks_end	= locks_end;
	smp->text	= text;
	smp->text_end	= text_end;
	DPRINTK("%s: locks %p -> %p, text %p -> %p, name %s\n",
		__FUNCTION__, smp->locks, smp->locks_end,
		smp->text, smp->text_end, smp->name);

	spin_lock_irqsave(&smp_alt, flags);
	list_add_tail(&smp->next, &smp_alt_modules);
	if (boot_cpu_has(X86_FEATURE_UP))
		alternatives_smp_unlock(smp->locks, smp->locks_end,
					smp->text, smp->text_end);
	spin_unlock_irqrestore(&smp_alt, flags);
}
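
/*
 * Usage note: the core kernel registers its own ranges with mod == NULL and
 * name "core kernel" from alternative_instructions() below; the module loader
 * is expected to call this for each module's .smp_locks/.text ranges as well
 * (an assumption about the caller, not something enforced here), so module
 * lock prefixes get switched along with everything else.
 */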

void alternatives_smp_module_del(struct module *mod)
{
	struct smp_alt_module *item;
	unsigned long flags;

	if (smp_alt_once || noreplace_smp)
		return;

	spin_lock_irqsave(&smp_alt, flags);
	list_for_each_entry(item, &smp_alt_modules, next) {
		if (mod != item->mod)
			continue;
		list_del(&item->next);
		spin_unlock_irqrestore(&smp_alt, flags);
		DPRINTK("%s: %s\n", __FUNCTION__, item->name);
		kfree(item);
		return;
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}

void alternatives_smp_switch(int smp)
{
	struct smp_alt_module *mod;
	unsigned long flags;

#ifdef CONFIG_LOCKDEP
	/*
	 * A not yet fixed binutils section handling bug prevents
	 * alternatives-replacement from working reliably, so turn
	 * it off:
	 */
	printk("lockdep: not fixing up alternatives.\n");
	return;
#endif

	if (noreplace_smp || smp_alt_once)
		return;
	BUG_ON(!smp && (num_online_cpus() > 1));

	spin_lock_irqsave(&smp_alt, flags);
	if (smp) {
		printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
		clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_lock(mod->locks, mod->locks_end,
					      mod->text, mod->text_end);
	} else {
		printk(KERN_INFO "SMP alternatives: switching to UP code\n");
		set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
		set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
		list_for_each_entry(mod, &smp_alt_modules, next)
			alternatives_smp_unlock(mod->locks, mod->locks_end,
						mod->text, mod->text_end);
	}
	spin_unlock_irqrestore(&smp_alt, flags);
}
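
/*
 * alternatives_smp_switch(1) / alternatives_smp_switch(0) is intended to be
 * called when the system goes from one online CPU to several, or back again
 * (e.g. from the CPU bring-up/hotplug path); that is an assumption about the
 * callers.  The BUG_ON() above only guards against switching to UP code while
 * more than one CPU is still online.
 */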

#endif

#ifdef CONFIG_PARAVIRT
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	if (noreplace_paravirt)
		return;

	for (p = start; p < end; p++) {
		unsigned int used;

		used = paravirt_ops.patch(p->instrtype, p->clobbers, p->instr,
					  p->len);

		BUG_ON(used > p->len);

		/* Pad the rest with nops */
		nop_out(p->instr + used, p->len - used);
	}
}
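
/*
 * Sketch of the mechanism (hedged; the details live in the paravirt macros and
 * in paravirt_ops.patch): each paravirt_patch_site describes a patchable
 * instruction range emitted at a paravirt call site.  The patch hook may
 * rewrite up to p->len bytes in place, for instance turning an indirect call
 * into a direct call or a short inline sequence, and returns how many bytes it
 * actually used; the remainder is NOP-padded so the site keeps its length.
 */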
extern struct paravirt_patch_site __start_parainstructions[],
	__stop_parainstructions[];
#endif	/* CONFIG_PARAVIRT */

void __init alternative_instructions(void)
{
	unsigned long flags;

	/* The patching is not fully atomic, so try to avoid local interrupt
	   sources that might execute the code while it is being patched.
	   Other CPUs are not running yet. */
	stop_nmi();
#ifdef CONFIG_MCE
	stop_mce();
#endif

	local_irq_save(flags);
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* switch to patch-once-at-boottime-only mode and free the
	 * tables in case we know the number of CPUs will never ever
	 * change */
#ifdef CONFIG_HOTPLUG_CPU
	if (num_possible_cpus() < 2)
		smp_alt_once = 1;
#endif

#ifdef CONFIG_SMP
	if (smp_alt_once) {
		if (1 == num_possible_cpus()) {
			printk(KERN_INFO "SMP alternatives: switching to UP code\n");
			set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
			set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
			alternatives_smp_unlock(__smp_locks, __smp_locks_end,
						_text, _etext);
		}
		free_init_pages("SMP alternatives",
				(unsigned long)__smp_locks,
				(unsigned long)__smp_locks_end);
	} else {
		alternatives_smp_module_add(NULL, "core kernel",
					    __smp_locks, __smp_locks_end,
					    _text, _etext);
		alternatives_smp_switch(0);
	}
#endif
	apply_paravirt(__parainstructions, __parainstructions_end);
	local_irq_restore(flags);

	restart_nmi();
#ifdef CONFIG_MCE
	restart_mce();
#endif
}
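
/*
 * Note (an assumption consistent with the comments above): this runs exactly
 * once from the early arch boot code, while only the boot CPU is executing;
 * NMIs and MCEs are quiesced around the patching because the writes are not
 * atomic.
 */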

/*
 * Warning:
 * When you use this code to patch more than one byte of an instruction
 * you need to make sure that other CPUs cannot execute this code in parallel.
 * Also, no thread may currently be preempted in the middle of these instructions.
 * And on the local CPU you need to be protected against NMI or MCE handlers
 * seeing an inconsistent instruction while you patch.
 */
void __kprobes text_poke(void *addr, unsigned char *opcode, int len)
{
	memcpy(addr, opcode, len);
	sync_core();
	/* Not strictly needed, but can speed up CPU recovery. The cross-cacheline
	   case is ignored. */
	if (cpu_has_clflush)
		asm("clflush (%0) " :: "r" (addr) : "memory");
}
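
/*
 * Illustrative use, subject to the warning above: the UP patching path ends up
 * replacing a single 0xf0 lock-prefix byte with a one-byte NOP through this
 * helper, conceptually
 *
 *	text_poke(addr, "\x90", 1);	(0x90 being the one-byte NOP opcode)
 */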