/*  Paravirtualization interfaces
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/bcd.h>
#include <linux/start_kernel.h>

#include <asm/bug.h>
#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/setup.h>
#include <asm/arch_hooks.h>
#include <asm/time.h>
#include <asm/irq.h>
#include <asm/delay.h>
#include <asm/fixmap.h>
#include <asm/apic.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>

/* nop stub: do-nothing default used for optional paravirt_ops hooks */
static void native_nop(void)
{
}

/*
 * Default .banner hook: announce at boot which paravirt backend
 * (paravirt_ops.name) the kernel is running on.
 */
static void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       paravirt_ops.name);
}

/*
 * Memory-map setup entry point: dispatch through paravirt_ops so a
 * hypervisor backend can override the native implementation.
 */
char *memory_setup(void)
{
	return paravirt_ops.memory_setup();
}

/* Simple instruction patching code. */
/*
 * DEF_NATIVE emits the raw native instruction sequence for one paravirt
 * operation into the kernel image, bracketed by start_<name>/end_<name>
 * labels so native_patch() can measure and copy it.
 */
#define DEF_NATIVE(name, code)					\
	extern const char start_##name[], end_##name[];		\
	asm("start_" #name ": " code "; end_" #name ":")
DEF_NATIVE(cli, "cli");
DEF_NATIVE(sti, "sti");
DEF_NATIVE(popf, "push %eax; popf");
DEF_NATIVE(pushf, "pushf; pop %eax");
DEF_NATIVE(pushf_cli, "pushf; pop %eax; cli");
DEF_NATIVE(iret, "iret");
DEF_NATIVE(sti_sysexit, "sti; sysexit");

/* Table mapping each patchable paravirt op to its native replacement. */
static const struct native_insns
{
	const char *start, *end;	/* bounds of the replacement sequence */
} native_insns[] = {
	[PARAVIRT_IRQ_DISABLE] = { start_cli, end_cli },
	[PARAVIRT_IRQ_ENABLE] = { start_sti, end_sti },
	[PARAVIRT_RESTORE_FLAGS] = { start_popf, end_popf },
	[PARAVIRT_SAVE_FLAGS] = { start_pushf, end_pushf },
	[PARAVIRT_SAVE_FLAGS_IRQ_DISABLE] = { start_pushf_cli, end_pushf_cli },
	[PARAVIRT_INTERRUPT_RETURN] = { start_iret, end_iret },
	[PARAVIRT_STI_SYSEXIT] = { start_sti_sysexit, end_sti_sysexit },
};

/*
 * Patch the call site at @insns (of @len bytes) with the native instruction
 * sequence for paravirt op @type, when one exists and fits.  Returns the
 * number of bytes actually written, or @len untouched when no patch applies.
 * @clobbers is accepted for interface compatibility but unused here.
 */
static unsigned native_patch(u8 type, u16 clobbers, void *insns, unsigned len)
{
	const struct native_insns *repl;
	unsigned int native_len;

	/* Out of table range, or no native replacement recorded: leave as-is. */
	if (type >= ARRAY_SIZE(native_insns))
		return len;
	repl = &native_insns[type];
	if (repl->start == NULL)
		return len;

	native_len = repl->end - repl->start;

	/* Replacement would not fit in the patch site: leave as-is. */
	if (native_len > len)
		return len;

	memcpy(insns, repl->start, native_len);
	return native_len;
}

/*
 * Read debug register %db<regno> on bare hardware.  Only DR0-DR3
 * (breakpoint addresses), DR6 (status) and DR7 (control) exist;
 * any other index is a kernel bug.
 */
static unsigned long native_get_debugreg(int regno)
{
	unsigned long val = 0; 	/* Damn you, gcc! */

	switch (regno) {
	case 0:
		asm("movl %%db0, %0" :"=r" (val)); break;
	case 1:
		asm("movl %%db1, %0" :"=r" (val)); break;
	case 2:
		asm("movl %%db2, %0" :"=r" (val)); break;
	case 3:
		asm("movl %%db3, %0" :"=r" (val)); break;
	case 6:
		asm("movl %%db6, %0" :"=r" (val)); break;
	case 7:
		asm("movl %%db7, %0" :"=r" (val)); break;
	default:
		BUG();
	}
	return val;
}

/*
 * Write @value into debug register %db<regno> on bare hardware.
 * Mirrors native_get_debugreg(): only DR0-DR3, DR6 and DR7 are valid.
 */
static void native_set_debugreg(int regno, unsigned long value)
{
	switch (regno) {
	case 0:
		asm("movl %0,%%db0"	: /* no output */ :"r" (value));
		break;
	case 1:
		asm("movl %0,%%db1"	: /* no output */ :"r" (value));
		break;
	case 2:
		asm("movl %0,%%db2"	: /* no output */ :"r" (value));
		break;
	case 3:
		asm("movl %0,%%db3"	: /* no output */ :"r" (value));
		break;
	case 6:
		asm("movl %0,%%db6"	: /* no output */ :"r" (value));
		break;
	case 7:
		asm("movl %0,%%db7"	: /* no output */ :"r" (value));
		break;
	default:
		BUG();
	}
}

/* Interrupt-controller init: dispatch through paravirt_ops for overrides. */
void init_IRQ(void)
{
	paravirt_ops.init_IRQ();
}

/* Clear the task-switched (TS) flag in %cr0. */
static void native_clts(void)
{
	asm volatile ("clts");
}

/* Raw %cr0 read. */
static unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("movl %%cr0,%0\n\t" :"=r" (val));
	return val;
}

/* Raw %cr0 write. */
static void native_write_cr0(unsigned long val)
{
	asm volatile("movl %0,%%cr0": :"r" (val));
}

/* Raw %cr2 read (page-fault linear address). */
static unsigned long native_read_cr2(void)
{
	unsigned long val;
	asm volatile("movl %%cr2,%0\n\t" :"=r" (val));
	return val;
}

/* Raw %cr2 write. */
static void native_write_cr2(unsigned long val)
{
	asm volatile("movl %0,%%cr2": :"r" (val));
}

/* Raw %cr3 read (page-directory base). */
static unsigned long native_read_cr3(void)
{
	unsigned long val;
	asm volatile("movl %%cr3,%0\n\t" :"=r" (val));
	return val;
}

/* Raw %cr3 write; on native hardware this also flushes non-global TLB entries. */
static void native_write_cr3(unsigned long val)
{
	asm volatile("movl %0,%%cr3": :"r" (val));
}

/* Raw %cr4 read.  Faults on CPUs without %cr4; see the _safe variant below. */
static unsigned long native_read_cr4(void)
{
	unsigned long val;
	asm volatile("movl %%cr4,%0\n\t" :"=r" (val));
	return val;
}

/*
 * %cr4 read that tolerates CPUs without %cr4: the exception-table entry
 * makes a faulting access skip the load, leaving the preloaded 0 in @val.
 */
static unsigned long native_read_cr4_safe(void)
{
	unsigned long val;
	/* This could fault if %cr4 does not exist */
	asm("1: movl %%cr4, %0		\n"
		"2:				\n"
		".section __ex_table,\"a\"	\n"
		".long 1b,2b			\n"
		".previous			\n"
		: "=r" (val): "0" (0));
	return val;
}

/* Raw %cr4 write. */
static void native_write_cr4(unsigned long val)
{
	asm volatile("movl %0,%%cr4": :"r" (val));
}

/* Return the current EFLAGS value (pushf/pop pair). */
static unsigned long native_save_fl(void)
{
	unsigned long f;
	asm volatile("pushfl ; popl %0":"=g" (f): /* no input */);
	return f;
}

/* Restore EFLAGS from @f (push/popf pair); clobbers condition codes. */
static void native_restore_fl(unsigned long f)
{
	asm volatile("pushl %0 ; popfl": /* no output */
			     :"g" (f)
			     :"memory", "cc");
}

/* Disable interrupts; "memory" clobber orders it against memory accesses. */
static void native_irq_disable(void)
{
	asm volatile("cli": : :"memory");
}

/* Enable interrupts. */
static void native_irq_enable(void)
{
	asm volatile("sti": : :"memory");
}

/*
 * Enable interrupts and halt.  STI takes effect after the following
 * instruction, so no interrupt can slip in between the enable and the hlt.
 */
static void native_safe_halt(void)
{
	asm volatile("sti; hlt": : :"memory");
}

/* Halt without touching the interrupt flag. */
static void native_halt(void)
{
	asm volatile("hlt": : :"memory");
}

/* Write back and invalidate the CPU caches. */
static void native_wbinvd(void)
{
	asm volatile("wbinvd": : :"memory");
}

/*
 * Read MSR @msr.  On success *err is set to 0 and the 64-bit value is
 * returned in edx:eax ("=A").  A faulting rdmsr is caught via the exception
 * table: the fixup stores -EFAULT in *err and the return value is undefined.
 */
static unsigned long long native_read_msr(unsigned int msr, int *err)
{
	unsigned long long val;

	asm volatile("2: rdmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %3,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
 		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long 	2b,3b\n\t"
		     ".previous"
		     : "=r" (*err), "=A" (val)
		     : "c" (msr), "i" (-EFAULT));

	return val;
}

/*
 * Write the 64-bit @val to MSR @msr (low half in eax, high half in edx).
 * Returns 0 on success; a faulting wrmsr is caught via the exception table
 * and the fixup returns -EFAULT instead.
 */
static int native_write_msr(unsigned int msr, unsigned long long val)
{
	int err;
	asm volatile("2: wrmsr ; xorl %0,%0\n"
		     "1:\n\t"
		     ".section .fixup,\"ax\"\n\t"
		     "3:  movl %4,%0 ; jmp 1b\n\t"
		     ".previous\n\t"
 		     ".section __ex_table,\"a\"\n"
		     "   .align 4\n\t"
		     "   .long 	2b,3b\n\t"
		     ".previous"
		     : "=a" (err)
		     : "c" (msr), "0" ((u32)val), "d" ((u32)(val>>32)),
		       "i" (-EFAULT));
	return err;
}

/* Read the 64-bit time-stamp counter (rdtsc returns it in edx:eax). */
static unsigned long long native_read_tsc(void)
{
	unsigned long long val;
	asm volatile("rdtsc" : "=A" (val));
	return val;
}

/*
 * Read a performance-monitoring counter.
 * NOTE(review): rdpmc selects the counter from %ecx, which this function
 * does not set — presumably callers arrange %ecx beforehand; verify.
 */
static unsigned long long native_read_pmc(void)
{
	unsigned long long val;
	asm volatile("rdpmc" : "=A" (val));
	return val;
}

/* Load the task register with the GDT's TSS selector. */
static void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}

/* Load the global descriptor table register from *dtr. */
static void native_load_gdt(const struct Xgt_desc_struct *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

/* Load the interrupt descriptor table register from *dtr. */
static void native_load_idt(const struct Xgt_desc_struct *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

/* Store the current GDT register into *dtr. */
static void native_store_gdt(struct Xgt_desc_struct *dtr)
{
	asm ("sgdt %0":"=m" (*dtr));
}

/* Store the current IDT register into *dtr. */
static void native_store_idt(struct Xgt_desc_struct *dtr)
{
	asm ("sidt %0":"=m" (*dtr));
}

/* Return the current task register (TSS selector). */
static unsigned long native_store_tr(void)
{
	unsigned long tr;
	asm ("str %0":"=r" (tr));
	return tr;
}

/*
 * Copy the thread's three TLS descriptors (tls_array[0..2]) into the
 * GDT of @cpu, starting at GDT_ENTRY_TLS_MIN.
 */
static void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
#define C(i) get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]
	C(0); C(1); C(2);
#undef C
}

/*
 * Write one 8-byte descriptor into table @dt at index @entry:
 * the low 32-bit word first, then the high word.
 */
static inline void native_write_dt_entry(void *dt, int entry, u32 entry_low, u32 entry_high)
{
	/* Each descriptor occupies two consecutive 32-bit slots. */
	u32 *slot = (u32 *)dt + entry * 2;

	slot[0] = entry_low;
	slot[1] = entry_high;
}

/* Write an LDT descriptor; identical layout to GDT/IDT entries. */
static void native_write_ldt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}

/* Write a GDT descriptor. */
static void native_write_gdt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}

/* Write an IDT gate descriptor. */
static void native_write_idt_entry(void *dt, int entrynum, u32 low, u32 high)
{
	native_write_dt_entry(dt, entrynum, low, high);
}

/*
 * Install the thread's ring-0 stack pointer into the TSS, and keep the
 * SYSENTER code-segment MSR in sync when the thread's sysenter_cs differs
 * from what the TSS currently holds.
 */
static void native_load_esp0(struct tss_struct *tss,
				      struct thread_struct *thread)
{
	tss->esp0 = thread->esp0;

	/* This can only happen when SEP is enabled, no need to test "SEP"arately */
	if (unlikely(tss->ss1 != thread->sysenter_cs)) {
		tss->ss1 = thread->sysenter_cs;
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}

/* Short I/O delay: a dummy write to port 0x80, traditionally unused. */
static void native_io_delay(void)
{
	asm volatile("outb %al,$0x80");
}

/* Flush non-global TLB entries (user mappings). */
static void native_flush_tlb(void)
{
	__native_flush_tlb();
}

/*
 * Global pages have to be flushed a bit differently. Not a real
 * performance problem because this does not happen often.
 */
static void native_flush_tlb_global(void)
{
	__native_flush_tlb_global();
}

/* Flush the TLB entry for a single page at @addr. */
static void native_flush_tlb_single(u32 addr)
{
	__native_flush_tlb_single(addr);
}

#ifndef CONFIG_X86_PAE
/* Without PAE a pte fits in one word: a plain store is atomic enough. */
static void native_set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/* Same as native_set_pte; mm/addr are unused on native hardware. */
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

/* Plain one-word pmd store. */
static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	*pmdp = pmdval;
}

#else /* CONFIG_X86_PAE */

/*
 * PAE ptes are 64 bits wide and are written as two 32-bit halves.
 * Write the high half first, with a write barrier before the low half,
 * so a concurrent observer never sees a new low word paired with a
 * stale high word.
 */
static void native_set_pte(pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

/* Same ordering as native_set_pte; mm/addr are unused on native hardware. */
static void native_set_pte_at(struct mm_struct *mm, u32 addr, pte_t *ptep, pte_t pte)
{
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

/*
 * Make a pte present: first zero the low word (which holds the present
 * bit) so the entry is invisible while the high word is updated, then
 * fill in high and finally low, each step separated by a write barrier.
 */
static void native_set_pte_present(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;
}

/* Atomic 64-bit pte store via cmpxchg8b (set_64bit). */
static void native_set_pte_atomic(pte_t *ptep, pte_t pteval)
{
	set_64bit((unsigned long long *)ptep,pte_val(pteval));
}

/* Atomic 64-bit pmd store. */
static void native_set_pmd(pmd_t *pmdp, pmd_t pmdval)
{
	set_64bit((unsigned long long *)pmdp,pmd_val(pmdval));
}

/* Plain pud store. */
static void native_set_pud(pud_t *pudp, pud_t pudval)
{
	*pudp = pudval;
}

/* Clear a pte: low word (present bit) first, barrier, then high word. */
static void native_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	ptep->pte_low = 0;
	smp_wmb();
	ptep->pte_high = 0;
}

/* Clear a pmd: low 32 bits first, barrier, then the high 32 bits. */
static void native_pmd_clear(pmd_t *pmd)
{
	u32 *tmp = (u32 *)pmd;
	*tmp = 0;
	smp_wmb();
	*(tmp + 1) = 0;
}
#endif /* CONFIG_X86_PAE */

/* These are in entry.S */
extern void native_iret(void);
extern void native_irq_enable_sysexit(void);

/* Run the backend's banner hook once core initcalls execute. */
static int __init print_banner(void)
{
	paravirt_ops.banner();
	return 0;
}
core_initcall(print_banner);

/*
 * The default paravirt_ops: every hook points at the bare-hardware
 * implementation.  Hypervisor backends replace individual entries.
 */
struct paravirt_ops paravirt_ops = {
	.name = "bare hardware",
	.paravirt_enabled = 0,
	.kernel_rpl = 0,

 	.patch = native_patch,
	.banner = default_banner,
	.arch_setup = native_nop,
	.memory_setup = machine_specific_memory_setup,
	.get_wallclock = native_get_wallclock,
	.set_wallclock = native_set_wallclock,
	.time_init = time_init_hook,
	.init_IRQ = native_init_IRQ,

	/* CPU state: cpuid, debug and control registers. */
	.cpuid = native_cpuid,
	.get_debugreg = native_get_debugreg,
	.set_debugreg = native_set_debugreg,
	.clts = native_clts,
	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,
	.read_cr2 = native_read_cr2,
	.write_cr2 = native_write_cr2,
	.read_cr3 = native_read_cr3,
	.write_cr3 = native_write_cr3,
	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = native_write_cr4,
	/* Interrupt flag and halt primitives. */
	.save_fl = native_save_fl,
	.restore_fl = native_restore_fl,
	.irq_disable = native_irq_disable,
	.irq_enable = native_irq_enable,
	.safe_halt = native_safe_halt,
	.halt = native_halt,
	.wbinvd = native_wbinvd,
	/* MSRs and counters. */
	.read_msr = native_read_msr,
	.write_msr = native_write_msr,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,
	.get_scheduled_cycles = native_read_tsc,
	/* Descriptor tables and segment state. */
	.load_tr_desc = native_load_tr_desc,
	.set_ldt = native_set_ldt,
	.load_gdt = native_load_gdt,
	.load_idt = native_load_idt,
	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = native_store_tr,
	.load_tls = native_load_tls,
	.write_ldt_entry = native_write_ldt_entry,
	.write_gdt_entry = native_write_gdt_entry,
	.write_idt_entry = native_write_idt_entry,
	.load_esp0 = native_load_esp0,

	.set_iopl_mask = native_set_iopl_mask,
	.io_delay = native_io_delay,
	.const_udelay = __const_udelay,

#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = native_apic_write,
	.apic_write_atomic = native_apic_write_atomic,
	.apic_read = native_apic_read,
	.setup_boot_clock = setup_boot_APIC_clock,
	.setup_secondary_clock = setup_secondary_APIC_clock,
#endif
	.set_lazy_mode = (void *)native_nop,

	/* TLB maintenance. */
	.flush_tlb_user = native_flush_tlb,
	.flush_tlb_kernel = native_flush_tlb_global,
	.flush_tlb_single = native_flush_tlb_single,

	/* Page-table allocation hooks: no-ops on bare hardware. */
	.alloc_pt = (void *)native_nop,
	.alloc_pd = (void *)native_nop,
	.alloc_pd_clone = (void *)native_nop,
	.release_pt = (void *)native_nop,
	.release_pd = (void *)native_nop,

	/* Page-table entry updates. */
	.set_pte = native_set_pte,
	.set_pte_at = native_set_pte_at,
	.set_pmd = native_set_pmd,
	.pte_update = (void *)native_nop,
	.pte_update_defer = (void *)native_nop,
#ifdef CONFIG_X86_PAE
	.set_pte_atomic = native_set_pte_atomic,
	.set_pte_present = native_set_pte_present,
	.set_pud = native_set_pud,
	.pte_clear = native_pte_clear,
	.pmd_clear = native_pmd_clear,
#endif

	/* Kernel-exit paths, implemented in entry.S. */
	.irq_enable_sysexit = native_irq_enable_sysexit,
	.iret = native_iret,

	.startup_ipi_hook = (void *)native_nop,
};

/*
 * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
 * semantics are subject to change. Hence we only do this
 * internal-only export of this, until it gets sorted out and
 * all lowlevel CPU ops used by modules are separately exported.
 */
EXPORT_SYMBOL_GPL(paravirt_ops);