#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
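/*
 * How these hooks work, roughly: each pv_*_ops structure (pv_cpu_ops,
 * pv_mmu_ops, pv_irq_ops, ...) is a table of function pointers that the
 * chosen backend (native, Xen, ...) fills in at boot.  The PVOP_CALLn and
 * PVOP_VCALLn macros used below emit an indirect call through the selected
 * pointer and record the call site in the .parainstructions section, so
 * the site can later be patched, for instance into the native instruction.
 *
 * As a rough sketch (not the literal macro expansion), a wrapper such as
 *
 *	static inline void write_cr3(unsigned long x)
 *	{
 *		PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 *	}
 *
 * behaves like "pv_mmu_ops.write_cr3(x)" plus that patch-site bookkeeping.
 */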

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}

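/*
 * Control-register accessors.  These go through the pvops tables so a
 * hypervisor can intercept or shadow CR accesses; CR2 and CR3 are routed
 * via pv_mmu_ops since they are MMU state, while CR0/CR4 (and CR8 on
 * 64-bit) live in pv_cpu_ops.
 */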
static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	return PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
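
/*
 * Usage mirrors the native MSR helpers.  A sketch, with "msr" standing in
 * for some MSR number (not a name defined in this file):
 *
 *	u32 lo, hi;
 *	u64 val;
 *	int err;
 *
 *	rdmsr(msr, lo, hi);               low/high 32-bit halves
 *	rdmsrl(msr, val);                 full 64-bit value
 *	err = rdmsr_safe(msr, &lo, &hi);  0 on success, non-zero on fault
 *	wrmsr(msr, lo, hi);
 *
 * The _safe variants go through pv_cpu_ops.read_msr_safe and
 * .write_msr_safe, so a faulting access is reported instead of oopsing.
 */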

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

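/*
 * Steal time: paravirt_steal_clock() reports, per cpu, how long the vcpu
 * has been kept from running by the host.  The static keys below let the
 * scheduler skip the accounting entirely when no backend provides
 * steal-clock support.
 */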
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
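/*
 * pv_cpu_ops.io_delay() stands in for the traditional short port I/O
 * delay (historically an outb to port 0x80); with REALLY_SLOW_IO defined,
 * slow_down_io() issues four delays instead of one.
 */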
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

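/*
 * Remote TLB shootdown: flush_tlb_others() lets the backend flush the
 * given range on the other CPUs in @cpumask on the kernel's behalf (for
 * example via a hypercall) rather than using the native IPI-based flush.
 */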
static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_p4d, mm, pfn);
}

static inline void paravirt_release_p4d(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}

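/*
 * In the pte/pgd conversion helpers below, "sizeof(pteval_t) > sizeof(long)"
 * is true only for 32-bit PAE, where page-table entries are 64 bit but the
 * calling convention passes 32-bit words; the value is then split into two
 * arguments (low word, high word).  On 64-bit (and non-PAE 32-bit) the
 * single-argument form is used.
 */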
static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd);

	return ret;
}

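/*
 * ptep_modify_prot_start/commit bracket a read-modify-write of a live pte
 * as a transaction, so a backend can batch or optimise the update (the
 * generic fallback is typically ptep_get_and_clear followed by
 * set_pte_at).  The direct pv_mmu_ops calls below bypass the PVOP
 * wrappers when the pte does not fit in a single register, i.e. when more
 * than four argument words would be needed.
 */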
#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
			      pud_t *pudp, pud_t pud)
{
	if (sizeof(pudval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
			    native_pud_val(pud));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS >= 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud, (u64)pud.pud >> 32);
	else
		ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud);

	return ret;
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	p4dval_t val = native_p4d_val(p4d);

	if (sizeof(p4dval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_p4d, p4dp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_p4d, p4dp,
			    val);
}

#if CONFIG_PGTABLE_LEVELS >= 5

static inline p4d_t __p4d(p4dval_t val)
{
	p4dval_t ret = PVOP_CALLEE1(p4dval_t, pv_mmu_ops.make_p4d, val);

	return (p4d_t) { ret };
}

static inline p4dval_t p4d_val(p4d_t p4d)
{
	return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d);
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

#endif  /* CONFIG_PGTABLE_LEVELS >= 5 */

static inline void p4d_clear(p4d_t *p4dp)
{
	set_p4d(p4dp, __p4d(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

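/*
 * Lazy MMU mode: between arch_enter_lazy_mmu_mode() and
 * arch_leave_lazy_mmu_mode() the backend may queue page-table updates and
 * apply them in one batch; arch_flush_lazy_mmu_mode() forces the queue to
 * drain early.  The native implementation treats these as no-ops.
 */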
#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

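/*
 * Paravirtualised queued spinlocks: the slowpath and unlock hooks let a
 * hypervisor-aware implementation put a waiting vcpu to sleep (pv_wait)
 * and wake it when the lock is handed over (pv_kick) instead of having it
 * spin, and vcpu_is_preempted() lets spin loops give up early when the
 * lock holder's vcpu is not currently running.
 */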
static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}

static __always_inline bool pv_vcpu_is_preempted(long cpu)
{
	return PVOP_CALLEE1(bool, pv_lock_ops.vcpu_is_preempted, cpu);
}

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but not all of them; that would be too much.
 * We clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
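
/*
 * A sketch of typical use by a backend (the names here are illustrative;
 * the real wiring lives in the hypervisor-specific code):
 *
 *	static void some_irq_enable(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(some_irq_enable);
 *
 *	pv_irq_ops.irq_enable = PV_CALLEE_SAVE(some_irq_enable);
 *
 * The thunk macro emits __raw_callee_save_some_irq_enable, which saves and
 * restores the extra registers around the call, and PV_CALLEE_SAVE() wraps
 * a pointer to that thunk in a struct paravirt_callee_save.
 */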

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
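
/*
 * Each use of _PVSITE therefore leaves the patchable instructions between
 * the local labels 771 and 772 in place and records one patch-site entry
 * (site address, operation type, length, clobber mask) in the
 * .parainstructions section for apply_paravirt() to rewrite at patch time.
 */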

#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */