#ifndef _ASM_X86_PARAVIRT_H
#define _ASM_X86_PARAVIRT_H
/* Various instructions on x86 need to be replaced for
 * para-virtualization: those hooks are defined here. */
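/*
 * Each hook lives in one of the pv_*_ops structures (pv_cpu_ops, pv_mmu_ops,
 * pv_irq_ops, ...) and is reached through the PVOP_CALLn / PVOP_VCALLn
 * macros defined in <asm/paravirt_types.h>.  A wrapper such as read_cr2()
 * below therefore compiles to a patchable indirect call, which the boot-time
 * patcher may later replace with the native instruction sequence.
 */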

#ifdef CONFIG_PARAVIRT
#include <asm/pgtable_types.h>
#include <asm/asm.h>

#include <asm/paravirt_types.h>

#ifndef __ASSEMBLY__
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <asm/frame.h>

static inline void load_sp0(struct tss_struct *tss,
			     struct thread_struct *thread)
{
	PVOP_VCALL2(pv_cpu_ops.load_sp0, tss, thread);
}

/* The paravirtualized CPUID instruction. */
static inline void __cpuid(unsigned int *eax, unsigned int *ebx,
			   unsigned int *ecx, unsigned int *edx)
{
	PVOP_VCALL4(pv_cpu_ops.cpuid, eax, ebx, ecx, edx);
}
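/*
 * Illustrative call (not part of this header): leaf 0 returns the highest
 * supported standard leaf in eax and the vendor string in ebx/edx/ecx.
 *
 *	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
 *
 *	__cpuid(&eax, &ebx, &ecx, &edx);
 */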

/*
 * These special macros can be used to get or set a debugging register
 */
static inline unsigned long paravirt_get_debugreg(int reg)
{
	return PVOP_CALL1(unsigned long, pv_cpu_ops.get_debugreg, reg);
}
#define get_debugreg(var, reg) var = paravirt_get_debugreg(reg)
static inline void set_debugreg(unsigned long val, int reg)
{
	PVOP_VCALL2(pv_cpu_ops.set_debugreg, reg, val);
}
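/*
 * Sketch of a caller (illustrative only): read DR7 and write it back.
 *
 *	unsigned long dr7;
 *
 *	get_debugreg(dr7, 7);
 *	set_debugreg(dr7, 7);
 */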

static inline void clts(void)
{
	PVOP_VCALL0(pv_cpu_ops.clts);
}

static inline unsigned long read_cr0(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr0);
}

static inline void write_cr0(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr0, x);
}

static inline unsigned long read_cr2(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr2);
}

static inline void write_cr2(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr2, x);
}

static inline unsigned long read_cr3(void)
{
	return PVOP_CALL0(unsigned long, pv_mmu_ops.read_cr3);
}

static inline void write_cr3(unsigned long x)
{
	PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
}

static inline unsigned long __read_cr4(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
}

static inline void __write_cr4(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_cr8(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr8);
}

static inline void write_cr8(unsigned long x)
{
	PVOP_VCALL1(pv_cpu_ops.write_cr8, x);
}
#endif

static inline void arch_safe_halt(void)
{
	PVOP_VCALL0(pv_irq_ops.safe_halt);
}

static inline void halt(void)
{
	PVOP_VCALL0(pv_irq_ops.halt);
}

static inline void wbinvd(void)
{
	PVOP_VCALL0(pv_cpu_ops.wbinvd);
}

#define get_kernel_rpl()  (pv_info.kernel_rpl)

static inline u64 paravirt_read_msr(unsigned msr)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_msr, msr);
}

static inline void paravirt_write_msr(unsigned msr,
				      unsigned low, unsigned high)
{
	PVOP_VCALL3(pv_cpu_ops.write_msr, msr, low, high);
}

static inline u64 paravirt_read_msr_safe(unsigned msr, int *err)
{
	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_safe, msr, err);
}

static inline int paravirt_write_msr_safe(unsigned msr,
					  unsigned low, unsigned high)
{
	return PVOP_CALL3(int, pv_cpu_ops.write_msr_safe, msr, low, high);
}

#define rdmsr(msr, val1, val2)			\
do {						\
	u64 _l = paravirt_read_msr(msr);	\
	val1 = (u32)_l;				\
	val2 = _l >> 32;			\
} while (0)

#define wrmsr(msr, val1, val2)			\
do {						\
	paravirt_write_msr(msr, val1, val2);	\
} while (0)

#define rdmsrl(msr, val)			\
do {						\
	val = paravirt_read_msr(msr);		\
} while (0)

static inline void wrmsrl(unsigned msr, u64 val)
{
	wrmsr(msr, (u32)val, (u32)(val>>32));
}
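/*
 * Sketch of a caller (illustrative only; MSR_EFER comes from
 * <asm/msr-index.h>, not from this header):
 *
 *	u64 efer;
 *	u32 lo, hi;
 *
 *	rdmsrl(MSR_EFER, efer);		full 64-bit read
 *	rdmsr(MSR_EFER, lo, hi);	same value as low/high halves
 *	wrmsr(MSR_EFER, lo, hi);	write it back unchanged
 */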

#define wrmsr_safe(msr, a, b)	paravirt_write_msr_safe(msr, a, b)

/* rdmsr with exception handling */
#define rdmsr_safe(msr, a, b)				\
({							\
	int _err;					\
	u64 _l = paravirt_read_msr_safe(msr, &_err);	\
	(*a) = (u32)_l;					\
	(*b) = _l >> 32;				\
	_err;						\
})

static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
{
	int err;

	*p = paravirt_read_msr_safe(msr, &err);
	return err;
}
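/*
 * The _safe variants catch the #GP a bad MSR access would raise and report
 * it instead of crashing.  Sketch of a caller (illustrative only):
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe(msr, &val))
 *		return -EIO;
 */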

static inline unsigned long long paravirt_sched_clock(void)
{
	return PVOP_CALL0(unsigned long long, pv_time_ops.sched_clock);
}

struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;

static inline u64 paravirt_steal_clock(int cpu)
{
	return PVOP_CALL1(u64, pv_time_ops.steal_clock, cpu);
}

static inline unsigned long long paravirt_read_pmc(int counter)
{
	return PVOP_CALL1(u64, pv_cpu_ops.read_pmc, counter);
}

#define rdpmc(counter, low, high)		\
do {						\
	u64 _l = paravirt_read_pmc(counter);	\
	low = (u32)_l;				\
	high = _l >> 32;			\
} while (0)

#define rdpmcl(counter, val) ((val) = paravirt_read_pmc(counter))

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.alloc_ldt, ldt, entries);
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.free_ldt, ldt, entries);
}

static inline void load_TR_desc(void)
{
	PVOP_VCALL0(pv_cpu_ops.load_tr_desc);
}
static inline void load_gdt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_gdt, dtr);
}
static inline void load_idt(const struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.load_idt, dtr);
}
static inline void set_ldt(const void *addr, unsigned entries)
{
	PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
}
static inline void store_idt(struct desc_ptr *dtr)
{
	PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
}
static inline unsigned long paravirt_store_tr(void)
{
	return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
}
#define store_tr(tr)	((tr) = paravirt_store_tr())
static inline void load_TLS(struct thread_struct *t, unsigned cpu)
{
	PVOP_VCALL2(pv_cpu_ops.load_tls, t, cpu);
}

#ifdef CONFIG_X86_64
static inline void load_gs_index(unsigned int gs)
{
	PVOP_VCALL1(pv_cpu_ops.load_gs_index, gs);
}
#endif

static inline void write_ldt_entry(struct desc_struct *dt, int entry,
				   const void *desc)
{
	PVOP_VCALL3(pv_cpu_ops.write_ldt_entry, dt, entry, desc);
}

static inline void write_gdt_entry(struct desc_struct *dt, int entry,
				   void *desc, int type)
{
	PVOP_VCALL4(pv_cpu_ops.write_gdt_entry, dt, entry, desc, type);
}

static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
{
	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
}
static inline void set_iopl_mask(unsigned mask)
{
	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
}

/* The paravirtualized I/O functions */
static inline void slow_down_io(void)
{
	pv_cpu_ops.io_delay();
#ifdef REALLY_SLOW_IO
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
	pv_cpu_ops.io_delay();
#endif
}

static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
	PVOP_VCALL2(pv_mmu_ops.activate_mm, prev, next);
}

static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
	PVOP_VCALL2(pv_mmu_ops.dup_mmap, oldmm, mm);
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
	PVOP_VCALL1(pv_mmu_ops.exit_mmap, mm);
}

static inline void __flush_tlb(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_user);
}
static inline void __flush_tlb_global(void)
{
	PVOP_VCALL0(pv_mmu_ops.flush_tlb_kernel);
}
static inline void __flush_tlb_single(unsigned long addr)
{
	PVOP_VCALL1(pv_mmu_ops.flush_tlb_single, addr);
}

static inline void flush_tlb_others(const struct cpumask *cpumask,
				    struct mm_struct *mm,
				    unsigned long start,
				    unsigned long end)
{
	PVOP_VCALL4(pv_mmu_ops.flush_tlb_others, cpumask, mm, start, end);
}

static inline int paravirt_pgd_alloc(struct mm_struct *mm)
{
	return PVOP_CALL1(int, pv_mmu_ops.pgd_alloc, mm);
}

static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	PVOP_VCALL2(pv_mmu_ops.pgd_free, mm, pgd);
}

static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pte, mm, pfn);
}
static inline void paravirt_release_pte(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pte, pfn);
}

static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pmd, mm, pfn);
}

static inline void paravirt_release_pmd(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pmd, pfn);
}

static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn)
{
	PVOP_VCALL2(pv_mmu_ops.alloc_pud, mm, pfn);
}
static inline void paravirt_release_pud(unsigned long pfn)
{
	PVOP_VCALL1(pv_mmu_ops.release_pud, pfn);
}

static inline void pte_update(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
}
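/*
 * In the page-table accessors below, pteval_t (and pmdval_t/pudval_t/
 * pgdval_t) can be wider than a long on 32-bit PAE kernels, so a 64-bit
 * value is split into two 32-bit arguments for the hook; otherwise it is
 * passed as a single argument.  The sizeof() comparison is resolved at
 * compile time, so the unused branch disappears.  Where a full pte plus
 * three other arguments would need five argument words (set_pte_at(),
 * ptep_modify_prot_commit()), the hook is called directly rather than
 * through a PVOP_VCALL macro.
 */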

static inline pte_t __pte(pteval_t val)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t,
				   pv_mmu_ops.make_pte,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t,
				   pv_mmu_ops.make_pte,
				   val);

	return (pte_t) { .pte = ret };
}

static inline pteval_t pte_val(pte_t pte)
{
	pteval_t ret;

	if (sizeof(pteval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte, (u64)pte.pte >> 32);
	else
		ret = PVOP_CALLEE1(pteval_t, pv_mmu_ops.pte_val,
				   pte.pte);

	return ret;
}

static inline pgd_t __pgd(pgdval_t val)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pgdval_t, pv_mmu_ops.make_pgd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pgdval_t, pv_mmu_ops.make_pgd,
				   val);

	return (pgd_t) { ret };
}

static inline pgdval_t pgd_val(pgd_t pgd)
{
	pgdval_t ret;

	if (sizeof(pgdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd, (u64)pgd.pgd >> 32);
	else
		ret =  PVOP_CALLEE1(pgdval_t, pv_mmu_ops.pgd_val,
				    pgd.pgd);

	return ret;
}

#define  __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep)
{
	pteval_t ret;

	ret = PVOP_CALL3(pteval_t, pv_mmu_ops.ptep_modify_prot_start,
			 mm, addr, ptep);

	return (pte_t) { .pte = ret };
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
					   pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.ptep_modify_prot_commit(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.ptep_modify_prot_commit,
			    mm, addr, ptep, pte.pte);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pte, ptep,
			    pte.pte, (u64)pte.pte >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pte, ptep,
			    pte.pte);
}

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (sizeof(pteval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t pmd)
{
	if (sizeof(pmdval_t) > sizeof(long))
		/* 5 arg words */
		pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
	else
		PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
			    native_pmd_val(pmd));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	pmdval_t val = native_pmd_val(pmd);

	if (sizeof(pmdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pmd, pmdp, val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pmd, pmdp, val);
}

#if CONFIG_PGTABLE_LEVELS >= 3
static inline pmd_t __pmd(pmdval_t val)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pmdval_t, pv_mmu_ops.make_pmd,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pmdval_t, pv_mmu_ops.make_pmd,
				   val);

	return (pmd_t) { ret };
}

static inline pmdval_t pmd_val(pmd_t pmd)
{
	pmdval_t ret;

	if (sizeof(pmdval_t) > sizeof(long))
		ret =  PVOP_CALLEE2(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd, (u64)pmd.pmd >> 32);
	else
		ret =  PVOP_CALLEE1(pmdval_t, pv_mmu_ops.pmd_val,
				    pmd.pmd);

	return ret;
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	pudval_t val = native_pud_val(pud);

	if (sizeof(pudval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pud, pudp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pud, pudp,
			    val);
}
#if CONFIG_PGTABLE_LEVELS == 4
static inline pud_t __pud(pudval_t val)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
		ret = PVOP_CALLEE2(pudval_t, pv_mmu_ops.make_pud,
				   val, (u64)val >> 32);
	else
		ret = PVOP_CALLEE1(pudval_t, pv_mmu_ops.make_pud,
				   val);

	return (pud_t) { ret };
}

static inline pudval_t pud_val(pud_t pud)
{
	pudval_t ret;

	if (sizeof(pudval_t) > sizeof(long))
553 554
		ret =  PVOP_CALLEE2(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud, (u64)pud.pud >> 32);
	else
		ret =  PVOP_CALLEE1(pudval_t, pv_mmu_ops.pud_val,
				    pud.pud);

	return ret;
}

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	pgdval_t val = native_pgd_val(pgd);

	if (sizeof(pgdval_t) > sizeof(long))
		PVOP_VCALL3(pv_mmu_ops.set_pgd, pgdp,
			    val, (u64)val >> 32);
	else
		PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp,
			    val);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */

#ifdef CONFIG_X86_PAE
/* Special-case pte-setting operations for PAE, which can't update a
   64-bit pte atomically */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	PVOP_VCALL3(pv_mmu_ops.set_pte_atomic, ptep,
		    pte.pte, pte.pte >> 32);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	PVOP_VCALL3(pv_mmu_ops.pte_clear, mm, addr, ptep);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	PVOP_VCALL1(pv_mmu_ops.pmd_clear, pmdp);
}
#else  /* !CONFIG_X86_PAE */
static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

#define  __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	PVOP_VCALL1(pv_cpu_ops.start_context_switch, prev);
}

static inline void arch_end_context_switch(struct task_struct *next)
{
	PVOP_VCALL1(pv_cpu_ops.end_context_switch, next);
}

#define  __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.enter);
}

static inline void arch_leave_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}

static inline void arch_flush_lazy_mmu_mode(void)
{
	PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
}

static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
				phys_addr_t phys, pgprot_t flags)
{
	pv_mmu_ops.set_fixmap(idx, phys, flags);
}

#if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)

#ifdef CONFIG_QUEUED_SPINLOCKS

static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
							u32 val)
{
	PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
}

static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
{
	PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
}

static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
}

static __always_inline void pv_kick(int cpu)
{
	PVOP_VCALL1(pv_lock_ops.kick, cpu);
}
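/*
 * pv_wait() and pv_kick() above are the qspinlock slow-path primitives:
 * pv_wait() asks the hypervisor to halt this vCPU until *ptr no longer
 * contains val (or the vCPU is kicked), and pv_kick() wakes up the vCPU
 * identified by cpu.  On bare metal both typically patch down to no-ops.
 */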

#else /* !CONFIG_QUEUED_SPINLOCKS */

static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALLEE2(pv_lock_ops.lock_spinning, lock, ticket);
}

static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
							__ticket_t ticket)
{
	PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
}

#endif /* CONFIG_QUEUED_SPINLOCKS */

#endif /* SMP && PARAVIRT_SPINLOCKS */

#ifdef CONFIG_X86_32
#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
#define PV_RESTORE_REGS "popl %edx; popl %ecx;"

/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
#define PV_RESTORE_ALL_CALLER_REGS	"popl  %ecx;"

#define PV_FLAGS_ARG "0"
#define PV_EXTRA_CLOBBERS
#define PV_VEXTRA_CLOBBERS
#else
/* save and restore all caller-save registers, except return value */
#define PV_SAVE_ALL_CALLER_REGS						\
	"push %rcx;"							\
	"push %rdx;"							\
	"push %rsi;"							\
	"push %rdi;"							\
	"push %r8;"							\
	"push %r9;"							\
	"push %r10;"							\
	"push %r11;"
#define PV_RESTORE_ALL_CALLER_REGS					\
	"pop %r11;"							\
	"pop %r10;"							\
	"pop %r9;"							\
	"pop %r8;"							\
	"pop %rdi;"							\
	"pop %rsi;"							\
	"pop %rdx;"							\
	"pop %rcx;"

/* We save some registers, but saving all of them would be too much.  We
 * clobber all caller-saved registers except the argument register. */
#define PV_SAVE_REGS "pushq %%rdi;"
#define PV_RESTORE_REGS "popq %%rdi;"
#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
#define PV_FLAGS_ARG "D"
#endif

/*
 * Generate a thunk around a function which saves all caller-save
 * registers except for the return value.  This allows C functions to
 * be called from assembler code where fewer than normal registers are
 * available.  It may also help code generation around calls from C
 * code if the common case doesn't use many registers.
 *
 * When a callee is wrapped in a thunk, the caller can assume that all
 * arg regs and all scratch registers are preserved across the
 * call. The return value in rax/eax will not be saved, even for void
 * functions.
 */
#define PV_THUNK_NAME(func) "__raw_callee_save_" #func
#define PV_CALLEE_SAVE_REGS_THUNK(func)					\
	extern typeof(func) __raw_callee_save_##func;			\
									\
	asm(".pushsection .text;"					\
	    ".globl " PV_THUNK_NAME(func) ";"				\
	    ".type " PV_THUNK_NAME(func) ", @function;"			\
	    PV_THUNK_NAME(func) ":"					\
	    FRAME_BEGIN							\
	    PV_SAVE_ALL_CALLER_REGS					\
	    "call " #func ";"						\
	    PV_RESTORE_ALL_CALLER_REGS					\
	    FRAME_END							\
	    "ret;"							\
	    ".popsection")

/* Get a reference to a callee-save function */
#define PV_CALLEE_SAVE(func)						\
	((struct paravirt_callee_save) { __raw_callee_save_##func })

/* Promise that "func" already uses the right calling convention */
#define __PV_IS_CALLEE_SAVE(func)			\
	((struct paravirt_callee_save) { func })
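/*
 * Typical pattern (sketch only; my_save_fl is a hypothetical hook
 * implementation, not something defined in this header):
 *
 *	static unsigned long my_save_fl(void);
 *	PV_CALLEE_SAVE_REGS_THUNK(my_save_fl);
 *
 *	pv_irq_ops.save_fl = PV_CALLEE_SAVE(my_save_fl);
 *
 * Callers then go through the generated __raw_callee_save_my_save_fl thunk,
 * which preserves every caller-saved register except the return value.
 */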

static inline notrace unsigned long arch_local_save_flags(void)
{
	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline notrace void arch_local_irq_restore(unsigned long f)
{
	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline notrace void arch_local_irq_disable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline notrace void arch_local_irq_enable(void)
{
	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline notrace unsigned long arch_local_irq_save(void)
{
	unsigned long f;

	f = arch_local_save_flags();
	arch_local_irq_disable();
	return f;
}


/* Make sure as little as possible of this mess escapes. */
#undef PARAVIRT_CALL
#undef __PVOP_CALL
#undef __PVOP_VCALL
#undef PVOP_VCALL0
#undef PVOP_CALL0
#undef PVOP_VCALL1
#undef PVOP_CALL1
#undef PVOP_VCALL2
#undef PVOP_CALL2
#undef PVOP_VCALL3
#undef PVOP_CALL3
#undef PVOP_VCALL4
#undef PVOP_CALL4

extern void default_banner(void);

#else  /* __ASSEMBLY__ */

#define _PVSITE(ptype, clobbers, ops, word, algn)	\
771:;						\
	ops;					\
772:;						\
	.pushsection .parainstructions,"a";	\
	 .align	algn;				\
	 word 771b;				\
	 .byte ptype;				\
	 .byte 772b-771b;			\
	 .short clobbers;			\
	.popsection
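/*
 * Each _PVSITE use records a patch site in the .parainstructions section:
 * the address of the instruction(s) between the 771: and 772: labels, the
 * operation type, the site length and the clobber mask.  The boot-time
 * patcher walks this section and may rewrite each site in place with a
 * more efficient sequence.
 */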


#define COND_PUSH(set, mask, reg)			\
	.if ((~(set)) & mask); push %reg; .endif
#define COND_POP(set, mask, reg)			\
	.if ((~(set)) & mask); pop %reg; .endif

#ifdef CONFIG_X86_64

#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_RAX, rax);		\
	COND_PUSH(set, CLBR_RCX, rcx);		\
	COND_PUSH(set, CLBR_RDX, rdx);		\
	COND_PUSH(set, CLBR_RSI, rsi);		\
	COND_PUSH(set, CLBR_RDI, rdi);		\
	COND_PUSH(set, CLBR_R8, r8);		\
	COND_PUSH(set, CLBR_R9, r9);		\
	COND_PUSH(set, CLBR_R10, r10);		\
	COND_PUSH(set, CLBR_R11, r11)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_R11, r11);		\
	COND_POP(set, CLBR_R10, r10);		\
	COND_POP(set, CLBR_R9, r9);		\
	COND_POP(set, CLBR_R8, r8);		\
	COND_POP(set, CLBR_RDI, rdi);		\
	COND_POP(set, CLBR_RSI, rsi);		\
	COND_POP(set, CLBR_RDX, rdx);		\
	COND_POP(set, CLBR_RCX, rcx);		\
	COND_POP(set, CLBR_RAX, rax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 8)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .quad, 8)
#define PARA_INDIRECT(addr)	*addr(%rip)
#else
#define PV_SAVE_REGS(set)			\
	COND_PUSH(set, CLBR_EAX, eax);		\
	COND_PUSH(set, CLBR_EDI, edi);		\
	COND_PUSH(set, CLBR_ECX, ecx);		\
	COND_PUSH(set, CLBR_EDX, edx)
#define PV_RESTORE_REGS(set)			\
	COND_POP(set, CLBR_EDX, edx);		\
	COND_POP(set, CLBR_ECX, ecx);		\
	COND_POP(set, CLBR_EDI, edi);		\
	COND_POP(set, CLBR_EAX, eax)

#define PARA_PATCH(struct, off)        ((PARAVIRT_PATCH_##struct + (off)) / 4)
#define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
#define PARA_INDIRECT(addr)	*%cs:addr
#endif

#define INTERRUPT_RETURN						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

#define DISABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_disable), clobbers, \
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_disable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#define ENABLE_INTERRUPTS(clobbers)					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_irq_enable), clobbers,	\
		  PV_SAVE_REGS(clobbers | CLBR_CALLEE_SAVE);		\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_irq_enable);	\
		  PV_RESTORE_REGS(clobbers | CLBR_CALLEE_SAVE);)

#ifdef CONFIG_X86_32
#define GET_CR0_INTO_EAX				\
	push %ecx; push %edx;				\
	call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);	\
	pop %edx; pop %ecx
#else	/* !CONFIG_X86_32 */

/*
 * If swapgs is used while the userspace stack is still current,
 * there's no way to call a pvop.  The PV replacement *must* be
 * inlined, or the swapgs instruction must be trapped and emulated.
 */
#define SWAPGS_UNSAFE_STACK						\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  swapgs)

/*
 * Note: swapgs is very special, and in practice is either going to be
 * implemented with a single "swapgs" instruction or something very
 * special.  Either way, we don't need to save any registers for
 * it.
 */
#define SWAPGS								\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_swapgs), CLBR_NONE,	\
		  call PARA_INDIRECT(pv_cpu_ops+PV_CPU_swapgs)		\
		 )

#define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
		  CLBR_NONE,						\
		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))

#define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\
		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_usergs_sysret64))
#endif	/* CONFIG_X86_32 */

#endif /* __ASSEMBLY__ */
#else  /* CONFIG_PARAVIRT */
# define default_banner x86_init_noop
#ifndef __ASSEMBLY__
static inline void paravirt_arch_dup_mmap(struct mm_struct *oldmm,
					  struct mm_struct *mm)
{
}

static inline void paravirt_arch_exit_mmap(struct mm_struct *mm)
{
}
#endif /* __ASSEMBLY__ */
#endif /* !CONFIG_PARAVIRT */
#endif /* _ASM_X86_PARAVIRT_H */