#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <asm/fixmap.h>
#include <asm/irq_vectors.h>

#include <linux/smp.h>
#include <linux/percpu.h>

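/*
 * Convert the userspace segment description passed to set_thread_area()
 * and modify_ldt() into a hardware segment descriptor.  The DPL is forced
 * to 3 and the 'l' (64-bit code) bit is refused below, so userspace cannot
 * install a kernel-privilege or long-mode segment this way.
 *
 * A minimal usage sketch (caller-side names are assumed, not part of this
 * header):
 *
 *	struct desc_struct d;
 *
 *	fill_ldt(&d, &info);	   (info is a struct user_desc from userspace)
 *	write_ldt_entry(ldt, n, &d);
 */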
static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
	desc->limit0		= info->limit & 0x0ffff;

	desc->base0		= (info->base_addr & 0x0000ffff);
	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;

	desc->type		= (info->read_exec_only ^ 1) << 1;
	desc->type	       |= info->contents << 2;

	desc->s			= 1;
	desc->dpl		= 0x3;
	desc->p			= info->seg_not_present ^ 1;
	desc->limit1		= (info->limit & 0xf0000) >> 16;
	desc->avl		= info->useable;
	desc->d			= info->seg_32bit;
	desc->g			= info->limit_in_pages;

	desc->base2		= (info->base_addr & 0xff000000) >> 24;
	/*
	 * Don't allow setting of the lm bit. It would confuse
	 * user_64bit_mode and would get overridden by sysret anyway.
	 */
	desc->l			= 0;
}

extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern const struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];

struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

/* Provide a given CPU's original (writeable) GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}

/* Provide the current CPU's original (writeable) GDT */
static inline struct desc_struct *get_current_gdt_rw(void)
{
	return this_cpu_ptr(&gdt_page)->gdt;
}

/* Get the fixmap index for a specific processor */
static inline unsigned int get_cpu_gdt_ro_index(int cpu)
{
	return FIX_GDT_REMAP_BEGIN + cpu;
}

/* Provide the fixmap address of the remapped GDT */
static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
{
	unsigned int idx = get_cpu_gdt_ro_index(cpu);
	return (struct desc_struct *)__fix_to_virt(idx);
}

/* Provide the current read-only GDT */
static inline struct desc_struct *get_current_gdt_ro(void)
{
	return get_cpu_gdt_ro(smp_processor_id());
}

/* Provide the physical address of the GDT page. */
static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
}

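/*
 * Pack an IDT gate descriptor.  The handler address is split across
 * offset_low/offset_middle (and offset_high on 64-bit, where gates are
 * 16 bytes); 'ist' selects an Interrupt Stack Table entry (0 = stay on
 * the current kernel stack) and only exists on 64-bit, where the code
 * segment is always __KERNEL_CS.
 */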
static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate->offset_low	= (u16) func;
	gate->bits.p		= 1;
	gate->bits.dpl		= dpl;
	gate->bits.zero		= 0;
	gate->bits.type		= type;
	gate->offset_middle	= (u16) (func >> 16);
#ifdef CONFIG_X86_64
	gate->segment		= __KERNEL_CS;
	gate->bits.ist		= ist;
	gate->reserved		= 0;
	gate->offset_high	= (u32) (func >> 32);
#else
	gate->segment		= seg;
	gate->bits.ist		= 0;
#endif
}

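/* A descriptor is empty if both 32-bit words of its 8 bytes are zero. */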
static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;

	return !(desc[0] | desc[1]);
}

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_idt(dtr)				native_store_idt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
	memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
	unsigned int size;

	switch (type) {
	case DESC_TSS:	size = sizeof(tss_desc);	break;
	case DESC_LDT:	size = sizeof(ldt_desc);	break;
	default:	size = sizeof(*gdt);		break;
	}

	memcpy(&gdt[entry], desc, size);
}

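/*
 * Build a TSS or LDT system descriptor.  On 64-bit these descriptors are
 * 16 bytes (the extra 'base3' word holds address bits 63:32), so each one
 * occupies two GDT slots.
 */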
static inline void set_tssldt_descriptor(void *d, unsigned long addr,
					 unsigned type, unsigned size)
{
	struct ldttss_desc *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0		= (u16) size;
	desc->base0		= (u16) addr;
	desc->base1		= (addr >> 16) & 0xFF;
	desc->type		= type;
	desc->p			= 1;
	desc->limit1		= (size >> 16) & 0xF;
	desc->base2		= (addr >> 24) & 0xFF;
#ifdef CONFIG_X86_64
	desc->base3		= (u32) (addr >> 32);
#endif
}

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_rw(cpu);
	tss_desc tss;

	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      __KERNEL_TSS_LIMIT);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)

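/*
 * Install a task's LDT.  Loading a null selector into LDTR disables the
 * LDT entirely; otherwise an LDT descriptor pointing at 'addr' is written
 * into this CPU's GDT and loaded from there.  The descriptor limit is
 * inclusive, hence the '- 1' below.
 */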
static inline void native_set_ldt(const void *addr, unsigned int entries)
{
	if (likely(entries == 0))
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();
		ldt_desc ldt;

		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * LDT_ENTRY_SIZE - 1);
		write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
				&ldt, DESC_LDT);
		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

static inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}

/*
 * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
 * a read-only remapping. To prevent a page fault, the GDT is switched to the
 * original writeable version when needed.
 */
#ifdef CONFIG_X86_64
static inline void native_load_tr_desc(void)
{
	struct desc_ptr gdt;
	int cpu = raw_smp_processor_id();
	bool restore = false;
	struct desc_struct *fixmap_gdt;

	native_store_gdt(&gdt);
	fixmap_gdt = get_cpu_gdt_ro(cpu);

	/*
	 * If the current GDT is the read-only fixmap, swap to the original
	 * writeable version. Swap back at the end.
	 */
	if (gdt.address == (unsigned long)fixmap_gdt) {
		load_direct_gdt(cpu);
		restore = true;
	}
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
	if (restore)
		load_fixmap_gdt(cpu);
}
#else
static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
#endif

static inline unsigned long native_store_tr(void)
{
	unsigned long tr;

	asm volatile("str %0":"=r" (tr));

	return tr;
}

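/*
 * Copy the task's TLS descriptors (set up via set_thread_area()) into
 * this CPU's GDT so they take effect; called on context switch.
 */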
static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
	unsigned int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}

DECLARE_PER_CPU(bool, __tss_limit_invalid);

static inline void force_reload_TR(void)
{
	struct desc_struct *d = get_current_gdt_rw();
	tss_desc tss;

	memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));

	/*
	 * LTR requires an available TSS, and the TSS is currently
	 * busy.  Make it available so that LTR will work.
	 */
	tss.type = DESC_TSS;
	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);

	load_TR_desc();
	this_cpu_write(__tss_limit_invalid, false);
}

/*
 * Call this if you need the TSS limit to be correct, which should be the case
 * if and only if you have TIF_IO_BITMAP set or you're switching to a task
 * with TIF_IO_BITMAP set.
 */
static inline void refresh_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(this_cpu_read(__tss_limit_invalid)))
		force_reload_TR();
}

/*
 * If you do something evil that corrupts the cached TSS limit (I'm looking
 * at you, VMX exits), call this function.
 *
 * The optimization here is that the TSS limit only matters for Linux if the
 * IO bitmap is in use.  If the TSS limit gets forced to its minimum value,
 * everything works except that IO bitmap will be ignored and all CPL 3 IO
 * instructions will #GP, which is exactly what we want for normal tasks.
 */
static inline void invalidate_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
		force_reload_TR();
	else
		this_cpu_write(__tss_limit_invalid, true);
}

/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info)					\
	((info)->base_addr		== 0	&&	\
	 (info)->limit			== 0	&&	\
	 (info)->contents		== 0	&&	\
	 (info)->read_exec_only		== 1	&&	\
	 (info)->seg_32bit		== 0	&&	\
	 (info)->limit_in_pages		== 0	&&	\
	 (info)->seg_not_present	== 1	&&	\
	 (info)->useable		== 0)

/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
	return (info->base_addr		== 0 &&
		info->limit		== 0 &&
		info->contents		== 0 &&
		info->read_exec_only	== 0 &&
		info->seg_32bit		== 0 &&
		info->limit_in_pages	== 0 &&
		info->seg_not_present	== 0 &&
		info->useable		== 0);
}

static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}

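/*
 * The segment base and limit are scattered across the descriptor for
 * historical reasons (the 386 extended the 286 layout in place); the
 * accessors below hide the split.
 */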
static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit1 << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit1 = (limit >> 16) & 0xf;
}

#ifdef CONFIG_X86_64
static inline void set_nmi_gate(int gate, void *addr)
{
	gate_desc s;

	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
	write_idt_entry(debug_idt_table, gate, &s);
}
#endif

static inline void _set_gate(int gate, unsigned type, const void *addr,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate_desc s;

	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
	/*
	 * This does not need to be atomic because it is only done once,
	 * at setup time.
	 */
	write_idt_entry(idt_table, gate, &s);
}

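/*
 * Install an interrupt gate in the shared IDT.  A sketch of a typical
 * caller (trap number and handler symbol are illustrative assumptions,
 * not defined in this header):
 *
 *	set_intr_gate(X86_TRAP_DE, divide_error);
 */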
static inline void set_intr_gate(unsigned int n, const void *addr)
{
	BUG_ON(n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
}

extern unsigned long used_vectors[];

static inline void alloc_system_vector(int vector)
{
	BUG_ON(vector < FIRST_SYSTEM_VECTOR);
	if (!test_bit(vector, used_vectors)) {
		set_bit(vector, used_vectors);
	} else {
		BUG();
	}
}

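/*
 * Claim a vector in the used_vectors bitmap and install its handler;
 * BUGs if the vector is already taken, so callers must coordinate
 * vector assignment at init time.
 */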
#define alloc_intr_gate(n, addr)				\
	do {							\
		alloc_system_vector(n);				\
		set_intr_gate(n, addr);				\
	} while (0)

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
}

static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
}

static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
}

static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
{
	BUG_ON((unsigned)n > 0xFF);
	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
}

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
{
	if (this_cpu_read(debug_idt_ctr))
		return true;

	return false;
}

static inline void load_debug_idt(void)
{
	load_idt((const struct desc_ptr *)&debug_idt_descr);
}
#else
static inline bool is_debug_idt_enabled(void)
{
	return false;
}

static inline void load_debug_idt(void)
{
}
#endif

/*
 * load_current_idt() must be called with interrupts disabled to avoid
 * races.  That way the IDT will always be set back to the expected
 * descriptor.  It's also called when a CPU is being initialized, and
 * interrupts don't need to be disabled then, as nothing should be
 * bothering the CPU at that point.
 */
static inline void load_current_idt(void)
{
	if (is_debug_idt_enabled())
		load_debug_idt();
	else
		load_idt((const struct desc_ptr *)&idt_descr);
}

extern void idt_invalidate(void *addr);

#endif /* _ASM_X86_DESC_H */