mmu_context_32.h
#ifndef _ASM_X86_MMU_CONTEXT_32_H
#define _ASM_X86_MMU_CONTEXT_32_H

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
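	/*
	 * Called when a kernel thread (which has no mm of its own) is
	 * scheduled in: the CPU keeps the previous mm's page tables but is
	 * marked lazy, so a later TLB flush IPI can drop them via leave_mm()
	 * instead of flushing on every shootdown.
	 */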
#ifdef CONFIG_SMP
	unsigned cpu = smp_processor_id();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
#endif
}

static inline void switch_mm(struct mm_struct *prev,
			     struct mm_struct *next,
			     struct task_struct *tsk)
{
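	/*
	 * Called on a context switch: if the mm really changes, update the
	 * cpu_vm_mask bookkeeping, reload %cr3 and, if needed, the LDT;
	 * otherwise only fix up lazy-TLB state.
	 */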
	int cpu = smp_processor_id();

	if (likely(prev != next)) {
		/* stop flush ipis for the previous mm */
		cpu_clear(cpu, prev->cpu_vm_mask);
#ifdef CONFIG_SMP
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		per_cpu(cpu_tlbstate, cpu).active_mm = next;
#endif
		cpu_set(cpu, next->cpu_vm_mask);

		/* Re-load page tables */
		load_cr3(next->pgd);

		/*
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
			load_LDT_nolock(&next->context);
	}
#ifdef CONFIG_SMP
	else {
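		/*
		 * prev == next: this CPU never stopped using next's page
		 * tables, but it may have entered lazy TLB mode meanwhile.
		 */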
		per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
		BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);

		if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
			/* We were in lazy tlb mode and leave_mm disabled
			 * tlb flush IPI delivery. We must reload %cr3.
			 */
			load_cr3(next->pgd);
			load_LDT_nolock(&next->context);
		}
	}
#endif
}

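/*
 * Deactivating an mm on 32-bit x86 only needs to clear %gs: user space
 * uses %gs for TLS, so it may still hold a segment selector referring to
 * the outgoing mm's LDT/GDT TLS entries.
 */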
#define deactivate_mm(tsk, mm)			\
	asm("movl %0,%%gs": :"r" (0));

#endif /* _ASM_X86_MMU_CONTEXT_32_H */