#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

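/* Allocate a fresh hardware context for "mm" from mmu_context_bmap,
 * bumping the context version kept in tlb_context_cache when the
 * bitmap wraps.  Serialized by ctx_alloc_lock.
 */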
extern void get_new_mmu_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
extern void smp_new_mmu_context_version(void);
#else
#define smp_new_mmu_context_version() do { } while (0)
#endif

extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void __tsb_context_switch(unsigned long pgd_pa,
				 struct tsb_config *tsb_base,
				 struct tsb_config *tsb_huge,
				 unsigned long tsb_descr_pa);

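/* Hand this mm's MMU state to the low-level switch code: the physical
 * address of the page-table base, the base-page TSB config, the
 * huge-page TSB config when CONFIG_HUGETLB_PAGE is set and one has
 * been allocated (NULL otherwise), and the physical address of the
 * TSB descriptor array.
 */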
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[0],
#ifdef CONFIG_HUGETLB_PAGE
			     (mm->context.tsb_block[1].tsb ?
			      &mm->context.tsb_block[1] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[0]));
}

extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
#ifdef CONFIG_SMP
extern void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
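/* The stxa at label 661 targets the sun4u D-MMU (ASI_DMMU); the
 * .sun4v_1insn_patch entry lets the boot-time patcher replace it with
 * a store to ASI_MMU when running on a sun4v hypervisor.
 */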
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context.  Interrupts are disabled.  */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu;

	if (unlikely(mm == &init_mm))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0			CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 * address space A via enter_lazy_tlb
	 *					run address space A
	 *					set cpu1's bit in cpu_vm_mask
	 *					flush_tlb_pending()
	 *					reset cpu_vm_mask to just cpu1
	 *					TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 * At that point cpu0 continues to use a stale TSB, the one from
	 * before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 * cpu0 to update its TSB because at that point the cpu_vm_mask
	 * only had cpu1 set in it.
	 */
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */