/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mmu_context.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>

25 26 27 28 29 30 31 32 33 34 35 36 37
/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
38 39 40
 *
 * In big endian operation, the two 32 bit words are swapped if accesed by
 * non 64-bit operations.
41 42
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
43
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
44

45
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
46 47
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
48

49
static DEFINE_PER_CPU(atomic64_t, active_asids);
50 51
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
L
Linus Torvalds 已提交
52

53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

80
#ifdef CONFIG_ARM_LPAE
81
static void cpu_set_reserved_ttbr0(void)
82 83 84 85 86
{
	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
87
	cpu_set_ttbr(0, __pa(swapper_pg_dir));
88
	isb();
89 90
}
#else
91
static void cpu_set_reserved_ttbr0(void)
92 93 94 95 96 97 98 99 100
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
101 102
#endif

103 104 105 106 107 108 109 110 111 112 113 114 115 116
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
117 118 119
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
120
	: "=r" (contextidr), "+r" (pid)
121
	: "I" (~ASID_MASK));
122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

138
static void flush_context(unsigned int cpu)
L
Linus Torvalds 已提交
139
{
140
	int i;
141 142 143 144 145 146 147 148 149
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
150 151 152 153 154 155 156 157 158
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
159
			__set_bit(asid & ~ASID_MASK, asid_map);
160 161 162
		}
		per_cpu(reserved_asids, i) = asid;
	}
163 164

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
165
	cpumask_setall(&tlb_flush_pending);
166 167

	if (icache_is_vivt_asid_tagged())
168 169 170
		__flush_icache_all();
}

171
static int is_reserved_asid(u64 asid)
172 173 174
{
	int cpu;
	for_each_possible_cpu(cpu)
175
		if (per_cpu(reserved_asids, cpu) == asid)
176 177 178
			return 1;
	return 0;
}
179

180
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
181
{
182
	u64 asid = atomic64_read(&mm->context.id);
183
	u64 generation = atomic64_read(&asid_generation);
184

185
	if (asid != 0 && is_reserved_asid(asid)) {
186
		/*
187 188
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
189
		 */
190
		asid = generation | (asid & ~ASID_MASK);
191 192 193 194
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
195 196 197
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
198
		 */
199
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
200 201 202 203
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
204
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
205 206
		}
		__set_bit(asid, asid_map);
207
		asid |= generation;
208 209 210
		cpumask_clear(mm_cpumask(mm));
	}

211
	return asid;
212 213
}

214
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
215
{
216
	unsigned long flags;
217
	unsigned int cpu = smp_processor_id();
218
	u64 asid;
219

220 221
	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);
222 223

	/*
224 225
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
226
	 */
227
	cpu_set_reserved_ttbr0();
L
Linus Torvalds 已提交
228

229 230 231
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
232 233
		goto switch_mm_fastpath;

234 235
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
236 237 238 239 240
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}
L
Linus Torvalds 已提交
241

242 243
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
244
		local_flush_tlb_all();
245
	}
246

247
	atomic64_set(&per_cpu(active_asids, cpu), asid);
248
	cpumask_set_cpu(cpu, mm_cpumask(mm));
249 250
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

251
switch_mm_fastpath:
252
	cpu_switch_mm(mm->pgd, mm);
L
Linus Torvalds 已提交
253
}