/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32-bit words are swapped if accessed
 * by non-64-bit operations.
 */
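
/*
 * Each mm carries a 64-bit context ID: the hardware ASID occupies the
 * low ASID_BITS and an allocator "generation" number sits above it.
 * Once every ASID in the current generation has been handed out, the
 * generation is bumped and the ASID space is recycled (a "rollover").
 */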
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

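/*
 * active_asids holds the ASID currently live on each CPU (zeroed by a
 * rollover); reserved_asids preserves each CPU's last ASID across a
 * rollover; tlb_flush_pending marks CPUs that must flush their TLB
 * before running with a recycled ASID.
 */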
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

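/*
 * Erratum 798181 workaround: compute the set of CPUs that may still be
 * running with the ASID being invalidated, so the caller can restrict
 * its IPI broadcast to just those CPUs.
 */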
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

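/*
 * Install page tables containing only global mappings in TTBR0:
 * swapper_pg_dir with LPAE, otherwise the kernel tables already held
 * in TTBR1.
 */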
#ifdef CONFIG_ARM_LPAE
static void cpu_set_reserved_ttbr0(void)
{
	/*
	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
	 * ASID is set to 0.
	 */
	cpu_set_ttbr(0, __pa(swapper_pg_dir));
	isb();
}
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/* Copy TTBR1 into TTBR0 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

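/*
 * With CONFIG_PID_IN_CONTEXTIDR, mirror the task's PID into the PROCID
 * field of CONTEXTIDR on every thread switch so that debuggers and
 * trace logic can identify the running process.
 */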
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

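/*
 * Called with cpu_asid_lock held on a generation rollover: snapshot
 * each CPU's live ASID into reserved_asids, rebuild the ASID bitmap
 * from those snapshots, and queue the TLB invalidations that must
 * complete before recycled ASIDs are used again.
 */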
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	if (!tlb_ops_need_broadcast())
		cpumask_set_cpu(cpu, &tlb_flush_pending);
	else
		cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

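/* Return 1 if @asid was preserved for some CPU across the last rollover. */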
static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

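/*
 * Allocate a context ID for @mm; called with cpu_asid_lock held. If the
 * old ASID survived the last rollover, simply stamp it with the current
 * generation; otherwise take a free ASID from the bitmap, rolling the
 * generation over once more if the bitmap is full.
 */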
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, we can
		 * continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
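		/* The mm has a fresh ASID, so it is not live on any CPU yet. */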
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

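/*
 * Switch the MMU to @mm. The fast path revalidates this mm's ASID
 * against the current generation without taking the lock; the slow path
 * takes cpu_asid_lock, allocates a fresh context ID if needed and
 * performs any pending local TLB and branch predictor maintenance
 * before the new page tables are installed.
 */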
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

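	/*
	 * Fast path: the mm's ASID belongs to the current generation and
	 * this CPU's active_asids entry is still non-zero, i.e. no rollover
	 * happened underneath us; the xchg republishes the ASID atomically
	 * so a concurrent rollover cannot miss it.
	 */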
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
		dummy_flush_tlb_a15_erratum();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}