/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed by
 * non 64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
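
/*
 * Illustrative note: ASID_BITS and ASID_MASK are defined outside this file
 * (8 bits on ARMv6/v7, matching the layout above, so NUM_USER_ASIDS is 256
 * with ASID #0 kept reserved by new_context() below). mm->context.id is a
 * 64-bit value of the form
 *
 *	generation | hardware ASID
 *
 * where the hardware ASID sits in the low ASID_BITS bits (extracted with
 * "id & ~ASID_MASK") and the generation, bumped by ASID_FIRST_VERSION on
 * every rollover, occupies the remaining bits. Comparing those bits with
 * asid_generation tells us whether an mm's ASID is still current.
 */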

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

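/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance operations may not
 * reliably remove stale entries on other cores, so the workaround sends
 * IPIs to flush them locally. This helper builds the mask of CPUs whose
 * active (or reserved, if they have been through a rollover) ASID matches
 * the mm being invalidated, i.e. the CPUs that actually need the IPI.
 */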
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

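/*
 * With CONFIG_PID_IN_CONTEXTIDR, the thread-switch notifier below mirrors
 * the incoming task's PID into the PROCID field (the bits above ASID_BITS)
 * of CONTEXTIDR so that external debug and trace tools can identify the
 * running process; the hardware ASID in the low bits is left untouched.
 */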
#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1		@ read CONTEXTIDR\n"
	"	and	%0, %0, %2			@ keep the current ASID\n"
	"	orr	%0, %0, %1			@ insert the PID\n"
	"	mcr	p15, 0, %0, c13, c0, 1		@ write CONTEXTIDR\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

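/*
 * Called on ASID rollover, with cpu_asid_lock held: rebuild asid_map from
 * the ASIDs still in use on the other CPUs (preserving them as reserved so
 * their owners survive the rollover), mark every CPU as needing a local
 * TLB flush, and flush the I-cache if it is VIVT ASID-tagged.
 */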
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		if (i == cpu) {
			asid = 0;
		} else {
			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
			/*
			 * If this CPU has already been through a
			 * rollover, but hasn't run another task in
			 * the meantime, we must preserve its reserved
			 * ASID, as this is the only trace we have of
			 * the process it is still running.
			 */
			if (asid == 0)
				asid = per_cpu(reserved_asids, i);
			__set_bit(asid & ~ASID_MASK, asid_map);
		}
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static int is_reserved_asid(u64 asid)
{
	int cpu;
	for_each_possible_cpu(cpu)
		if (per_cpu(reserved_asids, cpu) == asid)
			return 1;
	return 0;
}

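/*
 * Worked example (illustrative, assuming 8-bit ASIDs): if the current
 * generation is 0x100 and ASIDs 1-255 are all allocated, the next mm that
 * needs an ASID finds the bitmap full, bumps asid_generation to 0x200,
 * calls flush_context() to rebuild the bitmap from the ASIDs still running
 * elsewhere and to queue TLB flushes, and then takes the first free ASID of
 * the new generation, e.g. context.id = 0x201.
 */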
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0 && is_reserved_asid(asid)) {
		/*
		 * Our current ASID was active during a rollover, so we
		 * can continue to use it and this was just a false alarm.
		 */
		asid = generation | (asid & ~ASID_MASK);
	} else {
		/*
		 * Allocate a free ASID. If we can't find one, take a
		 * note of the currently active ASIDs and mark the TLBs
		 * as requiring flushes. We always count from ASID #1,
		 * as we reserve ASID #0 to switch via TTBR0 and indicate
		 * rollover events.
		 */
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		if (asid == NUM_USER_ASIDS) {
			generation = atomic64_add_return(ASID_FIRST_VERSION,
							 &asid_generation);
			flush_context(cpu);
			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
		}
		__set_bit(asid, asid_map);
		asid |= generation;
		cpumask_clear(mm_cpumask(mm));
	}

	return asid;
}

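/*
 * Entry point for the switch_mm() path: make sure 'mm' has an ASID from
 * the current generation (allocating a new one under cpu_asid_lock if its
 * generation is stale), perform any TLB/branch-predictor flush left
 * pending by a rollover, and finally switch to mm's page tables.
 */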
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * Required during context switch to avoid speculative page table
	 * walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

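	/*
	 * Fast path: if our ASID is from the current generation and this
	 * CPU's active_asids entry is still non-zero (no rollover has
	 * cleared it since we last ran), the xchg republishes the ASID as
	 * active and we can switch page tables without taking
	 * cpu_asid_lock; otherwise fall through to the locked slow path.
	 */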
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}