/*
 *  linux/arch/arm/kernel/smp_tlb.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/preempt.h>
#include <linux/smp.h>

#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

/**********************************************************************/

/*
 * TLB maintenance operations.
 *
 * tlb_args bundles the VMA and the address range of a flush request so
 * that a single pointer can be handed to the IPI handlers below.
 */
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

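/*
 * IPI handlers: each one runs on a target CPU in interrupt context and
 * performs the local-only variant of the requested TLB operation.
 */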
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *arg)
{
	struct mm_struct *mm = (struct mm_struct *)arg;

	local_flush_tlb_mm(mm);
}

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_kernel_page(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_page(ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

static inline void ipi_flush_bp_all(void *ignored)
{
	local_flush_bp_all();
}

#ifdef CONFIG_ARM_ERRATA_798181
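/*
 * Decode the Main ID Register: the 0xff0ffff0 mask keeps the
 * implementer, architecture and primary part number fields
 * (0x41/0xf/0xc0f == ARM Cortex-A15) while discarding variant and
 * revision, which are then range-checked against r3p2 (0x413fc0f2).
 */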
static int erratum_a15_798181(void)
{
	unsigned int midr = read_cpuid_id();

	/* Cortex-A15 r0p0..r3p2 affected */
	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
		return 0;
	return 1;
}
#else
static int erratum_a15_798181(void)
{
	return 0;
}
#endif

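/*
 * On the CPUs receiving the IPI, the erratum workaround only requires
 * a dmb() to be executed; the dummy TLB operation is issued by the
 * CPU that initiated the flush.
 */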
static void ipi_flush_tlb_a15_erratum(void *arg)
{
	dmb();
}

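/*
 * Issue the dummy TLB operation required by the workaround locally,
 * then IPI every other online CPU so that each executes a dmb().
 */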
static void broadcast_tlb_a15_erratum(void)
{
	if (!erratum_a15_798181())
		return;

	dummy_flush_tlb_a15_erratum();
	smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
			       NULL, 1);
}

static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
{
	int cpu;
	cpumask_t mask = { CPU_BITS_NONE };

	if (!erratum_a15_798181())
		return;

	dummy_flush_tlb_a15_erratum();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are running
		 * the same ASID as the one being invalidated. There is no
		 * need for locking around the active_asids check since the
		 * switch_mm() function has at least one dmb() (as required by
		 * this workaround) in case a context switch happens on
		 * another CPU after the condition below.
		 */
		if (atomic64_read(&mm->context.id) ==
		    atomic64_read(&per_cpu(active_asids, cpu)))
			cpumask_set_cpu(cpu, &mask);
	}
	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
}

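/*
 * tlb_ops_need_broadcast() is true on cores whose TLB maintenance
 * operations are not broadcast in hardware, in which case the flush
 * has to be relayed to the other CPUs by IPI; otherwise the local
 * operation is sufficient.
 */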
void flush_tlb_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
	else
		local_flush_tlb_all();
	broadcast_tlb_a15_erratum();
}

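/*
 * User-space flushes only need to reach the CPUs in mm_cpumask(mm),
 * whereas the flush_tlb_kernel_* variants below target every online
 * CPU, since kernel mappings are shared by all of them.
 */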
void flush_tlb_mm(struct mm_struct *mm)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
	else
		local_flush_tlb_mm(mm);
	broadcast_tlb_mm_a15_erratum(mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = uaddr;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,
					&ta, 1);
	} else
		local_flush_tlb_page(vma, uaddr);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_page(unsigned long kaddr)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = kaddr;
		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
	} else
		local_flush_tlb_kernel_page(kaddr);
	broadcast_tlb_a15_erratum();
}

void flush_tlb_range(struct vm_area_struct *vma,
                     unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_vma = vma;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,
					&ta, 1);
	} else
		local_flush_tlb_range(vma, start, end);
	broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (tlb_ops_need_broadcast()) {
		struct tlb_args ta;
		ta.ta_start = start;
		ta.ta_end = end;
		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
	} else
		local_flush_tlb_kernel_range(start, end);
	broadcast_tlb_a15_erratum();
}

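/*
 * Branch predictor maintenance follows the same pattern as the TLB
 * operations above: broadcast by IPI if the hardware does not do it.
 */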
void flush_bp_all(void)
{
	if (tlb_ops_need_broadcast())
		on_each_cpu(ipi_flush_bp_all, NULL, 1);
	else
		local_flush_bp_all();
}