/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 *	(c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 *      (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 *	i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying believe me these bugs are either ___RARE___,
 *	or are signal timing bugs worked around in hardware and there's
 *	about nothing of note with C stepping upwards.
 */
G
Glauber Costa 已提交
111 112 113 114 115 116 117 118

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
119 120 121 122
	if (unlikely(cpu_is_offline(cpu))) {
		WARN_ON(1);
		return;
	}
123
	apic->send_IPI_mask(cpumask_of(cpu), RESCHEDULE_VECTOR);
G
Glauber Costa 已提交
124 125
}

126
void native_send_call_func_single_ipi(int cpu)
G
Glauber Costa 已提交
127
{
128
	apic->send_IPI_mask(cpumask_of(cpu), CALL_FUNCTION_SINGLE_VECTOR);
G
Glauber Costa 已提交
129 130
}

131
void native_send_call_func_ipi(const struct cpumask *mask)
G
Glauber Costa 已提交
132
{
133
	cpumask_var_t allbutself;
G
Glauber Costa 已提交
134

135
	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
136
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
137 138
		return;
	}
G
Glauber Costa 已提交
139

140 141 142 143 144
	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
145
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
G
Glauber Costa 已提交
146
	else
147
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
148 149

	free_cpumask_var(allbutself);
G
Glauber Costa 已提交
150 151
}

152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205
static atomic_t stopping_cpu = ATOMIC_INIT(-1);

static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * Stop all other CPUs by sending them an NMI.
 *
 * @wait: if non-zero, wait indefinitely for the other CPUs to go
 *	  offline; otherwise give up after roughly one second.
 *
 * Only the first caller wins the stopping_cpu handshake; any later or
 * concurrent caller returns immediately.  Finally the local APIC is
 * disabled with interrupts off.
 */
static void native_nmi_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */
	if (num_online_cpus() > 1) {
		/*
		 * Did someone beat us here?  The -1 comparison must apply to
		 * the cmpxchg *result* (the previous value of stopping_cpu),
		 * not to safe_smp_processor_id().  The old misplaced
		 * parenthesis stored the boolean (safe_smp_processor_id()
		 * != -1) into stopping_cpu and made the first caller read
		 * back -1 (truthy) and bail out without stopping anything.
		 */
		if (atomic_cmpxchg(&stopping_cpu, -1,
				   safe_smp_processor_id()) != -1)
			return;

		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			return;

		/* sync above data before sending NMI */
		wmb();

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

G
Glauber Costa 已提交
206 207 208 209
/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */

210 211 212 213 214 215 216 217
asmlinkage void smp_reboot_interrupt(void)
{
	ack_APIC_irq();
	irq_enter();
	stop_this_cpu(NULL);
	irq_exit();
}

218
static void native_irq_stop_other_cpus(int wait)
G
Glauber Costa 已提交
219 220
{
	unsigned long flags;
221
	unsigned long timeout;
G
Glauber Costa 已提交
222 223 224 225

	if (reboot_force)
		return;

226 227 228 229 230 231 232 233 234 235 236 237
	/*
	 * Use an own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 * On most systems we could also use an NMI here,
	 * but there are a few systems around where NMI
	 * is problematic so stay with an non NMI for now
	 * (this implies we cannot stop CPUs spinning with irq off
	 * currently)
	 */
	if (num_online_cpus() > 1) {
		apic->send_IPI_allbutself(REBOOT_VECTOR);

238 239 240 241 242 243
		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
244 245 246
			udelay(1);
	}

G
Glauber Costa 已提交
247 248 249 250 251 252
	local_irq_save(flags);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
253
 * Reschedule call back.
G
Glauber Costa 已提交
254 255 256 257
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
258
	inc_irq_stat(irq_resched_count);
259
	scheduler_ipi();
260 261 262
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
G
Glauber Costa 已提交
263 264 265 266 267 268
}

void smp_call_function_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	irq_enter();
269
	generic_smp_call_function_interrupt();
270
	inc_irq_stat(irq_call_count);
G
Glauber Costa 已提交
271
	irq_exit();
272
}
G
Glauber Costa 已提交
273

J
Jens Axboe 已提交
274
void smp_call_function_single_interrupt(struct pt_regs *regs)
275 276 277 278
{
	ack_APIC_irq();
	irq_enter();
	generic_smp_call_function_single_interrupt();
279
	inc_irq_stat(irq_call_count);
280
	irq_exit();
G
Glauber Costa 已提交
281 282 283
}

struct smp_ops smp_ops = {
284 285 286
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,
G
Glauber Costa 已提交
287

288
	.stop_other_cpus	= native_nmi_stop_other_cpus,
289
	.smp_send_reschedule	= native_smp_send_reschedule,
290

291 292 293 294
	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,
295

296
	.send_call_func_ipi	= native_send_call_func_ipi,
297
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
G
Glauber Costa 已提交
298 299
};
EXPORT_SYMBOL_GPL(smp_ops);