/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 */
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

static struct {
	struct list_head	queue;
	spinlock_t		lock;
} call_function __cacheline_aligned_in_smp =
	{
		.queue		= LIST_HEAD_INIT(call_function.queue),
		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
	};

enum {
	CSD_FLAG_LOCK		= 0x01,
};

struct call_function_data {
	struct call_single_data	csd;
	atomic_t		refs;
	cpumask_var_t		cpumask;
};

struct call_single_queue {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct call_function_data, cfd_data);
static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);

static int
hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct call_function_data *cfd = &per_cpu(cfd_data, cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
				cpu_to_node(cpu)))
			return NOTIFY_BAD;
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:

	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		free_cpumask_var(cfd->cpumask);
		break;
#endif
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
	.notifier_call		= hotplug_cfd,
};

static int __cpuinit init_call_single_data(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}

	hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
	register_cpu_notifier(&hotplug_cfd_notifier);

	return 0;
}
early_initcall(init_call_single_data);

/*
 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
 *
 * For non-synchronous ipi calls the csd can still be in use by the
 * previous function call. For multi-cpu calls it's even more interesting
 * as we'll have to ensure no other cpu is observing our csd.
 */
static void csd_lock_wait(struct call_single_data *data)
{
	while (data->flags & CSD_FLAG_LOCK)
		cpu_relax();
}

static void csd_lock(struct call_single_data *data)
{
	csd_lock_wait(data);
	data->flags = CSD_FLAG_LOCK;

	/*
	 * prevent CPU from reordering the above assignment
	 * to ->flags with any subsequent assignments to other
	 * fields of the specified call_single_data structure:
	 */
	smp_mb();
}

static void csd_unlock(struct call_single_data *data)
{
	WARN_ON(!(data->flags & CSD_FLAG_LOCK));

	/*
	 * ensure we're all done before releasing data:
	 */
	smp_mb();

	data->flags &= ~CSD_FLAG_LOCK;
}

/*
 * Insert a previously allocated call_single_data element
 * for execution on the given CPU. data must already have
 * ->func, ->info, and ->flags set.
 */
static
void generic_exec_single(int cpu, struct call_single_data *data, int wait)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	unsigned long flags;
	int ipi;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * The list addition should be visible before sending the IPI
	 * handler locks the list to pull the entry off it because of
	 * normal cache coherency rules implied by spinlocks.
	 *
	 * If IPIs can go out of order to the cache coherency protocol
	 * in an architecture, sufficient synchronisation should be added
	 * to arch code to make it appear to obey cache coherency WRT
	 * locking and barrier primitives. Generic code isn't really
	 * equipped to do the right thing...
	 */
	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_lock_wait(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(cpu));

	/*
	 * Ensure entry is visible on call_function_queue after we have
	 * entered the IPI. See comment in smp_call_function_many.
	 * If we don't have this, then we may miss an entry on the list
	 * and never get another IPI to process it.
	 */
	smp_mb();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	list_for_each_entry_rcu(data, &call_function.queue, csd.list) {
		int refs;

		if (!cpumask_test_and_clear_cpu(cpu, data->cpumask))
			continue;

		data->csd.func(data->csd.info);

		refs = atomic_dec_return(&data->refs);
		WARN_ON(refs < 0);
		if (!refs) {
			spin_lock(&call_function.lock);
			list_del_rcu(&data->csd.list);
			spin_unlock(&call_function.lock);
		}

		if (refs)
			continue;

		csd_unlock(&data->csd);
	}

	put_cpu();
}

/*
 * Invoked by arch to handle an IPI for call function single. Must be
 * called from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	unsigned int data_flags;
	LIST_HEAD(list);

	/*
	 * Shouldn't receive this interrupt on a cpu that is not yet online.
	 */
	WARN_ON_ONCE(!cpu_online(smp_processor_id()));

	spin_lock(&q->lock);
	list_replace_init(&q->list, &list);
	spin_unlock(&q->lock);

	while (!list_empty(&list)) {
		struct call_single_data *data;

		data = list_entry(list.next, struct call_single_data, list);
		list_del(&data->list);

		/*
		 * 'data' can be invalid after this call if flags == 0
		 * (when called through generic_exec_single()),
		 * so save them away before making the call:
		 */
		data_flags = data->flags;

		data->func(data->info);

		/*
		 * Unlocked CSDs are valid through generic_exec_single():
		 */
		if (data_flags & CSD_FLAG_LOCK)
			csd_unlock(data);
	}
}

static DEFINE_PER_CPU(struct call_single_data, csd_data);

/*
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d = {
		.flags = 0,
	};
	unsigned long flags;
	int this_cpu;
	int err = 0;

	/*
	 * prevent preemption and reschedule on another processor,
	 * as well as CPU removal
	 */
	this_cpu = get_cpu();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	if (cpu == this_cpu) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else {
		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
			struct call_single_data *data = &d;

			if (!wait)
				data = &__get_cpu_var(csd_data);

			csd_lock(data);

			data->func = func;
			data->info = info;
			generic_exec_single(cpu, data, wait);
		} else {
			err = -ENXIO;	/* CPU not online */
		}
	}

	put_cpu();

	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
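
/*
 * Usage sketch (illustrative only, not part of the original file): run a
 * fast, non-blocking function on one other CPU and wait for its result.
 * The callback read_remote_jiffies() and the cpu number are hypothetical.
 *
 *	static void read_remote_jiffies(void *info)
 *	{
 *		*(unsigned long *)info = jiffies;
 *	}
 *
 *	unsigned long stamp;
 *	int err;
 *
 *	err = smp_call_function_single(1, read_remote_jiffies, &stamp, 1);
 *	if (err)
 *		printk(KERN_WARNING "cpu 1 is not online\n");
 */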

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 * @wait: If true, wait until function has completed on the other CPU.
 *
 * Like smp_call_function_single(), but allow caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside
 * other structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data,
				int wait)
{
	csd_lock(data);

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
		     && !oops_in_progress);

	generic_exec_single(cpu, data, wait);
}
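
/*
 * Usage sketch (illustrative only, not part of the original file): embed
 * the call_single_data in a caller-owned object so that no allocation is
 * needed.  struct remote_work, remote_func() and handle() are
 * hypothetical.
 *
 *	struct remote_work {
 *		struct call_single_data	csd;
 *		int			arg;
 *	};
 *
 *	static void remote_func(void *info)
 *	{
 *		struct remote_work *rw = info;
 *
 *		handle(rw->arg);
 *	}
 *
 *	static struct remote_work rw;
 *
 *	rw.csd.func = remote_func;
 *	rw.csd.info = &rw;
 *	rw.arg = 42;
 *	__smp_call_function_single(2, &rw.csd, 0);
 */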

/* Deprecated: shim for archs using old arch_send_call_function_ipi API. */

#ifndef arch_send_call_function_ipi_mask
# define arch_send_call_function_ipi_mask(maskp) \
	 arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info, bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu, this_cpu = smp_processor_id();

	/*
	 * Can deadlock when called with interrupts disabled.
	 * We allow CPUs that are not yet online though, as no one else can
	 * send smp call function interrupt to this cpu and as such deadlocks
	 * can't happen.
	 */
	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
		     && !oops_in_progress);

	/* So, what's a CPU they want? Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == this_cpu)
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == this_cpu)
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = &__get_cpu_var(cfd_data);
	csd_lock(&data->csd);

	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(data->cpumask, mask, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, data->cpumask);
	atomic_set(&data->refs, cpumask_weight(data->cpumask));

	spin_lock_irqsave(&call_function.lock, flags);
	/*
	 * Place entry at the _HEAD_ of the list, so that any cpu still
	 * observing the entry in generic_smp_call_function_interrupt()
	 * will not miss any other list entries:
	 */
	list_add_rcu(&data->csd.list, &call_function.queue);
	spin_unlock_irqrestore(&call_function.lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 * (IPIs must obey or appear to obey normal Linux cache
	 * coherency rules -- see comment in generic_exec_single).
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(data->cpumask);

	/* Optionally wait for the CPUs to complete */
	if (wait)
		csd_lock_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
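
/*
 * Usage sketch (illustrative only, not part of the original file): invoke
 * a function on a caller-built subset of CPUs without waiting.  The mask
 * is copied into per-cpu data before the IPI goes out, so it can be freed
 * right away.  drain_queue() and the cpu numbers are hypothetical.
 *
 *	static void drain_queue(void *unused)
 *	{
 *		... fast, non-blocking work on the local cpu ...
 *	}
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_set_cpu(2, mask);
 *		cpumask_set_cpu(3, mask);
 *		preempt_disable();
 *		smp_call_function_many(mask, drain_queue, NULL, false);
 *		preempt_enable();
 *		free_cpumask_var(mask);
 *	}
 */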

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();

	return 0;
}
EXPORT_SYMBOL(smp_call_function);
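
/*
 * Usage sketch (illustrative only, not part of the original file):
 * broadcast to every other online cpu and wait for all of them;
 * flush_local_state() is a hypothetical fast, non-blocking callback.
 *
 *	smp_call_function(flush_local_state, NULL, 1);
 */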

void ipi_call_lock(void)
{
	spin_lock(&call_function.lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function.lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function.lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function.lock);
}