/*
 * Generic helpers for smp ipi calls
 *
 * (C) Jens Axboe <jens.axboe@oracle.com> 2008
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
static LIST_HEAD(call_function_queue);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(call_function_lock);

enum {
	CSD_FLAG_WAIT		= 0x01,
	CSD_FLAG_ALLOC		= 0x02,
};
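
/*
 * CSD_FLAG_WAIT means the submitting CPU spins in csd_flag_wait() until
 * the target CPU has run the function and cleared the flag.  CSD_FLAG_ALLOC
 * marks a kmalloc()ed call_single_data that is freed once the function
 * has run.
 */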

struct call_function_data {
	struct call_single_data csd;
	spinlock_t lock;
	unsigned int refs;
	struct rcu_head rcu_head;
	unsigned long cpumask_bits[];
};

struct call_single_queue {
	struct list_head list;
	spinlock_t lock;
};
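
/*
 * A call_function_data instance describes one pending smp_call_function_many()
 * request: cpumask_bits tracks the CPUs that still have to run the function
 * and refs counts them.  Pending entries are chained on call_function_queue
 * under call_function_lock.  Each CPU also owns a call_single_queue holding
 * the directed smp_call_function_single() requests sent to it.
 */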

static int __cpuinit init_call_single_data(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct call_single_queue *q = &per_cpu(call_single_queue, i);

		spin_lock_init(&q->lock);
		INIT_LIST_HEAD(&q->list);
	}
	return 0;
}
early_initcall(init_call_single_data);

static void csd_flag_wait(struct call_single_data *data)
{
	/* Wait for response */
	do {
		if (!(data->flags & CSD_FLAG_WAIT))
			break;
		cpu_relax();
	} while (1);
}

/*
 * Insert a previously allocated call_single_data element for execution
 * on the given CPU. data must already have ->func, ->info, and ->flags set.
 */
static void generic_exec_single(int cpu, struct call_single_data *data)
{
	struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
	int wait = data->flags & CSD_FLAG_WAIT, ipi;
	unsigned long flags;

	spin_lock_irqsave(&dst->lock, flags);
	ipi = list_empty(&dst->list);
	list_add_tail(&data->list, &dst->list);
	spin_unlock_irqrestore(&dst->lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	if (ipi)
		arch_send_call_function_single_ipi(cpu);

	if (wait)
		csd_flag_wait(data);
}

static void rcu_free_call_data(struct rcu_head *head)
{
	struct call_function_data *data;

	data = container_of(head, struct call_function_data, rcu_head);

	kfree(data);
}

/*
 * Invoked by arch to handle an IPI for call function. Must be called with
 * interrupts disabled.
 */
void generic_smp_call_function_interrupt(void)
{
	struct call_function_data *data;
	int cpu = get_cpu();

	/*
	 * It's ok to use list_for_each_entry_rcu() here even though we may
	 * delete 'data', since list_del_rcu() doesn't clear ->next
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(data, &call_function_queue, csd.list) {
		int refs;

		if (!cpumask_test_cpu(cpu, to_cpumask(data->cpumask_bits)))
			continue;

		data->csd.func(data->csd.info);

		spin_lock(&data->lock);
		cpumask_clear_cpu(cpu, to_cpumask(data->cpumask_bits));
		WARN_ON(data->refs == 0);
		data->refs--;
		refs = data->refs;
		spin_unlock(&data->lock);

		if (refs)
			continue;

		spin_lock(&call_function_lock);
		list_del_rcu(&data->csd.list);
		spin_unlock(&call_function_lock);

		if (data->csd.flags & CSD_FLAG_WAIT) {
			/*
			 * serialize stores to data with the flag clear
			 * and wakeup
			 */
			smp_wmb();
			data->csd.flags &= ~CSD_FLAG_WAIT;
		}
		if (data->csd.flags & CSD_FLAG_ALLOC)
			call_rcu(&data->rcu_head, rcu_free_call_data);
	}
	rcu_read_unlock();

	put_cpu();
}
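
/*
 * Illustrative arch glue only -- handle_call_function_ipi() and ack_ipi()
 * are hypothetical names.  A typical architecture IPI handler just acks the
 * interrupt and forwards to the generic code with interrupts still disabled:
 *
 *	void handle_call_function_ipi(void)
 *	{
 *		ack_ipi();
 *		irq_enter();
 *		generic_smp_call_function_interrupt();
 *		irq_exit();
 *	}
 */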

/*
 * Invoked by arch to handle an IPI for call function single. Must be called
 * from the arch with interrupts disabled.
 */
void generic_smp_call_function_single_interrupt(void)
{
	struct call_single_queue *q = &__get_cpu_var(call_single_queue);
	LIST_HEAD(list);

	/*
	 * Need to see other cpus' stores to the list head, since we check
	 * for an empty list without holding q->lock
	 */
	smp_read_barrier_depends();
	while (!list_empty(&q->list)) {
		unsigned int data_flags;

		spin_lock(&q->lock);
		list_replace_init(&q->list, &list);
		spin_unlock(&q->lock);

		while (!list_empty(&list)) {
			struct call_single_data *data;

			data = list_entry(list.next, struct call_single_data,
						list);
			list_del(&data->list);

			/*
			 * 'data' can be invalid after this call if its
			 * flags == 0 (when called through
			 * generic_exec_single()), so save the flags away
			 * before making the call.
			 */
			data_flags = data->flags;

			data->func(data->info);

			if (data_flags & CSD_FLAG_WAIT) {
				smp_wmb();
				data->flags &= ~CSD_FLAG_WAIT;
			} else if (data_flags & CSD_FLAG_ALLOC)
				kfree(data);
		}
		/*
		 * See comment on outer loop
		 */
		smp_read_barrier_depends();
	}
}

/**
 * smp_call_function_single - Run a function on a specific CPU
 * @cpu: The CPU to run @func on.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait until the function has completed on the target CPU.
 *
 * Returns 0 on success, else a negative status code. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	struct call_single_data d;
	unsigned long flags;
	/* prevent preemption and being rescheduled on another processor,
	   as well as CPU removal */
	int me = get_cpu();
	int err = 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	if (cpu == me) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
		struct call_single_data *data = NULL;

		if (!wait) {
			data = kmalloc(sizeof(*data), GFP_ATOMIC);
			if (data)
				data->flags = CSD_FLAG_ALLOC;
		}
		if (!data) {
			data = &d;
			data->flags = CSD_FLAG_WAIT;
		}

		data->func = func;
		data->info = info;
		generic_exec_single(cpu, data);
	} else {
		err = -ENXIO;	/* CPU not online */
	}

	put_cpu();
	return err;
}
EXPORT_SYMBOL(smp_call_function_single);
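
/*
 * Illustrative use only -- read_remote() and read_some_counter() are
 * hypothetical.  A caller that needs a value sampled on another CPU
 * might do:
 *
 *	static void read_remote(void *info)
 *	{
 *		*(unsigned long *)info = read_some_counter();
 *	}
 *
 *	unsigned long val;
 *	int err = smp_call_function_single(cpu, read_remote, &val, 1);
 *
 * With wait == 1, 'val' is guaranteed to be filled in (or err set) by the
 * time the call returns.
 */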

/**
 * __smp_call_function_single(): Run a function on another CPU
 * @cpu: The CPU to run on.
 * @data: Pre-allocated and setup data structure
 *
 * Like smp_call_function_single(), but allows the caller to pass in a
 * pre-allocated data structure. Useful for embedding @data inside other
 * structures, for instance.
 */
void __smp_call_function_single(int cpu, struct call_single_data *data)
{
	/* Can deadlock when called with interrupts disabled */
	WARN_ON((data->flags & CSD_FLAG_WAIT) && irqs_disabled());

	generic_exec_single(cpu, data);
}
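
/*
 * Illustrative use only -- struct my_remote_work and my_func() are
 * hypothetical.  Callers typically embed the call_single_data in a
 * structure they own and keep alive until the function has run:
 *
 *	struct my_remote_work {
 *		struct call_single_data csd;
 *		...
 *	};
 *
 *	work->csd.func = my_func;
 *	work->csd.info = work;
 *	work->csd.flags = 0;
 *	__smp_call_function_single(cpu, &work->csd);
 *
 * With flags == 0 nothing is freed or waited for here, so 'work' must stay
 * valid until my_func() has executed on the target CPU.
 */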

/* FIXME: Shim for archs using old arch_send_call_function_ipi API. */
#ifndef arch_send_call_function_ipi_mask
#define arch_send_call_function_ipi_mask(maskp) \
	arch_send_call_function_ipi(*(maskp))
#endif

/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * If @wait is true, then returns once @func has returned. Note that @wait
 * will be implicitly turned on in case of allocation failures, since
 * we fall back to on-stack allocation.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
			    void (*func)(void *), void *info,
			    bool wait)
{
	struct call_function_data *data;
	unsigned long flags;
	int cpu, next_cpu;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* So, what's a CPU they want?  Ignoring this one. */
	cpu = cpumask_first_and(mask, cpu_online_mask);
	if (cpu == smp_processor_id())
		cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	/* No online cpus?  We're done. */
	if (cpu >= nr_cpu_ids)
		return;

	/* Do we have another CPU which isn't us? */
	next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
	if (next_cpu == smp_processor_id())
		next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

	/* Fastpath: do that cpu by itself. */
	if (next_cpu >= nr_cpu_ids) {
		smp_call_function_single(cpu, func, info, wait);
		return;
	}

	data = kmalloc(sizeof(*data) + cpumask_size(), GFP_ATOMIC);
	if (unlikely(!data)) {
		/* Slow path. */
		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			if (cpumask_test_cpu(cpu, mask))
				smp_call_function_single(cpu, func, info, wait);
		}
		return;
	}

	spin_lock_init(&data->lock);
	data->csd.flags = CSD_FLAG_ALLOC;
	if (wait)
		data->csd.flags |= CSD_FLAG_WAIT;
	data->csd.func = func;
	data->csd.info = info;
	cpumask_and(to_cpumask(data->cpumask_bits), mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(data->cpumask_bits));
	data->refs = cpumask_weight(to_cpumask(data->cpumask_bits));

	spin_lock_irqsave(&call_function_lock, flags);
	list_add_tail_rcu(&data->csd.list, &call_function_queue);
	spin_unlock_irqrestore(&call_function_lock, flags);

	/*
	 * Make the list addition visible before sending the ipi.
	 */
	smp_mb();

	/* Send a message to all CPUs in the map */
	arch_send_call_function_ipi_mask(to_cpumask(data->cpumask_bits));

	/* optionally wait for the CPUs to complete */
	if (wait)
		csd_flag_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);
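
/*
 * Illustrative use only -- 'target_cpus' and do_something() are hypothetical.
 * With preemption disabled, every online CPU in the mask except the current
 * one runs the function:
 *
 *	get_cpu();
 *	smp_call_function_many(target_cpus, do_something, NULL, true);
 *	put_cpu();
 */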

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpus call @func. In case of allocation
 * failure, @wait will be implicitly turned on.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function(void (*func)(void *), void *info, int wait)
{
	preempt_disable();
	smp_call_function_many(cpu_online_mask, func, info, wait);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
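
/*
 * The helpers below let architecture code take call_function_lock directly,
 * typically around marking a CPU online during bring-up, so that the
 * call_function_queue cannot change while the new CPU becomes visible
 * to smp_call_function_many().
 */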

void ipi_call_lock(void)
{
	spin_lock(&call_function_lock);
}

void ipi_call_unlock(void)
{
	spin_unlock(&call_function_lock);
}

void ipi_call_lock_irq(void)
{
	spin_lock_irq(&call_function_lock);
}

void ipi_call_unlock_irq(void)
{
	spin_unlock_irq(&call_function_lock);
}