/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if a softirq is serialized, only the local cpu is marked for
     execution, so we get a sort of weak cpu binding.  Whether this
     results in better locality is still unclear.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized with respect to themselves.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance
 * the softirq load for us.
 */
static inline void wakeup_softirqd(void)
{
	/* Interrupts are disabled: no need to stop preemption */
	struct task_struct *tsk = __get_cpu_var(ksoftirqd);

	if (tsk && tsk->state != TASK_RUNNING)
		wake_up_process(tsk);
}

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip)
{
	unsigned long flags;

	WARN_ON_ONCE(in_irq());

	raw_local_irq_save(flags);
	add_preempt_count(SOFTIRQ_OFFSET);
	/*
	 * Were softirqs turned off above?
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_off(ip);
	raw_local_irq_restore(flags);
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip)
{
	add_preempt_count(SOFTIRQ_OFFSET);
	barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
	__local_bh_disable((unsigned long)__builtin_return_address(0));
}

EXPORT_SYMBOL(local_bh_disable);

void __local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());

	/*
	 * softirqs should never be enabled by __local_bh_enable(),
	 * it always nests inside local_bh_enable() sections:
	 */
	WARN_ON_ONCE(softirq_count() == SOFTIRQ_OFFSET);

	sub_preempt_count(SOFTIRQ_OFFSET);
}
EXPORT_SYMBOL_GPL(__local_bh_enable);

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
	WARN_ON_ONCE(in_irq());
	WARN_ON_ONCE(!irqs_disabled());

	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on((unsigned long)__builtin_return_address(0));
	sub_preempt_count(SOFTIRQ_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
	WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_disable();
#endif
	/*
	 * Are softirqs going to be turned on now?
	 */
	if (softirq_count() == SOFTIRQ_OFFSET)
		trace_softirqs_on(ip);
	/*
	 * Keep preemption disabled until we are done with
	 * softirq processing:
	 */
	sub_preempt_count(SOFTIRQ_OFFSET - 1);

	if (unlikely(!in_interrupt() && local_softirq_pending()))
		do_softirq();

	dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
	local_irq_enable();
#endif
	preempt_check_resched();
}

void local_bh_enable(void)
{
	_local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
	_local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
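
/*
 * Usage sketch (hypothetical, not part of this file): process-context
 * code that shares per-cpu data with softirq context can bracket the
 * access with local_bh_disable()/local_bh_enable() instead of taking a
 * lock.  "my_pcpu_stats" below is invented for illustration.
 *
 *	local_bh_disable();
 *	stats = &__get_cpu_var(my_pcpu_stats);
 *	stats->rx_packets++;		(softirqs cannot preempt us here)
 *	local_bh_enable();		(runs pending softirqs, if any)
 */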

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency and fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
	struct softirq_action *h;
	__u32 pending;
	int max_restart = MAX_SOFTIRQ_RESTART;
	int cpu;

	pending = local_softirq_pending();
	account_system_vtime(current);

	__local_bh_disable((unsigned long)__builtin_return_address(0));
	trace_softirq_enter();

	cpu = smp_processor_id();
restart:
	/* Reset the pending bitmask before enabling irqs */
	set_softirq_pending(0);

	local_irq_enable();

	h = softirq_vec;

	do {
		if (pending & 1) {
			int prev_count = preempt_count();

			h->action(h);

			if (unlikely(prev_count != preempt_count())) {
				printk(KERN_ERR "huh, entered softirq %td %p "
				       "with preempt_count %08x, "
				       "exited with %08x?\n", h - softirq_vec,
				       h->action, prev_count, preempt_count());
				preempt_count() = prev_count;
			}

			rcu_bh_qsctr_inc(cpu);
		}
		h++;
		pending >>= 1;
	} while (pending);

	local_irq_disable();

	pending = local_softirq_pending();
	if (pending && --max_restart)
		goto restart;

	if (pending)
		wakeup_softirqd();

	trace_softirq_exit();

	account_system_vtime(current);
	_local_bh_enable();
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
	__u32 pending;
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	pending = local_softirq_pending();

	if (pending)
		__do_softirq();

	local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
	int cpu = smp_processor_id();

	if (idle_cpu(cpu) && !in_interrupt()) {
		__irq_enter();
		tick_check_idle(cpu);
	} else
		__irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()	__do_softirq()
#else
# define invoke_softirq()	do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
	account_system_vtime(current);
	trace_hardirq_exit();
	sub_preempt_count(IRQ_EXIT_OFFSET);
	if (!in_interrupt() && local_softirq_pending())
		invoke_softirq();

#ifdef CONFIG_NO_HZ
	/* Make sure that timer wheel updates are propagated */
	if (!in_interrupt() && idle_cpu(smp_processor_id()) && !need_resched())
		tick_nohz_stop_sched_tick(0);
	rcu_irq_exit();
#endif
	preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
	__raise_softirq_irqoff(nr);

	/*
	 * If we're in an interrupt or softirq, we're done
	 * (this also catches softirq-disabled code). We will
	 * actually run the softirq once we return from
	 * the irq or softirq.
	 *
	 * Otherwise we wake up ksoftirqd to make sure we
	 * schedule the softirq soon.
	 */
	if (!in_interrupt())
		wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
	unsigned long flags;

	local_irq_save(flags);
	raise_softirq_irqoff(nr);
	local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
	softirq_vec[nr].action = action;
}
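
/*
 * Usage sketch (hypothetical): registering and raising a softirq with
 * the action signature used above.  MY_SOFTIRQ stands in for a slot in
 * the NR_SOFTIRQS enum and does not exist in the real kernel.
 *
 *	static void my_softirq_action(struct softirq_action *h)
 *	{
 *		(runs with irqs enabled, on the cpu that raised it)
 *	}
 *
 *	open_softirq(MY_SOFTIRQ, my_softirq_action);	(once, at init)
 *	raise_softirq(MY_SOFTIRQ);			(any context)
 *	raise_softirq_irqoff(MY_SOFTIRQ);		(irqs already off)
 */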

/* Tasklets */
struct tasklet_head
{
	struct tasklet_struct *head;
	struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_vec).tail = t;
	__get_cpu_var(tasklet_vec).tail = &(t->next);
	raise_softirq_irqoff(TASKLET_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
	unsigned long flags;

	local_irq_save(flags);
	t->next = NULL;
	*__get_cpu_var(tasklet_hi_vec).tail = t;
	__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
	raise_softirq_irqoff(HI_SOFTIRQ);
	local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

static void tasklet_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_vec).head;
	__get_cpu_var(tasklet_vec).head = NULL;
	__get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_vec).tail = t;
		__get_cpu_var(tasklet_vec).tail = &(t->next);
		__raise_softirq_irqoff(TASKLET_SOFTIRQ);
		local_irq_enable();
	}
}

static void tasklet_hi_action(struct softirq_action *a)
{
	struct tasklet_struct *list;

	local_irq_disable();
	list = __get_cpu_var(tasklet_hi_vec).head;
	__get_cpu_var(tasklet_hi_vec).head = NULL;
	__get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
	local_irq_enable();

	while (list) {
		struct tasklet_struct *t = list;

		list = list->next;

		if (tasklet_trylock(t)) {
			if (!atomic_read(&t->count)) {
				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
					BUG();
				t->func(t->data);
				tasklet_unlock(t);
				continue;
			}
			tasklet_unlock(t);
		}

		local_irq_disable();
		t->next = NULL;
		*__get_cpu_var(tasklet_hi_vec).tail = t;
		__get_cpu_var(tasklet_hi_vec).tail = &(t->next);
		__raise_softirq_irqoff(HI_SOFTIRQ);
		local_irq_enable();
	}
}


void tasklet_init(struct tasklet_struct *t,
		  void (*func)(unsigned long), unsigned long data)
{
	t->next = NULL;
	t->state = 0;
	atomic_set(&t->count, 0);
	t->func = func;
	t->data = data;
}

EXPORT_SYMBOL(tasklet_init);
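
/*
 * Usage sketch (hypothetical driver, all names invented): declare the
 * tasklet once, schedule it from the interrupt handler, and kill it on
 * teardown.
 *
 *	static void my_tasklet_fn(unsigned long data)
 *	{
 *		(deferred work; runs in softirq context, must not sleep)
 *	}
 *	static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);
 *
 *	static irqreturn_t my_irq_handler(int irq, void *dev_id)
 *	{
 *		tasklet_schedule(&my_tasklet);
 *		return IRQ_HANDLED;
 *	}
 *
 *	(on module exit)
 *	tasklet_kill(&my_tasklet);
 */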

void tasklet_kill(struct tasklet_struct *t)
{
	if (in_interrupt())
		printk("Attempt to kill tasklet from interrupt\n");

	while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
		do
			yield();
		while (test_bit(TASKLET_STATE_SCHED, &t->state));
	}
	tasklet_unlock_wait(t);
	clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

	list_add_tail(&cp->list, head);

	/* Trigger the softirq only if the list was previously empty.  */
	if (head->next == &cp->list)
		raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
	struct call_single_data *cp = data;
	unsigned long flags;
	int softirq;

	softirq = cp->priv;

	local_irq_save(flags);
	__local_trigger(cp, softirq);
	local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	if (cpu_online(cpu)) {
		cp->func = remote_softirq_receive;
		cp->info = cp;
		cp->flags = 0;
		cp->priv = softirq;

		__smp_call_function_single(cpu, cp);
		return 0;
	}
	return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
		__local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
	unsigned long flags;
	int this_cpu;

	local_irq_save(flags);
	this_cpu = smp_processor_id();
	__send_remote_softirq(cp, cpu, this_cpu, softirq);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
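
/*
 * Usage sketch (hypothetical): completing work on the cpu that
 * submitted it.  "struct my_work" is invented; the call_single_data
 * must stay valid until the softirq action has consumed it, so it is
 * embedded in the work item.
 *
 *	struct my_work {
 *		struct call_single_data csd;
 *		int submit_cpu;
 *	};
 *
 *	send_remote_softirq(&work->csd, work->submit_cpu, BLOCK_SOFTIRQ);
 *	(the BLOCK_SOFTIRQ action then walks its entry in the per-cpu
 *	 softirq_work_list[] to find this work item)
 */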

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
					       unsigned long action, void *hcpu)
{
	/*
	 * If a CPU goes away, splice its entries to the current CPU
	 * and trigger a run of the softirq
	 */
	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		int cpu = (unsigned long) hcpu;
		int i;

		local_irq_disable();
		for (i = 0; i < NR_SOFTIRQS; i++) {
			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
			struct list_head *local_head;

			if (list_empty(head))
				continue;

			local_head = &__get_cpu_var(softirq_work_list[i]);
			list_splice_init(head, local_head);
			raise_softirq_irqoff(i);
		}
		local_irq_enable();
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
	.notifier_call	= remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		int i;

		per_cpu(tasklet_vec, cpu).tail =
			&per_cpu(tasklet_vec, cpu).head;
		per_cpu(tasklet_hi_vec, cpu).tail =
			&per_cpu(tasklet_hi_vec, cpu).head;
		for (i = 0; i < NR_SOFTIRQS; i++)
			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
	}

	register_hotcpu_notifier(&remote_softirq_cpu_notifier);

	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {
		preempt_disable();
		if (!local_softirq_pending()) {
			preempt_enable_no_resched();
			schedule();
			preempt_disable();
		}

		__set_current_state(TASK_RUNNING);

		while (local_softirq_pending()) {
			/* Preempt disable stops the cpu from going offline.
			   If already offline, we'll be on the wrong CPU:
			   don't process. */
			if (cpu_is_offline((long)__bind_cpu))
				goto wait_to_die;
			do_softirq();
			preempt_enable_no_resched();
			cond_resched();
			preempt_disable();
		}
		preempt_enable();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;

wait_to_die:
	preempt_enable();
	/* Wait for kthread_stop */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
	struct tasklet_struct **i;

	BUG_ON(cpu_online(cpu));
	BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

	if (!test_bit(TASKLET_STATE_SCHED, &t->state))
		return;

	/* CPU is dead, so no lock needed. */
	for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
		if (*i == t) {
			*i = t->next;
			/* If this was the tail element, move the tail ptr */
			if (*i == NULL)
				per_cpu(tasklet_vec, cpu).tail = i;
			return;
		}
	}
	BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
	/* CPU is dead, so no lock needed. */
	local_irq_disable();

	/* Find end, append list for that CPU. */
	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
		per_cpu(tasklet_vec, cpu).head = NULL;
		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
	}
	raise_softirq_irqoff(TASKLET_SOFTIRQ);

	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
		per_cpu(tasklet_hi_vec, cpu).head = NULL;
		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
	}
	raise_softirq_irqoff(HI_SOFTIRQ);

	local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
				  unsigned long action,
				  void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
		if (IS_ERR(p)) {
			printk("ksoftirqd for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		kthread_bind(p, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = p;
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(ksoftirqd, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(ksoftirqd, hotcpu))
			break;
		/* Unbind so it can run.  Fall thru. */
		kthread_bind(per_cpu(ksoftirqd, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN: {
		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

		p = per_cpu(ksoftirqd, hotcpu);
		per_cpu(ksoftirqd, hotcpu) = NULL;
		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
		kthread_stop(p);
		takeover_tasklets(hotcpu);
		break;
	}
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);
	return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
	int ret = 0;

	preempt_disable();
	ret = smp_call_function(func, info, wait);
	local_irq_disable();
	func(info);
	local_irq_enable();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL(on_each_cpu);
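
/*
 * Usage sketch (hypothetical, "my_flush" is invented): the callback
 * runs with interrupts disabled on every cpu, so it must be short and
 * must not sleep.
 *
 *	static void my_flush(void *info)
 *	{
 *		(e.g. drain a per-cpu cache)
 *	}
 *
 *	on_each_cpu(my_flush, NULL, 1);		(wait == 1: returns only
 *						 after all cpus have run it)
 */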
#endif