/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */
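
/*
 * Rough usage sketch (illustration only; my_work/my_func are made-up
 * names): embed a struct irq_work in your own data, initialize it with
 * init_irq_work() from <linux/irq_work.h>, then queue it from NMI or
 * hardirq context; the callback later runs in hardirq context:
 *
 *	static void my_func(struct irq_work *w) { ... }
 *	static struct irq_work my_work;
 *
 *	init_irq_work(&my_work, my_func);
 *	irq_work_queue(&my_work);	(NMI-safe)
 */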

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/processor.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
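
/*
 * In the table above the first column is the list linkage (@llnode) and
 * the second is the @flags value: 3 is IRQ_WORK_PENDING | IRQ_WORK_BUSY,
 * 2 is IRQ_WORK_BUSY alone, 0 is a free entry.
 */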

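/*
 * Per-CPU lock-less list of queued entries; llist_add() keeps the
 * enqueue path NMI-safe and irq_work_run() drains the list.
 */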
static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		/* Already claimed/queued by someone else. */
		if (flags & IRQ_WORK_PENDING)
			return false;
		/* Atomically set PENDING | BUSY; retry if @flags changed under us. */
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
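
/*
 * Architectures that can do better override this weak stub; x86, for
 * instance, raises a self-IPI so queued work runs almost immediately
 * rather than waiting for the next timer tick.
 */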

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @work; returns true on success, false when
 * @work was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		/*
		 * Fetch the next node before clearing PENDING; once that
		 * bit is gone @work can be re-claimed and its llnode
		 * re-used for another enqueue.
		 */
		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
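
/*
 * irq_work_run() is meant to be called by architecture code, typically
 * from the interrupt triggered by arch_irq_work_raise() (or from the
 * timer tick when only the weak stub above is available), with IRQs
 * still disabled as the BUG_ON()s above insist.
 */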

/*
 * Synchronize against the irq_work @work; ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
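
/*
 * Typical use (sketch; my_obj/my_work are made-up names): synchronize
 * before freeing whatever embeds the irq_work so the callback cannot
 * still be looking at it:
 *
 *	irq_work_sync(&my_obj->my_work);
 *	kfree(my_obj);
 */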