irq_work.c
/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/hardirq.h>

/*
 * An entry can be in one of four states:
 *
 * free	     NULL, 0 -> {claimed}       : free to be used
 * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
 * pending   next, 3 -> {busy}          : queued, pending callback
 * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
 */

#define IRQ_WORK_PENDING	1UL
#define IRQ_WORK_BUSY		2UL
#define IRQ_WORK_FLAGS		3UL
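
/*
 * Worked illustration of the flag values above (a sketch, not part of the
 * original file): claiming a free entry is the transition 0 -> 3
 * (PENDING | BUSY); claiming a busy entry whose callback is still running
 * is 2 -> (2 | 3) == 3.  Either way a single cmpxchg() on ->flags performs
 * the claim without taking any locks, which is part of what makes
 * enqueueing NMI-safe.
 */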

static DEFINE_PER_CPU(struct llist_head, irq_work_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, nflags;

	for (;;) {
		flags = work->flags;
		if (flags & IRQ_WORK_PENDING)
			return false;
		/*
		 * Not pending: either free (0) or busy (2).  Or'ing in both
		 * flag bits moves it to the claimed state in one cmpxchg().
		 */
		nflags = flags | IRQ_WORK_FLAGS;
		if (cmpxchg(&work->flags, flags, nflags) == flags)
			break;
		cpu_relax();
	}

	return true;
}

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
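
/*
 * Sketch of an override (illustrative only; the helper name below is made
 * up, not a real kernel API): an architecture that can raise a
 * self-interrupt would provide a non-weak definition along the lines of
 *
 *	void arch_irq_work_raise(void)
 *	{
 *		send_self_ipi_for_irq_work();
 *	}
 *
 * so that queued work runs promptly instead of waiting for the next tick.
 */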

/*
 * Queue the entry and raise the IPI if needed.
 */
static void __irq_work_queue(struct irq_work *work)
{
	bool empty;

	/* Stay on this CPU so the work is queued and raised on the same cpu. */
	preempt_disable();

	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
	/* The list was empty, raise self-interrupt to start processing. */
	if (empty)
		arch_irq_work_raise();

	preempt_enable();
}

/*
 * Enqueue the irq_work @entry; returns true on success, false when the
 * @entry was already enqueued by someone else.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue(struct irq_work *work)
{
	if (!irq_work_claim(work)) {
		/*
		 * Already enqueued, can't do!
		 */
		return false;
	}

	__irq_work_queue(work);
	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
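
/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * initialises a struct irq_work with its callback once and then enqueues it
 * from NMI or hardirq context.  Assumes init_irq_work() as provided by
 * <linux/irq_work.h>; the example_* names are made up.
 */
#if 0
static void example_irq_work_func(struct irq_work *work)
{
	/* Runs later from hardirq context via irq_work_run(). */
	pr_info("irq_work callback ran\n");
}

static struct irq_work example_work;

static void example_queue(void)
{
	init_irq_work(&example_work, example_irq_work_func);

	/* NMI-safe; returns false if the work is already pending. */
	if (!irq_work_queue(&example_work))
		pr_debug("example_work was already queued\n");
}
#endif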

/*
 * Run the irq_work entries on this cpu. Must be called from hardirq
 * context with local IRQs disabled.
 */
void irq_work_run(void)
{
	struct irq_work *work;
	struct llist_head *this_list;
	struct llist_node *llnode;

	this_list = &__get_cpu_var(irq_work_list);
	if (llist_empty(this_list))
		return;

	BUG_ON(!in_irq());
	BUG_ON(!irqs_disabled());

	llnode = llist_del_all(this_list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		/*
		 * Fetch the next node before running the callback; once
		 * PENDING is cleared below, @work may be re-claimed and
		 * its llnode re-used.
		 */
		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 */
		work->flags = IRQ_WORK_BUSY;
		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
	}
}
EXPORT_SYMBOL_GPL(irq_work_run);
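
/*
 * Sketch of the expected caller (illustrative only; the handler name is
 * hypothetical): the architecture's self-interrupt handler, or the timer
 * tick on architectures without one, ends up calling irq_work_run() in
 * hardirq context with local IRQs disabled.
 */
#if 0
void example_irq_work_interrupt(void)
{
	/* hardirq context, interrupts off, as required by irq_work_run() */
	irq_work_run();
}
#endif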

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
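
/*
 * Usage sketch for irq_work_sync() (illustrative only; the example_* name is
 * made up): before the memory backing a struct irq_work is freed, and
 * assuming nothing can still enqueue it, busy-wait for an in-flight callback
 * to finish.  Must be called with IRQs enabled.
 */
#if 0
static void example_teardown(struct irq_work *work)
{
	irq_work_sync(work);
	/* No callback is running beyond this point. */
}
#endif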