/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

J
Jeff Dike 已提交
20 21 22 23 24 25 26 27
/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
/* Tail pointer of the active_fds list, giving O(1) append in activate_fd */
static struct irq_fd **last_irq_ptr = &active_fds;

extern void free_irqs(void);

33
void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
34 35 36 37
{
	struct irq_fd *irq_fd;
	int n;

38 39 40 41
	if (smp_sigio_handler())
		return;

	while (1) {
42 43
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
J
Jeff Dike 已提交
44 45
			if (n == -EINTR)
				continue;
46 47 48
			else break;
		}

J
Jeff Dike 已提交
49 50
		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
51
			if (irq_fd->current_events != 0) {
52 53 54 55 56 57 58 59 60
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

/* Protects active_fds and last_irq_ptr against concurrent modification */
static DEFINE_SPINLOCK(irq_lock);

63
/*
 * Wire up a host file descriptor so that SIGIO-driven polling raises
 * the given irq.  The fd is marked async on the host, appended to
 * active_fds (duplicate fd/type registrations are rejected with
 * -EBUSY), and the host-side pollfd array is grown as needed.
 * Returns 0 on success or a negative errno.
 */
static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next  		= NULL,
				     .id 		= dev_id,
				     .fd 		= fd,
				     .type 		= type,
				     .irq 		= irq,
				     .events 		= events,
				     .current_events 	= 0 } );

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	/* Refuse a second registration of the same fd for the same direction */
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	/* Write IRQs are not polled; a -1 fd makes the host ignore the slot */
	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0
		 * It means we couldn't put new pollfd to current pollfds
		 * and tmp_fds is NULL or too small for new pollfds array.
		 * Needed size is equal to n as minimum.
		 *
		 * Here we have to drop the lock in order to call
		 * kmalloc, which might sleep.
		 * If something else came in and changed the pollfds array
		 * so we will not be able to put new pollfd struct to pollfds
		 * then we free the buffer tmp_fds and try again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	/* Append to active_fds via the cached tail pointer */
	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	/*
	 * NOTE(review): for IRQ_WRITE, fd was set to -1 above, so this
	 * passes -1 rather than the original descriptor — confirm intended.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

160
	spin_lock_irqsave(&irq_lock, flags);
161
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
162
	spin_unlock_irqrestore(&irq_lock, flags);
163 164 165 166 167 168 169 170 171 172 173
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

174
	return ((irq->irq == data->irq) && (irq->id == data->dev));
175 176
}

177
static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
178 179 180 181 182 183 184 185 186
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq  = irq,
							  .dev  = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
187
	return (irq->fd == *((int *)fd));
188 189 190 191 192 193 194
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

J
Jeff Dike 已提交
195
/* Must be called with irq_lock held */
196 197 198 199 200 201
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

202 203 204
	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
205 206
		i++;
	}
207
	if (irq == NULL) {
J
Jeff Dike 已提交
208 209
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
210 211 212
		goto out;
	}
	fdi = os_get_pollfd(i);
213
	if ((fdi != -1) && (fdi != fd)) {
J
Jeff Dike 已提交
214 215
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
216 217 218 219 220 221
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
222
	return irq;
223 224 225 226 227 228 229 230
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

231
	spin_lock_irqsave(&irq_lock, flags);
232
	irq = find_irq_by_fd(fd, irqnum, &i);
233
	if (irq == NULL) {
234
		spin_unlock_irqrestore(&irq_lock, flags);
235 236 237
		return;
	}
	os_set_pollfd(i, irq->fd);
238
	spin_unlock_irqrestore(&irq_lock, flags);
239

J
Jeff Dike 已提交
240
	add_sigio_fd(fd);
241 242 243 244 245 246 247 248
}

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

249
	spin_lock_irqsave(&irq_lock, flags);
250
	irq = find_irq_by_fd(fd, irqnum, &i);
J
Jeff Dike 已提交
251
	if (irq == NULL) {
J
Jeff Dike 已提交
252 253 254 255
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

256
	os_set_pollfd(i, -1);
257
	spin_unlock_irqrestore(&irq_lock, flags);
J
Jeff Dike 已提交
258 259

	ignore_sigio_fd(fd);
260
}
261
EXPORT_SYMBOL(deactivate_fd);
262

J
Jeff Dike 已提交
263 264 265 266 267 268
/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
269 270 271 272 273
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

274
	for (irq = active_fds; irq != NULL; irq = irq->next) {
275
		err = os_clear_fd_async(irq->fd);
276 277
		if (err)
			return err;
278 279 280 281
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

282
	return 0;
283 284
}

L
Linus Torvalds 已提交
285
/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *saved_regs = set_irq_regs((struct pt_regs *)regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(saved_regs);

	return 1;
}

R
Richard Weinberger 已提交
300 301 302 303 304 305 306
/*
 * Release an IRQ set up with um_request_irq: drop the matching
 * active_fds entry first, then hand the irq back to the generic layer.
 */
void um_free_irq(unsigned int irq, void *dev)
{
	free_irq_by_irq_and_dev(irq, dev);
	free_irq(irq, dev);
}
EXPORT_SYMBOL(um_free_irq);

L
Linus Torvalds 已提交
307
int um_request_irq(unsigned int irq, int fd, int type,
308
		   irq_handler_t handler,
L
Linus Torvalds 已提交
309 310 311 312 313
		   unsigned long irqflags, const char * devname,
		   void *dev_id)
{
	int err;

J
Jeff Dike 已提交
314
	if (fd != -1) {
L
Linus Torvalds 已提交
315
		err = activate_fd(irq, fd, type, dev_id);
J
Jeff Dike 已提交
316 317 318 319 320
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
L
Linus Torvalds 已提交
321
}
J
Jeff Dike 已提交
322

L
Linus Torvalds 已提交
323 324 325
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);

J
Jeff Dike 已提交
326
/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
/* No-op irq_chip hook; these IRQs need no hardware masking or ack */
static void dummy(struct irq_data *d)
{
}

334
/* This is used for everything else than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	/* All hooks are no-ops; there is no hardware to touch */
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

342
/* irq_chip for the timer interrupt (TIMER_IRQ), driven by SIGVTALRM */
static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	/* All hooks are no-ops; there is no hardware to touch */
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

/*
 * Boot-time IRQ setup: attach the timer chip to TIMER_IRQ and the
 * SIGIO chip to every other line, all using the edge-triggered flow
 * handler.
 */
void __init init_IRQ(void)
{
	int n;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

	for (n = 1; n < NR_IRQS; n++)
		irq_set_chip_and_handler(n, &normal_irq_type, handle_edge_irq);
}

J
Jeff Dike 已提交
359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407
/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and switch over to the IRQ stack after some preparation.  We use
 * sigaltstack to receive signals on a separate stack from the start.
 * These two functions make sure the rest of the kernel won't be too
 * upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the tasks's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
 * could arrive while a previous one is still setting up the
 * thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with thread_info->pending.
 * If the value that comes back is zero, then there is no setup in
 * progress, and the interrupt can be handled.  If the value is
 * non-zero, then there is stack setup in progress.  In order to have
 * the interrupt handled, we leave our signal in the mask, and it will
 * be handled by the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */

/* Signal bits left behind by handlers that raced with stack setup */
static unsigned long pending_mask;

J
Jeff Dike 已提交
408
/*
 * Switch bookkeeping onto the IRQ stack's thread_info.  Returns 1 if
 * another handler is still setting up the stack — our signal bit is
 * left in pending_mask for that handler to process.  Otherwise
 * returns 0 and ORs any accumulated signal bits (plus the nesting
 * indicator in bit 0) into *mask_out.  See the block comment above
 * for the full race protocol.
 */
unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	/* Non-zero return means stack setup is already in progress */
	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	/* real_thread != NULL means the IRQ stack is already set up */
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		/* Copy the task's thread_info onto the IRQ stack */
		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	/* Collect bits from handlers that raced with the setup above */
	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

/*
 * Undo to_irq_stack: copy the (possibly updated) thread_info back to
 * the task's own stack and clear the nesting marker.  pending_mask is
 * held at 1 during the copy so racing handlers see setup in progress;
 * the returned mask holds bits for signals that arrived meanwhile
 * (bit 0, the in-progress flag, is stripped).
 */
unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

	/* Mark "copy in progress" so racing handlers defer to us */
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}