regmap-irq.c 22.1 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13
#include <linux/device.h>
14
#include <linux/export.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/irqdomain.h>
18
#include <linux/pm_runtime.h>
19
#include <linux/regmap.h>
20 21 22 23 24 25
#include <linux/slab.h>

#include "internal.h"

/* Runtime state for one registered regmap IRQ controller instance. */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises IRQ state; taken via irq_bus_lock */
	struct irq_chip irq_chip;	/* per-instance copy of regmap_irq_chip (name patched) */

	struct regmap *map;
	const struct regmap_irq_chip *chip;

	int irq_base;			/* base of legacy IRQ range, 0 if linear domain */
	struct irq_domain *domain;

	int irq;			/* primary (parent) IRQ */
	int wake_count;			/* wake ref delta to propagate to parent on unlock */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *status_buf;	/* per-register status, widened to unsigned int */
	unsigned int *mask_buf;		/* cached mask state, written back on bus unlock */
	unsigned int *mask_buf_def;	/* union of all mask bits handled per register */

	unsigned int *wake_buf;		/* cached wake-enable state (only if wake_base) */

	unsigned int *type_buf;		/* cached IRQ type (edge) bits */
	unsigned int *type_buf_def;	/* union of all type bits per register */

	unsigned int irq_reg_stride;
	unsigned int type_reg_stride;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
53
	return &data->chip->irqs[irq];
54 55 56 57 58 59 60 61 62
}

/* irq_bus_lock callback: hold the chip mutex across a batch of IRQ ops. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data =
		irq_data_get_irq_chip_data(data);

	mutex_lock(&chip_data->lock);
}

63 64 65 66 67 68 69 70 71 72
/*
 * Update bits in an IRQ control register, using a blind write for
 * chips whose mask registers must not be read back.
 */
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);

	return regmap_update_bits(d->map, reg, mask, val);
}

73 74 75
/*
 * irq_bus_sync_unlock callback: push all cached mask/wake/type state out
 * to the hardware, ack any masked-but-pending interrupts, propagate wake
 * reference changes to the parent IRQ, then release the chip mutex.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted sense: 1 in the register means enabled */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/*
 * irq_enable callback: clear the cached mask bit for this interrupt.
 * Runs under the chip mutex; the hardware write is deferred to
 * regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type_falling_mask | irq_data->type_rising_mask;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
226
	struct regmap *map = d->map;
227
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
228

229
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
230 231
}

232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263
/*
 * irq_set_type callback: record the requested edge configuration in the
 * cached type buffer.  The type registers are written back to hardware
 * in regmap_irq_sync_unlock().
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg = irq_data->type_reg_offset / map->reg_stride;

	/* IRQs with no configurable edge bits: nothing to record. */
	if (!(irq_data->type_rising_mask | irq_data->type_falling_mask))
		return 0;

	/* Clear both edge bits before setting the requested combination. */
	d->type_buf[reg] &= ~(irq_data->type_falling_mask |
					irq_data->type_rising_mask);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= irq_data->type_falling_mask;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= irq_data->type_rising_mask;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (irq_data->type_falling_mask |
					irq_data->type_rising_mask);
		break;

	default:
		/* Only edge types are supported by this driver. */
		return -EINVAL;
	}
	return 0;
}

264 265 266 267 268 269 270
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
271 272 273
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
274 275
		d->wake_count++;
	} else {
276 277 278
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
279 280 281 282 283 284
		d->wake_count--;
	}

	return 0;
}

285
/*
 * Template irq_chip; copied into each instance so that the .name can be
 * customised per controller (see regmap_add_irq_chip()).
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
M
Mark Brown 已提交
297
	const struct regmap_irq_chip *chip = data->chip;
298 299
	struct regmap *map = data->map;
	int ret, i;
300
	bool handled = false;
301
	u32 reg;
302

303 304 305
	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

306 307 308 309 310
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
311
			pm_runtime_put(map->dev);
312
			goto exit;
313 314 315
		}
	}

316 317 318 319
	/*
	 * Read in the statuses, using a single bulk read if possible
	 * in order to reduce the I/O overheads.
	 */
320
	if (!map->use_single_read && map->reg_stride == 1 &&
321 322 323 324
	    data->irq_reg_stride == 1) {
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
325

326 327 328 329 330
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
331 332
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
333
				ret);
334
			goto exit;
335
		}
336 337 338 339 340 341 342 343 344 345 346 347 348 349

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
350
				goto exit;
351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
367
				goto exit;
368 369
			}
		}
370
	}
371

372 373 374 375 376 377 378 379
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
380 381
		data->status_buf[i] &= ~data->mask_buf[i];

382
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
383 384 385
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
386 387
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
388
					reg, ret);
389 390 391 392
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
393 394
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
395
			handle_nested_irq(irq_find_mapping(data->domain, i));
396
			handled = true;
397 398 399
		}
	}

400 401 402
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

403 404 405 406
exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

407 408 409 410
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
411 412
}

413 414 415 416 417 418
/*
 * irq_domain map callback: wire a newly created virtual IRQ up to this
 * chip as a nested, non-probeable interrupt.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip_data(virq, chip_data);
	irq_set_chip(virq, &chip_data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, chip_data->irq);
	irq_set_noprobe(virq);

	return 0;
}

427
/* Domain ops: nested-thread mapping plus generic one/two-cell translate. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

432
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* Every IRQ's register offset must be stride-aligned and in range. */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	/* With type_in_mask the type buffers shadow the mask registers. */
	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	/* Copy the template chip so the name can be set per instance. */
	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Bulk status reads are only possible with unit strides. */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Program default type register values (dedicated type regs only). */
	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_irqs; i++) {
			reg = chip->irqs[i].type_reg_offset / map->reg_stride;
			d->type_buf_def[reg] |= chip->irqs[i].type_rising_mask |
					chip->irqs[i].type_falling_mask;
		}
		for (i = 0; i < chip->num_type_reg; ++i) {
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0xFF);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], 0x0);
			if (ret != 0) {
				dev_err(map->dev,
					"Failed to set type in 0x%x: %x\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	/* Release the primary IRQ first so no handler can run afterwards. */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
740

741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760
/* devres release callback: tear down the managed regmap IRQ chip. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *chip_data =
		*(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(chip_data->irq, chip_data);
}

/*
 * devres match callback: select the devres entry holding @data.
 * Simplified to the idiomatic if (WARN_ON(...)) form, avoiding the
 * redundant double evaluation of the sanity condition.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **dr, *chip_data;
	int ret;

	/* Reserve the devres slot first so a late failure cannot leak it. */
	dr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*dr),
			  GFP_KERNEL);
	if (!dr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base, chip,
				  &chip_data);
	if (ret < 0) {
		devres_free(dr);
		return ret;
	}

	*dr = chip_data;
	devres_add(dev, dr);
	*data = chip_data;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which which resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	WARN_ON(irq != data->irq);

	/*
	 * devres_release() returns non-zero only if the entry was not
	 * found; WARN_ON() is already conditional, so the previous
	 * "rc = ...; if (rc != 0) WARN_ON(rc);" dance was redundant.
	 */
	WARN_ON(devres_release(dev, devm_regmap_irq_chip_release,
			       devm_regmap_irq_chip_match, data));
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

826
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	/* Only meaningful when the chip was registered with irq_base > 0. */
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
839 840

/**
841
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
842
 *
843 844
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
845
 *
846
 * Useful for drivers to request their own IRQs.
847 848 849
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
850 851 852 853
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

854 855 856
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
857 858

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);