regmap-irq.c 25.0 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

13
#include <linux/device.h>
14
#include <linux/export.h>
15
#include <linux/interrupt.h>
16
#include <linux/irq.h>
17
#include <linux/irqdomain.h>
18
#include <linux/pm_runtime.h>
19
#include <linux/regmap.h>
20 21 22 23 24 25
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance.
 * Allocated by regmap_add_irq_chip() and freed by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached state vs. sync_unlock flush */
	struct irq_chip irq_chip;	/* per-instance copy of the template chip */

	struct regmap *map;		/* register map used for all hardware I/O */
	const struct regmap_irq_chip *chip;	/* static description from the driver */

	int irq_base;			/* first Linux IRQ for legacy domains (0 = linear) */
	struct irq_domain *domain;	/* maps hwirq index -> virtual IRQ */

	int irq;			/* primary (chained) interrupt line */
	int wake_count;			/* pending irq_set_irq_wake() delta for @irq */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *main_status_buf;	/* decoded main ("first level") status regs */
	unsigned int *status_buf;	/* decoded per-register status bits */
	unsigned int *mask_buf;		/* cached mask state, written back on unlock */
	unsigned int *mask_buf_def;	/* union of all valid IRQ bits per register */
	unsigned int *wake_buf;		/* cached wake mask (only if chip->wake_base) */
	unsigned int *type_buf;		/* pending IRQ type register values */
	unsigned int *type_buf_def;	/* valid/default type bits per register */

	unsigned int irq_reg_stride;	/* register step between successive status/mask regs */
	unsigned int type_reg_stride;	/* register step between successive type regs */

	/* request one read-and-discard status pass on next sync (clear_on_unmask) */
	bool clear_status:1;
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
56
	return &data->chip->irqs[irq];
57 58 59 60 61 62 63 64 65
}

/* irq_bus_lock: taken by the IRQ core before any irq_chip state updates. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);

	mutex_lock(&d->lock);
}

66 67 68 69 70 71 72 73 74 75
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);
	else
		return regmap_update_bits(d->map, reg, mask, val);
}

/*
 * irq_bus_sync_unlock: flush all cached mask/wake/type state to the
 * hardware and release the bus lock taken in regmap_irq_lock().  The
 * order (status clear, mask, wake, ack, type, wake propagation) is
 * deliberate; do not reorder the register writes.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/* clear_on_unmask chips: read and discard status once after unmask */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* inverted chips: 1 = enabled, so write ~mask */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/*
 * irq_enable (unmask): clear the IRQ's bit in the cached mask buffer.
 * The actual hardware write happens later in regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	/* Ask sync_unlock to read-and-discard status once after unmasking */
	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
250
	struct regmap *map = d->map;
251
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
252

253
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
254 255
}

256 257 258 259 260
/*
 * irq_set_type: cache the register value needed to select @type for
 * this IRQ.  The type registers (or, for type_in_mask chips, the mask
 * value used by regmap_irq_enable()) are written back on sync_unlock.
 */
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;

	/* Types this IRQ does not support are silently ignored */
	if ((t->types_supported & type) != type)
		return 0;

	reg = t->type_reg_offset / map->reg_stride;

	/* Clear all of this IRQ's type bits before setting the new ones */
	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
		d->type_buf[reg] |= t->type_falling_val;
		break;

	case IRQ_TYPE_EDGE_RISING:
		d->type_buf[reg] |= t->type_rising_val;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

303 304 305 306 307 308 309
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
310 311 312
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
313 314
		d->wake_count++;
	} else {
315 316 317
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
318 319 320 321 322 323
		d->wake_count--;
	}

	return 0;
}

324
/*
 * Template irq_chip copied into every regmap_irq_chip_data instance;
 * regmap_add_irq_chip() overrides .name per chip after copying.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359
/*
 * Read the sub (second level) status register(s) behind bit @b of the
 * main status register into data->status_buf.  Returns 0 or the first
 * regmap_read() error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	int i, ret;

	if (!chip->sub_reg_offsets)
		/* Assume linear mapping: main bit b -> status reg b */
		return regmap_read(map, chip->status_base +
				   (b * map->reg_stride * data->irq_reg_stride),
				   &data->status_buf[b]);

	subreg = &chip->sub_reg_offsets[b];
	for (i = 0; i < subreg->num_regs; i++) {
		unsigned int offset = subreg->offset[i];

		ret = regmap_read(map, chip->status_base + offset,
				  &data->status_buf[offset]);
		if (ret)
			return ret;
	}

	return 0;
}

360 361 362
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
M
Mark Brown 已提交
363
	const struct regmap_irq_chip *chip = data->chip;
364 365
	struct regmap *map = data->map;
	int ret, i;
366
	bool handled = false;
367
	u32 reg;
368

369 370 371
	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

372 373 374 375 376
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
377
			pm_runtime_put(map->dev);
378
			goto exit;
379 380 381
		}
	}

382
	/*
383 384 385
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
386
	 */
387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441

	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					if (chip->runtime_pm)
						pm_runtime_put(map->dev);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

442 443 444
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
445

446 447 448 449 450
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
451 452
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
453
				ret);
454
			goto exit;
455
		}
456 457 458 459 460 461 462 463 464 465 466 467 468 469

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
470
				goto exit;
471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
487
				goto exit;
488 489
			}
		}
490
	}
491

492 493 494 495 496 497 498 499
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
500 501
		data->status_buf[i] &= ~data->mask_buf[i];

502
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
503 504 505
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
506 507
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
508
					reg, ret);
509 510 511 512
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
513 514
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
515
			handle_nested_irq(irq_find_mapping(data->domain, i));
516
			handled = true;
517 518 519
		}
	}

520 521 522
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

523 524 525 526
exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

527 528 529 530
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
531 532
}

533 534 535 536 537 538
/* irq_domain .map callback: wire a newly created virq to this chip. */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	/* handlers run from the chained handler's thread, not hard IRQ */
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

547
/* Domain ops shared by the legacy and linear domains created below. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

552
/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
	int i;
	int ret = -ENOMEM;
	int num_type_reg;
	u32 reg;
	u32 unmask_offset;

	if (chip->num_regs <= 0)
		return -EINVAL;

	/* clear_on_unmask reads status to clear it; explicit ack conflicts */
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

	/* Every IRQ must land inside a valid, stride-aligned register */
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
		/* NOTE(review): descs are not freed on later error paths */
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(unsigned int),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

	if (chip->wake_base) {
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

	/* type_in_mask chips keep per-status-register type buffers */
	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
		if (!d->type_buf_def)
			goto err_alloc;

		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

	d->irq_chip = regmap_irq_chip;
	d->irq_chip.name = chip->name;
	d->irq = irq;
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

	/* Bulk status reads are possible only with a contiguous layout */
	if (!map->use_single_read && map->reg_stride == 1 &&
	    d->irq_reg_stride == 1) {
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
		if (!d->status_reg_buf)
			goto err_alloc;
	}

	mutex_init(&d->lock);

	/* Collect the union of valid IRQ bits for each register */
	for (i = 0; i < chip->num_irqs; i++)
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
		if (!chip->mask_base)
			continue;

		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (chip->mask_invert)
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf[i], d->mask_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
				reg, ret);
			goto err_alloc;
		}

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);

			if (chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 0);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	/* Read hardware defaults for the type registers */
	if (chip->num_type_reg && !chip->type_in_mask) {
		for (i = 0; i < chip->num_type_reg; ++i) {
			/*
			 * NOTE(review): type_buf_def was kcalloc'd to zero
			 * above, so this continue appears to always fire and
			 * skip the reads below — confirm intended behavior.
			 */
			if (!d->type_buf_def[i])
				continue;

			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);

			ret = regmap_read(map, reg, &d->type_buf_def[i]);

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];

			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
		goto err_domain;
	}

	*data = d;

	return 0;

err_domain:
	/* Should really dispose of the domain but... */
err_alloc:
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d->status_reg_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	/* Stop the chained handler before tearing down the mappings */
	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
869

870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889
/* devres destructor: tear down the IRQ chip when the device is unbound. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *d = *(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(d->irq, d);
}

/* devres match: true when @res holds the regmap_irq_chip_data in @data. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* WARN_ON() returns its condition, so this warns and bails in one go */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	/* Reserve the devres node first so registration can't be orphaned */
	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which the resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);
	/* devres_release() runs devm_regmap_irq_chip_release() for us */
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);

	if (rc != 0)
		WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

955
/**
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
 *
 * @data: regmap irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs.  Only meaningful for
 * chips registered with a fixed irq_base (legacy domain).
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
	/* irq_base is 0 for linear domains — calling this is then a bug */
	WARN_ON(!data->irq_base);
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
968 969

/**
970
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
971
 *
972 973
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
974
 *
975
 * Useful for drivers to request their own IRQs.
976 977 978
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
979 980 981 982
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

983 984 985
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
986 987

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);