regmap-irq.c 24.8 KB
Newer Older
1 2 3 4 5 6 7
// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8

9
#include <linux/device.h>
10
#include <linux/export.h>
11
#include <linux/interrupt.h>
12
#include <linux/irq.h>
13
#include <linux/irqdomain.h>
14
#include <linux/pm_runtime.h>
15
#include <linux/regmap.h>
16 17 18 19 20 21
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance.
 * Allocated by regmap_add_irq_chip() and released by regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached-register updates */
	struct irq_chip irq_chip;	/* per-instance copy of the template chip */

	struct regmap *map;		/* regmap used for all register I/O */
	const struct regmap_irq_chip *chip;	/* static chip description */

	int irq_base;			/* base descriptor when using a legacy domain */
	struct irq_domain *domain;

	int irq;			/* primary (parent) interrupt */
	int wake_count;			/* net wake enables pending propagation to parent */

	void *status_reg_buf;		/* raw buffer for bulk status reads */
	unsigned int *main_status_buf;	/* cached main (top-level) status registers */
	unsigned int *status_buf;	/* per-register latched status */
	unsigned int *mask_buf;		/* current mask state, synced on bus unlock */
	unsigned int *mask_buf_def;	/* union of all IRQ masks per register */
	unsigned int *wake_buf;		/* wake enable state, inverted sense vs wake_count */
	unsigned int *type_buf;		/* pending trigger-type configuration */
	unsigned int *type_buf_def;	/* type bits in use per type register */

	unsigned int irq_reg_stride;	/* register step between successive IRQ regs */
	unsigned int type_reg_stride;	/* register step between successive type regs */

	bool clear_status:1;		/* read-to-clear status on next sync unlock */
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
52
	return &data->chip->irqs[irq];
53 54 55 56 57 58 59 60 61
}

/*
 * irq_chip .irq_bus_lock: taken around batches of mask/type updates;
 * released (and the cache synced to hardware) in regmap_irq_sync_unlock().
 */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data =
		irq_data_get_irq_chip_data(data);

	mutex_lock(&chip_data->lock);
}

62 63 64 65 66 67 68 69 70 71
/*
 * Update @mask bits of @reg to @val, honouring mask_writeonly: chips whose
 * mask registers cannot be read back must use regmap_write_bits() so the
 * write is not suppressed by a read-modify-write against the cache.
 */
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);

	return regmap_update_bits(d->map, reg, mask, val);
}

72 73 74
/*
 * irq_chip .irq_bus_sync_unlock: flush all cached mask/wake/type state to
 * the hardware, ack masked-but-pending interrupts if requested, propagate
 * the accumulated wake count to the parent IRQ, then drop the bus lock.
 * Runs with d->lock held (taken in regmap_irq_lock()).
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/* Chips with clear_on_unmask: reading the status registers clears them */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* 1 = unmasked on this hardware, hence the inversion */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/*
 * irq_chip .irq_enable: clear the cached mask bit for this interrupt.
 * The hardware write is deferred to regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	/* Non-zero only if this IRQ declares edge trigger values */
	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	/* Chip clears status by reading it; schedule that for the sync */
	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
246
	struct regmap *map = d->map;
247
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
248

249
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
250 251
}

252 253 254 255 256
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
257 258
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;
259

260
	if ((t->types_supported & type) != type)
261
		return 0;
262

263 264 265 266 267 268 269 270 271
	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
272 273
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
274
		d->type_buf[reg] |= t->type_falling_val;
275 276 277
		break;

	case IRQ_TYPE_EDGE_RISING:
278
		d->type_buf[reg] |= t->type_rising_val;
279 280 281
		break;

	case IRQ_TYPE_EDGE_BOTH:
282 283
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
284 285
		break;

286 287 288 289 290 291 292
	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
293 294 295 296 297 298
	default:
		return -EINVAL;
	}
	return 0;
}

299 300 301 302 303 304 305
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
306 307 308
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
309 310
		d->wake_count++;
	} else {
311 312 313
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
314 315 316 317 318 319
		d->wake_count--;
	}

	return 0;
}

320
/*
 * Template irq_chip shared by all regmap IRQ controllers; a copy is made
 * into each regmap_irq_chip_data so the name can be customised per chip.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355
/*
 * Read the sub status register(s) behind bit @b of the main status
 * register into data->status_buf.  Without an explicit sub_reg_offsets
 * table, main bit b is assumed to map linearly to status register b.
 * Returns 0 on success or the first regmap_read() error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	unsigned int reg;
	int i, ret;

	if (!chip->sub_reg_offsets) {
		/* Assume linear mapping */
		reg = chip->status_base +
		      (b * map->reg_stride * data->irq_reg_stride);
		return regmap_read(map, reg, &data->status_buf[b]);
	}

	subreg = &chip->sub_reg_offsets[b];
	for (i = 0; i < subreg->num_regs; i++) {
		unsigned int offset = subreg->offset[i];

		ret = regmap_read(map, chip->status_base + offset,
				  &data->status_buf[offset]);
		if (ret)
			return ret;
	}

	return 0;
}

356 357 358
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
M
Mark Brown 已提交
359
	const struct regmap_irq_chip *chip = data->chip;
360 361
	struct regmap *map = data->map;
	int ret, i;
362
	bool handled = false;
363
	u32 reg;
364

365 366 367
	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

368 369 370 371 372
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
373
			pm_runtime_put(map->dev);
374
			goto exit;
375 376 377
		}
	}

378
	/*
379 380 381
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
382
	 */
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437

	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					if (chip->runtime_pm)
						pm_runtime_put(map->dev);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {

438 439 440
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;
441

442 443 444 445 446
		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
447 448
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
449
				ret);
450
			goto exit;
451
		}
452 453 454 455 456 457 458 459 460 461 462 463 464 465

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
466
				goto exit;
467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482
			}
		}

	} else {
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				if (chip->runtime_pm)
					pm_runtime_put(map->dev);
483
				goto exit;
484 485
			}
		}
486
	}
487

488 489 490 491 492 493 494 495
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
496 497
		data->status_buf[i] &= ~data->mask_buf[i];

498
		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
499 500 501
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
502 503
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
504
					reg, ret);
505 506 507 508
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
509 510
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
511
			handle_nested_irq(irq_find_mapping(data->domain, i));
512
			handled = true;
513 514 515
		}
	}

516 517 518
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

519 520 521 522
exit:
	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

523 524 525 526
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
527 528
}

529 530 531 532 533 534
/*
 * irq_domain .map callback: wire a newly created virq to this controller's
 * irq_chip and mark it for nested (threaded) handling under the parent IRQ.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
	irq_set_chip(virq, &data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, data->irq);
	irq_set_noprobe(virq);

	return 0;
}

543
/* Domain ops; onetwocell xlate accepts one- and two-cell DT specifiers. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

548
/**
549
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
550
 *
551 552 553 554 555 556
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
557 558 559 560 561 562 563 564
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
M
Mark Brown 已提交
565
			int irq_base, const struct regmap_irq_chip *chip,
566 567 568
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
569
	int i;
570
	int ret = -ENOMEM;
571
	int num_type_reg;
572
	u32 reg;
573
	u32 unmask_offset;
574

575 576 577
	if (chip->num_regs <= 0)
		return -EINVAL;

578 579 580
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

581 582 583 584 585 586 587 588
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

589 590 591 592 593 594 595
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
596 597 598 599 600 601
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

602 603 604 605 606 607 608 609 610
	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(unsigned int),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

L
lixiubo 已提交
611
	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
612 613 614 615
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

L
lixiubo 已提交
616
	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
617 618 619 620
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

L
lixiubo 已提交
621
	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
622 623 624 625
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

626
	if (chip->wake_base) {
L
lixiubo 已提交
627
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
628 629 630 631 632
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

633 634 635 636
	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
637 638 639
		if (!d->type_buf_def)
			goto err_alloc;

640
		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
641 642 643 644 645
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

646
	d->irq_chip = regmap_irq_chip;
647
	d->irq_chip.name = chip->name;
648
	d->irq = irq;
649 650 651
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
652 653 654 655 656 657

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

658 659 660 661 662
	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

663
	if (!map->use_single_read && map->reg_stride == 1 &&
664
	    d->irq_reg_stride == 1) {
665 666 667
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
668 669 670 671
		if (!d->status_reg_buf)
			goto err_alloc;
	}

672 673 674
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
675
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
676 677 678 679 680
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
681 682 683
		if (!chip->mask_base)
			continue;

684 685
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
686
		if (chip->mask_invert)
687
			ret = regmap_irq_update_bits(d, reg,
688
					 d->mask_buf[i], ~d->mask_buf[i]);
689 690 691
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
692
			ret = regmap_irq_update_bits(d,
693 694 695 696
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
697
			ret = regmap_irq_update_bits(d, reg,
698
					 d->mask_buf[i], d->mask_buf[i]);
699 700
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
701
				reg, ret);
702 703
			goto err_alloc;
		}
704 705 706 707 708 709 710 711 712 713 714 715 716 717

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

718
		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
719 720
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
721 722 723 724 725
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
726 727 728 729 730 731 732
					d->status_buf[i] & d->mask_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
733 734
	}

735 736 737 738 739 740
	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
741 742

			if (chip->wake_invert)
743
				ret = regmap_irq_update_bits(d, reg,
744 745 746
							 d->mask_buf_def[i],
							 0);
			else
747
				ret = regmap_irq_update_bits(d, reg,
748 749
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
750 751 752 753 754 755 756 757
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

758
	if (chip->num_type_reg && !chip->type_in_mask) {
759 760 761
		for (i = 0; i < chip->num_type_reg; ++i) {
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
762 763 764 765 766 767 768 769

			ret = regmap_read(map, reg, &d->type_buf_def[i]);

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];

			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
770 771 772 773 774 775
					reg, ret);
				goto err_alloc;
			}
		}
	}

776 777 778 779 780 781 782 783 784 785 786 787
	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
788 789
	}

790 791
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
792 793
				   chip->name, d);
	if (ret != 0) {
794 795
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
796
		goto err_domain;
797 798
	}

799 800
	*data = d;

801 802
	return 0;

803 804
err_domain:
	/* Should really dispose of the domain but... */
805
err_alloc:
806 807
	kfree(d->type_buf);
	kfree(d->type_buf_def);
808
	kfree(d->wake_buf);
809 810 811
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
812
	kfree(d->status_reg_buf);
813 814 815 816 817 818
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	/* Fix: main_status_buf was previously leaked on teardown */
	kfree(d->main_status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
862

863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882
/* devres release callback: tear down the chip stored in *res. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data *data =
		*(struct regmap_irq_chip_data **)res;

	regmap_del_irq_chip(data->irq, data);
}

/* devres match callback: true when *res is the chip data being released. */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	/* WARN_ON() returns its condition, so the guard and warn collapse */
	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
 * devm_regmap_add_irq_chip() - Resource managed regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	/* Reserve the devres node first so a late failure can't leak the chip */
	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	ret = regmap_add_irq_chip(map, irq, irq_flags, irq_base,
				  chip, &d);
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which which resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	int rc;

	WARN_ON(irq != data->irq);

	/* Releasing the devres node runs devm_regmap_irq_chip_release() */
	rc = devres_release(dev, devm_regmap_irq_chip_release,
			    devm_regmap_irq_chip_match, data);
	WARN_ON(rc);
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

948
/**
949
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
950
 *
951
 * @data: regmap irq controller to operate on.
952
 *
953
 * Useful for drivers to request their own IRQs.
954 955 956
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
957
	WARN_ON(!data->irq_base);
958 959 960
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
961 962

/**
963
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
964
 *
965 966
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
967
 *
968
 * Useful for drivers to request their own IRQs.
969 970 971
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
972 973 974 975
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

976 977 978
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
979 980

/**
981 982 983
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
984 985 986 987 988 989 990 991 992 993 994 995 996 997
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	if (data)
		return data->domain;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);