regmap-irq.c 27.7 KB
Newer Older
1 2 3 4 5 6 7
// SPDX-License-Identifier: GPL-2.0
//
// regmap based irq_chip
//
// Copyright 2011 Wolfson Microelectronics plc
//
// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8

9
#include <linux/device.h>
10
#include <linux/export.h>
11
#include <linux/interrupt.h>
12
#include <linux/irq.h>
13
#include <linux/irqdomain.h>
14
#include <linux/pm_runtime.h>
15
#include <linux/regmap.h>
16 17 18 19 20 21
#include <linux/slab.h>

#include "internal.h"

/*
 * Runtime state for one regmap-based interrupt controller instance.
 * Allocated by regmap_add_irq_chip_fwnode() and freed by
 * regmap_del_irq_chip().
 */
struct regmap_irq_chip_data {
	struct mutex lock;		/* serialises cached-buffer updates vs sync_unlock */
	struct irq_chip irq_chip;	/* per-instance copy of the regmap_irq_chip template */

	struct regmap *map;		/* register map used for all hardware I/O */
	const struct regmap_irq_chip *chip; /* static chip description from the driver */

	int irq_base;			/* first Linux IRQ number when legacy-mapped, else 0 */
	struct irq_domain *domain;	/* domain covering chip->num_irqs hwirqs */

	int irq;			/* primary (parent) interrupt line */
	int wake_count;			/* net wake enable delta, applied to parent at sync */

	void *status_reg_buf;		/* raw byte buffer for bulk status reads */
	unsigned int *main_status_buf;	/* latest main status register values */
	unsigned int *status_buf;	/* decoded per-register status values */
	unsigned int *mask_buf;		/* cached mask state to be written back */
	unsigned int *mask_buf_def;	/* union of all IRQ mask bits per register */
	unsigned int *wake_buf;		/* cached wake enables (bit clear == wake on) */
	unsigned int *type_buf;		/* cached trigger-type register values */
	unsigned int *type_buf_def;	/* type bits in use per type register */

	unsigned int irq_reg_stride;	/* register step between status/mask banks */
	unsigned int type_reg_stride;	/* register step between type banks */

	bool clear_status:1;		/* read status regs to clear them at next sync */
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
52
	return &data->chip->irqs[irq];
53 54 55 56 57 58 59 60 61
}

/* irq_bus_lock callback: taken around any update of the cached buffers. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data = irq_data_get_irq_chip_data(data);

	mutex_lock(&chip_data->lock);
}

62 63 64 65 66 67 68 69 70 71
/*
 * Write @val under @mask to @reg.  Chips whose mask registers are
 * write-only must not be read-modify-written, so honour the
 * mask_writeonly flag by forcing an unconditional write.
 */
static int regmap_irq_update_bits(struct regmap_irq_chip_data *d,
				  unsigned int reg, unsigned int mask,
				  unsigned int val)
{
	if (d->chip->mask_writeonly)
		return regmap_write_bits(d->map, reg, mask, val);

	return regmap_update_bits(d->map, reg, mask, val);
}

72 73 74
/*
 * irq_bus_sync_unlock callback: push the cached mask/wake/type state
 * out to the hardware and release the lock taken in regmap_irq_lock().
 * Failures are logged but cannot be propagated — irq_chip bus callbacks
 * return void.
 */
static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	int i, ret;
	u32 reg;
	u32 unmask_offset;
	u32 val;

	/* Make sure the device is awake while we touch its registers */
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

	/*
	 * clear_on_unmask chips: reading the status registers clears
	 * them; the read value itself is discarded.
	 */
	if (d->clear_status) {
		for (i = 0; i < d->chip->num_regs; i++) {
			reg = d->chip->status_base +
				(i * map->reg_stride * d->irq_reg_stride);

			ret = regmap_read(map, reg, &val);
			if (ret)
				dev_err(d->map->dev,
					"Failed to clear the interrupt status bits\n");
		}

		d->clear_status = false;
	}

	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
		if (!d->chip->mask_base)
			continue;

		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->chip->mask_invert) {
			/* Inverted semantics: 1 == enabled in hardware */
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		} else if (d->chip->unmask_base) {
			/* set mask with mask_base register */
			ret = regmap_irq_update_bits(d, reg,
					d->mask_buf_def[i], ~d->mask_buf[i]);
			if (ret < 0)
				dev_err(d->map->dev,
					"Failed to sync unmasks in %x\n",
					reg);
			unmask_offset = d->chip->unmask_base -
							d->chip->mask_base;
			/* clear mask with unmask_base register */
			ret = regmap_irq_update_bits(d,
					reg + unmask_offset,
					d->mask_buf_def[i],
					d->mask_buf[i]);
		} else {
			ret = regmap_irq_update_bits(d, reg,
					 d->mask_buf_def[i], d->mask_buf[i]);
		}
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
				reg);

		/* Mirror the wake enables to the wake registers, if any */
		reg = d->chip->wake_base +
			(i * map->reg_stride * d->irq_reg_stride);
		if (d->wake_buf) {
			if (d->chip->wake_invert)
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 ~d->wake_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev,
					"Failed to sync wakes in %x: %d\n",
					reg, ret);
		}

		if (!d->chip->init_ack_masked)
			continue;
		/*
		 * Ack all the masked interrupts unconditionally,
		 * OR if there is masked interrupt which hasn't been Acked,
		 * it'll be ignored in irq handler, then may introduce irq storm
		 */
		if (d->mask_buf[i] && (d->chip->ack_base || d->chip->use_ack)) {
			reg = d->chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
			/* some chips ack by write 0 */
			if (d->chip->ack_invert)
				ret = regmap_write(map, reg, ~d->mask_buf[i]);
			else
				ret = regmap_write(map, reg, d->mask_buf[i]);
			/* clear_ack chips need a second, opposite write */
			if (d->chip->clear_ack) {
				if (d->chip->ack_invert && !ret)
					ret = regmap_write(map, reg,
							   d->mask_buf[i]);
				else if (!ret)
					ret = regmap_write(map, reg,
							   ~d->mask_buf[i]);
			}
			if (ret != 0)
				dev_err(d->map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Don't update the type bits if we're using mask bits for irq type. */
	if (!d->chip->type_in_mask) {
		for (i = 0; i < d->chip->num_type_reg; i++) {
			if (!d->type_buf_def[i])
				continue;
			reg = d->chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
			if (d->chip->type_invert)
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], ~d->type_buf[i]);
			else
				ret = regmap_irq_update_bits(d, reg,
					d->type_buf_def[i], d->type_buf[i]);
			if (ret != 0)
				dev_err(d->map->dev, "Failed to sync type in %x\n",
					reg);
		}
	}

	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

	mutex_unlock(&d->lock);
}

/*
 * irq_enable callback: clear the relevant bit(s) in the cached mask
 * buffer.  The hardware write is deferred to regmap_irq_sync_unlock().
 */
static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
	unsigned int mask, type;

	type = irq_data->type.type_falling_val | irq_data->type.type_rising_val;

	/*
	 * The type_in_mask flag means that the underlying hardware uses
	 * separate mask bits for rising and falling edge interrupts, but
	 * we want to make them into a single virtual interrupt with
	 * configurable edge.
	 *
	 * If the interrupt we're enabling defines the falling or rising
	 * masks then instead of using the regular mask bits for this
	 * interrupt, use the value previously written to the type buffer
	 * at the corresponding offset in regmap_irq_set_type().
	 */
	if (d->chip->type_in_mask && type)
		mask = d->type_buf[irq_data->reg_offset / map->reg_stride];
	else
		mask = irq_data->mask;

	/* Ask sync_unlock to read (and thereby clear) the status regs */
	if (d->chip->clear_on_unmask)
		d->clear_status = true;

	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask;
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
254
	struct regmap *map = d->map;
255
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
256

257
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
258 259
}

260 261 262 263 264
static int regmap_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
265 266
	int reg;
	const struct regmap_irq_type *t = &irq_data->type;
267

268
	if ((t->types_supported & type) != type)
269
		return 0;
270

271 272 273 274 275 276 277 278 279
	reg = t->type_reg_offset / map->reg_stride;

	if (t->type_reg_mask)
		d->type_buf[reg] &= ~t->type_reg_mask;
	else
		d->type_buf[reg] &= ~(t->type_falling_val |
				      t->type_rising_val |
				      t->type_level_low_val |
				      t->type_level_high_val);
280 281
	switch (type) {
	case IRQ_TYPE_EDGE_FALLING:
282
		d->type_buf[reg] |= t->type_falling_val;
283 284 285
		break;

	case IRQ_TYPE_EDGE_RISING:
286
		d->type_buf[reg] |= t->type_rising_val;
287 288 289
		break;

	case IRQ_TYPE_EDGE_BOTH:
290 291
		d->type_buf[reg] |= (t->type_falling_val |
					t->type_rising_val);
292 293
		break;

294 295 296 297 298 299 300
	case IRQ_TYPE_LEVEL_HIGH:
		d->type_buf[reg] |= t->type_level_high_val;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		d->type_buf[reg] |= t->type_level_low_val;
		break;
301 302 303 304 305 306
	default:
		return -EINVAL;
	}
	return 0;
}

307 308 309 310 311 312 313
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (on) {
314 315 316
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				&= ~irq_data->mask;
317 318
		d->wake_count++;
	} else {
319 320 321
		if (d->wake_buf)
			d->wake_buf[irq_data->reg_offset / map->reg_stride]
				|= irq_data->mask;
322 323 324 325 326 327
		d->wake_count--;
	}

	return 0;
}

328
/*
 * Template irq_chip; copied into each regmap_irq_chip_data instance
 * (see regmap_add_irq_chip_fwnode()) so the name can be set per chip.
 */
static const struct irq_chip regmap_irq_chip = {
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
	.irq_set_type		= regmap_irq_set_type,
	.irq_set_wake		= regmap_irq_set_wake,
};

337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363
/*
 * Read the sub status register(s) behind bit @b of the main status
 * register into data->status_buf.  Without an explicit sub_reg_offsets
 * table a linear main-bit -> status-register mapping is assumed.
 * Returns 0 on success or the first regmap_read() error.
 */
static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
					   unsigned int b)
{
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	struct regmap_irq_sub_irq_map *subreg;
	unsigned int reg;
	int i, ret;

	if (!chip->sub_reg_offsets) {
		/* Assume linear mapping */
		reg = chip->status_base +
		      (b * map->reg_stride * data->irq_reg_stride);
		return regmap_read(map, reg, &data->status_buf[b]);
	}

	subreg = &chip->sub_reg_offsets[b];
	for (i = 0; i < subreg->num_regs; i++) {
		unsigned int offset = subreg->offset[i];

		ret = regmap_read(map, chip->status_base + offset,
				  &data->status_buf[offset]);
		if (ret)
			return ret;
	}

	return 0;
}

364 365 366
/*
 * Threaded handler for the chip's primary IRQ.  Reads the status
 * registers — via the main-status dispatch path, a single bulk read,
 * or per-register reads — then acks and dispatches every pending,
 * unmasked source as a nested virtual interrupt.  Returns IRQ_HANDLED
 * if at least one source was dispatched, IRQ_NONE otherwise.
 */
static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
	const struct regmap_irq_chip *chip = data->chip;
	struct regmap *map = data->map;
	int ret, i;
	bool handled = false;
	u32 reg;

	if (chip->handle_pre_irq)
		chip->handle_pre_irq(chip->irq_drv_data);

	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			/* get_sync bumps the usage count even on failure,
			 * so the put at 'exit' is still required */
			goto exit;
		}
	}

	/*
	 * Read only registers with active IRQs if the chip has 'main status
	 * register'. Else read in the statuses, using a single bulk read if
	 * possible in order to reduce the I/O overheads.
	 */

	if (chip->num_main_regs) {
		unsigned int max_main_bits;
		unsigned long size;

		size = chip->num_regs * sizeof(unsigned int);

		max_main_bits = (chip->num_main_status_bits) ?
				 chip->num_main_status_bits : chip->num_regs;
		/* Clear the status buf as we don't read all status regs */
		memset(data->status_buf, 0, size);

		/* We could support bulk read for main status registers
		 * but I don't expect to see devices with really many main
		 * status registers so let's only support single reads for the
		 * sake of simplicity. and add bulk reads only if needed
		 */
		for (i = 0; i < chip->num_main_regs; i++) {
			ret = regmap_read(map, chip->main_status +
				  (i * map->reg_stride
				   * data->irq_reg_stride),
				  &data->main_status_buf[i]);
			if (ret) {
				dev_err(map->dev,
					"Failed to read IRQ status %d\n",
					ret);
				goto exit;
			}
		}

		/* Read sub registers with active IRQs */
		for (i = 0; i < chip->num_main_regs; i++) {
			unsigned int b;
			const unsigned long mreg = data->main_status_buf[i];

			for_each_set_bit(b, &mreg, map->format.val_bytes * 8) {
				if (i * map->format.val_bytes * 8 + b >
				    max_main_bits)
					break;
				ret = read_sub_irq_data(data, b);

				if (ret != 0) {
					dev_err(map->dev,
						"Failed to read IRQ status %d\n",
						ret);
					goto exit;
				}
			}

		}
	} else if (!map->use_single_read && map->reg_stride == 1 &&
		   data->irq_reg_stride == 1) {
		/* Fast path: one bulk read, then widen to unsigned int */
		u8 *buf8 = data->status_reg_buf;
		u16 *buf16 = data->status_reg_buf;
		u32 *buf32 = data->status_reg_buf;

		BUG_ON(!data->status_reg_buf);

		ret = regmap_bulk_read(map, chip->status_base,
				       data->status_reg_buf,
				       chip->num_regs);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto exit;
		}

		for (i = 0; i < data->chip->num_regs; i++) {
			switch (map->format.val_bytes) {
			case 1:
				data->status_buf[i] = buf8[i];
				break;
			case 2:
				data->status_buf[i] = buf16[i];
				break;
			case 4:
				data->status_buf[i] = buf32[i];
				break;
			default:
				BUG();
				goto exit;
			}
		}

	} else {
		/* Slow path: one read per status register */
		for (i = 0; i < data->chip->num_regs; i++) {
			ret = regmap_read(map, chip->status_base +
					  (i * map->reg_stride
					   * data->irq_reg_stride),
					  &data->status_buf[i]);

			if (ret != 0) {
				dev_err(map->dev,
					"Failed to read IRQ status: %d\n",
					ret);
				goto exit;
			}
		}
	}

	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowledging the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && (chip->ack_base || chip->use_ack)) {
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
						~data->status_buf[i]);
			else
				ret = regmap_write(map, reg,
						data->status_buf[i]);
			/* clear_ack chips need a second, opposite write */
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg,
							data->status_buf[i]);
				else if (!ret)
					ret = regmap_write(map, reg,
							~data->status_buf[i]);
			}
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
		}
	}

	/* Dispatch every still-set source as a nested virtual IRQ */
	for (i = 0; i < chip->num_irqs; i++) {
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
			handle_nested_irq(irq_find_mapping(data->domain, i));
			handled = true;
		}
	}

exit:
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

	if (chip->handle_post_irq)
		chip->handle_post_irq(chip->irq_drv_data);

	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
}

545 546 547 548 549 550
/*
 * irq_domain map callback: associate a newly created virq with this
 * chip's handlers and mark it as a nested, non-probeable interrupt.
 */
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *chip_data = h->host_data;

	irq_set_chip_data(virq, chip_data);
	irq_set_chip(virq, &chip_data->irq_chip);
	irq_set_nested_thread(virq, 1);
	irq_set_parent(virq, chip_data->irq);
	irq_set_noprobe(virq);

	return 0;
}

559
/* Domain ops: onetwocell xlate accepts one- or two-cell IRQ specifiers. */
static const struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_onetwocell,
};

564
/**
565
 * regmap_add_irq_chip_fwnode() - Use standard regmap IRQ controller handling
566
 *
567
 * @fwnode: The firmware node where the IRQ domain should be added to.
568 569 570 571 572 573
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
574 575 576 577 578 579 580
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
581 582 583 584 585
int regmap_add_irq_chip_fwnode(struct fwnode_handle *fwnode,
			       struct regmap *map, int irq,
			       int irq_flags, int irq_base,
			       const struct regmap_irq_chip *chip,
			       struct regmap_irq_chip_data **data)
586 587
{
	struct regmap_irq_chip_data *d;
588
	int i;
589
	int ret = -ENOMEM;
590
	int num_type_reg;
591
	u32 reg;
592
	u32 unmask_offset;
593

594 595 596
	if (chip->num_regs <= 0)
		return -EINVAL;

597 598 599
	if (chip->clear_on_unmask && (chip->ack_base || chip->use_ack))
		return -EINVAL;

600 601 602 603 604 605 606 607
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

608 609 610 611 612 613 614
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
615 616 617 618 619 620
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

621 622 623 624 625 626 627 628 629
	if (chip->num_main_regs) {
		d->main_status_buf = kcalloc(chip->num_main_regs,
					     sizeof(unsigned int),
					     GFP_KERNEL);

		if (!d->main_status_buf)
			goto err_alloc;
	}

L
lixiubo 已提交
630
	d->status_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
631 632 633 634
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

L
lixiubo 已提交
635
	d->mask_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
636 637 638 639
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

L
lixiubo 已提交
640
	d->mask_buf_def = kcalloc(chip->num_regs, sizeof(unsigned int),
641 642 643 644
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

645
	if (chip->wake_base) {
L
lixiubo 已提交
646
		d->wake_buf = kcalloc(chip->num_regs, sizeof(unsigned int),
647 648 649 650 651
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

652 653 654 655
	num_type_reg = chip->type_in_mask ? chip->num_regs : chip->num_type_reg;
	if (num_type_reg) {
		d->type_buf_def = kcalloc(num_type_reg,
					  sizeof(unsigned int), GFP_KERNEL);
656 657 658
		if (!d->type_buf_def)
			goto err_alloc;

659
		d->type_buf = kcalloc(num_type_reg, sizeof(unsigned int),
660 661 662 663 664
				      GFP_KERNEL);
		if (!d->type_buf)
			goto err_alloc;
	}

665
	d->irq_chip = regmap_irq_chip;
666
	d->irq_chip.name = chip->name;
667
	d->irq = irq;
668 669 670
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
671 672 673 674 675 676

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

677 678 679 680 681
	if (chip->type_reg_stride)
		d->type_reg_stride = chip->type_reg_stride;
	else
		d->type_reg_stride = 1;

682
	if (!map->use_single_read && map->reg_stride == 1 &&
683
	    d->irq_reg_stride == 1) {
684 685 686
		d->status_reg_buf = kmalloc_array(chip->num_regs,
						  map->format.val_bytes,
						  GFP_KERNEL);
687 688 689 690
		if (!d->status_reg_buf)
			goto err_alloc;
	}

691 692 693
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
694
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
695 696 697 698 699
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
700 701 702
		if (!chip->mask_base)
			continue;

703 704
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
705
		if (chip->mask_invert)
706
			ret = regmap_irq_update_bits(d, reg,
707
					 d->mask_buf[i], ~d->mask_buf[i]);
708 709 710
		else if (d->chip->unmask_base) {
			unmask_offset = d->chip->unmask_base -
					d->chip->mask_base;
711
			ret = regmap_irq_update_bits(d,
712 713 714 715
					reg + unmask_offset,
					d->mask_buf[i],
					d->mask_buf[i]);
		} else
716
			ret = regmap_irq_update_bits(d, reg,
717
					 d->mask_buf[i], d->mask_buf[i]);
718 719
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
720
				reg, ret);
721 722
			goto err_alloc;
		}
723 724 725 726 727 728 729 730 731 732 733 734 735 736

		if (!chip->init_ack_masked)
			continue;

		/* Ack masked but set interrupts */
		reg = chip->status_base +
			(i * map->reg_stride * d->irq_reg_stride);
		ret = regmap_read(map, reg, &d->status_buf[i]);
		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
				ret);
			goto err_alloc;
		}

737
		if (d->status_buf[i] && (chip->ack_base || chip->use_ack)) {
738 739
			reg = chip->ack_base +
				(i * map->reg_stride * d->irq_reg_stride);
740 741 742 743 744
			if (chip->ack_invert)
				ret = regmap_write(map, reg,
					~(d->status_buf[i] & d->mask_buf[i]));
			else
				ret = regmap_write(map, reg,
745
					d->status_buf[i] & d->mask_buf[i]);
746 747 748 749 750 751 752 753 754 755
			if (chip->clear_ack) {
				if (chip->ack_invert && !ret)
					ret = regmap_write(map, reg,
						(d->status_buf[i] &
						 d->mask_buf[i]));
				else if (!ret)
					ret = regmap_write(map, reg,
						~(d->status_buf[i] &
						  d->mask_buf[i]));
			}
756 757 758 759 760 761
			if (ret != 0) {
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
762 763
	}

764 765 766 767 768 769
	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
770 771

			if (chip->wake_invert)
772
				ret = regmap_irq_update_bits(d, reg,
773 774 775
							 d->mask_buf_def[i],
							 0);
			else
776
				ret = regmap_irq_update_bits(d, reg,
777 778
							 d->mask_buf_def[i],
							 d->wake_buf[i]);
779 780 781 782 783 784 785 786
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

787
	if (chip->num_type_reg && !chip->type_in_mask) {
788 789 790
		for (i = 0; i < chip->num_type_reg; ++i) {
			reg = chip->type_base +
				(i * map->reg_stride * d->type_reg_stride);
791 792 793 794 795 796 797 798

			ret = regmap_read(map, reg, &d->type_buf_def[i]);

			if (d->chip->type_invert)
				d->type_buf_def[i] = ~d->type_buf_def[i];

			if (ret) {
				dev_err(map->dev, "Failed to get type defaults at 0x%x: %d\n",
799 800 801 802 803 804
					reg, ret);
				goto err_alloc;
			}
		}
	}

805
	if (irq_base)
806 807
		d->domain = irq_domain_add_legacy(to_of_node(fwnode),
						  chip->num_irqs, irq_base,
808
						  0, &regmap_domain_ops, d);
809
	else
810 811
		d->domain = irq_domain_add_linear(to_of_node(fwnode),
						  chip->num_irqs,
812 813 814 815 816
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
817 818
	}

819 820
	ret = request_threaded_irq(irq, NULL, regmap_irq_thread,
				   irq_flags | IRQF_ONESHOT,
821 822
				   chip->name, d);
	if (ret != 0) {
823 824
		dev_err(map->dev, "Failed to request IRQ %d for %s: %d\n",
			irq, chip->name, ret);
825
		goto err_domain;
826 827
	}

828 829
	*data = d;

830 831
	return 0;

832 833
err_domain:
	/* Should really dispose of the domain but... */
834
err_alloc:
835 836
	kfree(d->type_buf);
	kfree(d->type_buf_def);
837
	kfree(d->wake_buf);
838 839 840
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
841
	kfree(d->status_reg_buf);
842 843 844
	kfree(d);
	return ret;
}
845
EXPORT_SYMBOL_GPL(regmap_add_irq_chip_fwnode);
846 847 848 849 850 851 852 853 854 855 856 857 858

/**
 * regmap_add_irq_chip() - Use standard regmap IRQ controller handling
 *
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts.
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success.
 *
 * Returns 0 on success or an errno on failure.
 *
859
 * This is the same as regmap_add_irq_chip_fwnode, except that the firmware
860 861 862 863 864 865
 * node of the regmap is used.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
			int irq_base, const struct regmap_irq_chip *chip,
			struct regmap_irq_chip_data **data)
{
866 867
	return regmap_add_irq_chip_fwnode(dev_fwnode(map->dev), map, irq,
					  irq_flags, irq_base, chip, data);
868
}
869 870 871
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip() - Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d: &regmap_irq_chip_data allocated by regmap_add_irq_chip()
 *
 * This function also disposes of all mapped IRQs on the chip.
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	unsigned int virq;
	int hwirq;

	if (!d)
		return;

	free_irq(irq, d);

	/* Dispose all virtual irq from irq domain before removing it */
	for (hwirq = 0; hwirq < d->chip->num_irqs; hwirq++) {
		/* Ignore hwirq if holes in the IRQ list */
		if (!d->chip->irqs[hwirq].mask)
			continue;

		/*
		 * Find the virtual irq of hwirq on chip and if it is
		 * there then dispose it
		 */
		virq = irq_find_mapping(d->domain, hwirq);
		if (virq)
			irq_dispose_mapping(virq);
	}

	irq_domain_remove(d->domain);
	kfree(d->type_buf);
	kfree(d->type_buf_def);
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_reg_buf);
	kfree(d->status_buf);
	kfree(d->main_status_buf);	/* allocated for main-status chips; was leaked */
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
915

916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935
/* devres release callback: tear the chip down when the device unbinds. */
static void devm_regmap_irq_chip_release(struct device *dev, void *res)
{
	struct regmap_irq_chip_data **ptr = res;

	regmap_del_irq_chip((*ptr)->irq, *ptr);
}

/*
 * devres match callback: true when @res holds the chip data passed in
 * @data.  WARN_ON already evaluates its argument, so fold the original
 * redundant "if (cond) WARN_ON(cond)" double test into one expression.
 */
static int devm_regmap_irq_chip_match(struct device *dev, void *res, void *data)
{
	struct regmap_irq_chip_data **r = res;

	if (WARN_ON(!r || !*r))
		return 0;

	return *r == data;
}

/**
936
 * devm_regmap_add_irq_chip_fwnode() - Resource managed regmap_add_irq_chip_fwnode()
937
 *
938
 * @dev: The device pointer on which irq_chip belongs to.
939
 * @fwnode: The firmware node where the IRQ domain should be added to.
940 941
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
942
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
943 944 945
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
946 947 948
 *
 * Returns 0 on success or an errno on failure.
 *
949
 * The &regmap_irq_chip_data will be automatically released when the device is
950 951
 * unbound.
 */
952 953 954 955 956 957
int devm_regmap_add_irq_chip_fwnode(struct device *dev,
				    struct fwnode_handle *fwnode,
				    struct regmap *map, int irq,
				    int irq_flags, int irq_base,
				    const struct regmap_irq_chip *chip,
				    struct regmap_irq_chip_data **data)
958 959 960 961 962 963 964 965 966
{
	struct regmap_irq_chip_data **ptr, *d;
	int ret;

	ptr = devres_alloc(devm_regmap_irq_chip_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

967 968
	ret = regmap_add_irq_chip_fwnode(fwnode, map, irq, irq_flags, irq_base,
					 chip, &d);
969 970 971 972 973 974 975 976 977 978
	if (ret < 0) {
		devres_free(ptr);
		return ret;
	}

	*ptr = d;
	devres_add(dev, ptr);
	*data = d;
	return 0;
}
979
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip_fwnode);
980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001

/**
 * devm_regmap_add_irq_chip() - Resource manager regmap_add_irq_chip()
 *
 * @dev: The device pointer on which irq_chip belongs to.
 * @map: The regmap for the device.
 * @irq: The IRQ the device uses to signal interrupts
 * @irq_flags: The IRQF_ flags to use for the primary interrupt.
 * @irq_base: Allocate at specific IRQ number if irq_base > 0.
 * @chip: Configuration for the interrupt controller.
 * @data: Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * The &regmap_irq_chip_data will be automatically released when the device is
 * unbound.
 */
int devm_regmap_add_irq_chip(struct device *dev, struct regmap *map, int irq,
			     int irq_flags, int irq_base,
			     const struct regmap_irq_chip *chip,
			     struct regmap_irq_chip_data **data)
{
1002 1003 1004
	return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(map->dev), map,
					       irq, irq_flags, irq_base, chip,
					       data);
1005
}
1006 1007 1008
EXPORT_SYMBOL_GPL(devm_regmap_add_irq_chip);

/**
 * devm_regmap_del_irq_chip() - Resource managed regmap_del_irq_chip()
 *
 * @dev: Device for which which resource was allocated.
 * @irq: Primary IRQ for the device.
 * @data: &regmap_irq_chip_data allocated by regmap_add_irq_chip().
 *
 * A resource managed version of regmap_del_irq_chip().
 */
void devm_regmap_del_irq_chip(struct device *dev, int irq,
			      struct regmap_irq_chip_data *data)
{
	WARN_ON(irq != data->irq);

	/*
	 * WARN_ON evaluates its argument, so the original
	 * "rc = ...; if (rc != 0) WARN_ON(rc);" double test collapses
	 * to a single expression with identical behaviour.
	 */
	WARN_ON(devres_release(dev, devm_regmap_irq_chip_release,
			       devm_regmap_irq_chip_match, data));
}
EXPORT_SYMBOL_GPL(devm_regmap_del_irq_chip);

1031
/**
1032
 * regmap_irq_chip_get_base() - Retrieve interrupt base for a regmap IRQ chip
1033
 *
1034
 * @data: regmap irq controller to operate on.
1035
 *
1036
 * Useful for drivers to request their own IRQs.
1037 1038 1039
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
1040
	WARN_ON(!data->irq_base);
1041 1042 1043
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
1044 1045

/**
1046
 * regmap_irq_get_virq() - Map an interrupt on a chip to a virtual IRQ
1047
 *
1048 1049
 * @data: regmap irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs.
1050
 *
1051
 * Useful for drivers to request their own IRQs.
1052 1053 1054
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
1055 1056 1057 1058
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

1059 1060 1061
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);
1062 1063

/**
 * regmap_irq_get_domain() - Retrieve the irq_domain for the chip
 *
 * @data: regmap_irq controller to operate on.
 *
 * Useful for drivers to request their own IRQs and for integration
 * with subsystems.  For ease of integration NULL is accepted as a
 * domain, allowing devices to just call this even if no domain is
 * allocated.
 */
struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data)
{
	return data ? data->domain : NULL;
}
EXPORT_SYMBOL_GPL(regmap_irq_get_domain);