regmap-irq.c 11.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10 11 12 13
/*
 * regmap based irq_chip
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
14
#include <linux/device.h>
15 16 17
#include <linux/regmap.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
18
#include <linux/irqdomain.h>
19
#include <linux/pm_runtime.h>
20 21 22 23 24 25
#include <linux/slab.h>

#include "internal.h"

struct regmap_irq_chip_data {
	struct mutex lock;
26
	struct irq_chip irq_chip;
27 28

	struct regmap *map;
M
Mark Brown 已提交
29
	const struct regmap_irq_chip *chip;
30 31

	int irq_base;
32
	struct irq_domain *domain;
33

34 35 36
	int irq;
	int wake_count;

37 38 39
	unsigned int *status_buf;
	unsigned int *mask_buf;
	unsigned int *mask_buf_def;
40
	unsigned int *wake_buf;
41 42

	unsigned int irq_reg_stride;
43 44 45 46 47 48
};

static inline const
struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
				     int irq)
{
49
	return &data->chip->irqs[irq];
50 51 52 53 54 55 56 57 58 59 60 61
}

/* irq_bus_lock callback: take the chip mutex before genirq touches our state. */
static void regmap_irq_lock(struct irq_data *data)
{
	struct regmap_irq_chip_data *chip_data;

	chip_data = irq_data_get_irq_chip_data(data);
	mutex_lock(&chip_data->lock);
}

static void regmap_irq_sync_unlock(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
62
	struct regmap *map = d->map;
63
	int i, ret;
64
	u32 reg;
65

66 67 68 69 70 71 72
	if (d->chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0)
			dev_err(map->dev, "IRQ sync failed to resume: %d\n",
				ret);
	}

73 74 75 76 77 78
	/*
	 * If there's been a change in the mask write it back to the
	 * hardware.  We rely on the use of the regmap core cache to
	 * suppress pointless writes.
	 */
	for (i = 0; i < d->chip->num_regs; i++) {
79 80
		reg = d->chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
81 82 83 84 85
		if (d->chip->mask_invert)
			ret = regmap_update_bits(d->map, reg,
					 d->mask_buf_def[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(d->map, reg,
86 87 88
					 d->mask_buf_def[i], d->mask_buf[i]);
		if (ret != 0)
			dev_err(d->map->dev, "Failed to sync masks in %x\n",
89
				reg);
90 91
	}

92 93 94
	if (d->chip->runtime_pm)
		pm_runtime_put(map->dev);

95 96 97 98 99 100 101 102 103 104
	/* If we've changed our wakeup count propagate it to the parent */
	if (d->wake_count < 0)
		for (i = d->wake_count; i < 0; i++)
			irq_set_irq_wake(d->irq, 0);
	else if (d->wake_count > 0)
		for (i = 0; i < d->wake_count; i++)
			irq_set_irq_wake(d->irq, 1);

	d->wake_count = 0;

105 106 107 108 109 110
	mutex_unlock(&d->lock);
}

static void regmap_irq_enable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
111
	struct regmap *map = d->map;
112
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
113

114
	d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~irq_data->mask;
115 116 117 118 119
}

static void regmap_irq_disable(struct irq_data *data)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
120
	struct regmap *map = d->map;
121
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);
122

123
	d->mask_buf[irq_data->reg_offset / map->reg_stride] |= irq_data->mask;
124 125
}

126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147
static int regmap_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
	struct regmap *map = d->map;
	const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq);

	if (!d->chip->wake_base)
		return -EINVAL;

	if (on) {
		d->wake_buf[irq_data->reg_offset / map->reg_stride]
			&= ~irq_data->mask;
		d->wake_count++;
	} else {
		d->wake_buf[irq_data->reg_offset / map->reg_stride]
			|= irq_data->mask;
		d->wake_count--;
	}

	return 0;
}

148
static const struct irq_chip regmap_irq_chip = {
149 150 151 152
	.irq_bus_lock		= regmap_irq_lock,
	.irq_bus_sync_unlock	= regmap_irq_sync_unlock,
	.irq_disable		= regmap_irq_disable,
	.irq_enable		= regmap_irq_enable,
153
	.irq_set_wake		= regmap_irq_set_wake,
154 155 156 157 158
};

static irqreturn_t regmap_irq_thread(int irq, void *d)
{
	struct regmap_irq_chip_data *data = d;
M
Mark Brown 已提交
159
	const struct regmap_irq_chip *chip = data->chip;
160 161
	struct regmap *map = data->map;
	int ret, i;
162
	bool handled = false;
163
	u32 reg;
164

165 166 167 168 169 170 171 172 173
	if (chip->runtime_pm) {
		ret = pm_runtime_get_sync(map->dev);
		if (ret < 0) {
			dev_err(map->dev, "IRQ thread failed to resume: %d\n",
				ret);
			return IRQ_NONE;
		}
	}

174 175 176 177 178 179 180 181
	/*
	 * Ignore masked IRQs and ack if we need to; we ack early so
	 * there is no race between handling and acknowleding the
	 * interrupt.  We assume that typically few of the interrupts
	 * will fire simultaneously so don't worry about overhead from
	 * doing a write per register.
	 */
	for (i = 0; i < data->chip->num_regs; i++) {
182
		ret = regmap_read(map, chip->status_base + (i * map->reg_stride
183 184 185 186 187 188
				   * data->irq_reg_stride),
				   &data->status_buf[i]);

		if (ret != 0) {
			dev_err(map->dev, "Failed to read IRQ status: %d\n",
					ret);
189 190
			if (chip->runtime_pm)
				pm_runtime_put(map->dev);
191 192 193 194 195 196
			return IRQ_NONE;
		}

		data->status_buf[i] &= ~data->mask_buf[i];

		if (data->status_buf[i] && chip->ack_base) {
197 198 199
			reg = chip->ack_base +
				(i * map->reg_stride * data->irq_reg_stride);
			ret = regmap_write(map, reg, data->status_buf[i]);
200 201
			if (ret != 0)
				dev_err(map->dev, "Failed to ack 0x%x: %d\n",
202
					reg, ret);
203 204 205 206
		}
	}

	for (i = 0; i < chip->num_irqs; i++) {
207 208
		if (data->status_buf[chip->irqs[i].reg_offset /
				     map->reg_stride] & chip->irqs[i].mask) {
209
			handle_nested_irq(irq_find_mapping(data->domain, i));
210
			handled = true;
211 212 213
		}
	}

214 215 216
	if (chip->runtime_pm)
		pm_runtime_put(map->dev);

217 218 219 220
	if (handled)
		return IRQ_HANDLED;
	else
		return IRQ_NONE;
221 222
}

223 224 225 226 227 228
static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	struct regmap_irq_chip_data *data = h->host_data;

	irq_set_chip_data(virq, data);
229
	irq_set_chip_and_handler(virq, &data->irq_chip, handle_edge_irq);
230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247
	irq_set_nested_thread(virq, 1);

	/* ARM needs us to explicitly flag the IRQ as valid
	 * and will set them noprobe when we do so. */
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);
#else
	irq_set_noprobe(virq);
#endif

	return 0;
}

static struct irq_domain_ops regmap_domain_ops = {
	.map	= regmap_irq_map,
	.xlate	= irq_domain_xlate_twocell,
};

248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263
/**
 * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
 *
 * map:       The regmap for the device.
 * irq:       The IRQ the device uses to signal interrupts
 * irq_flags: The IRQF_ flags to use for the primary interrupt.
 * chip:      Configuration for the interrupt controller.
 * data:      Runtime data structure for the controller, allocated on success
 *
 * Returns 0 on success or an errno on failure.
 *
 * In order for this to be efficient the chip really should use a
 * register cache.  The chip driver is responsible for restoring the
 * register values used by the IRQ controller over suspend and resume.
 */
int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
M
Mark Brown 已提交
264
			int irq_base, const struct regmap_irq_chip *chip,
265 266 267
			struct regmap_irq_chip_data **data)
{
	struct regmap_irq_chip_data *d;
268
	int i;
269
	int ret = -ENOMEM;
270
	u32 reg;
271

272 273 274 275 276 277 278 279
	for (i = 0; i < chip->num_irqs; i++) {
		if (chip->irqs[i].reg_offset % map->reg_stride)
			return -EINVAL;
		if (chip->irqs[i].reg_offset / map->reg_stride >=
		    chip->num_regs)
			return -EINVAL;
	}

280 281 282 283 284 285 286
	if (irq_base) {
		irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
		if (irq_base < 0) {
			dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
				 irq_base);
			return irq_base;
		}
287 288 289 290 291 292
	}

	d = kzalloc(sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

293 294
	*data = d;

295 296 297 298 299 300 301 302 303 304 305 306 307 308 309
	d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				GFP_KERNEL);
	if (!d->status_buf)
		goto err_alloc;

	d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
			      GFP_KERNEL);
	if (!d->mask_buf)
		goto err_alloc;

	d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
				  GFP_KERNEL);
	if (!d->mask_buf_def)
		goto err_alloc;

310 311 312 313 314 315 316
	if (chip->wake_base) {
		d->wake_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
				      GFP_KERNEL);
		if (!d->wake_buf)
			goto err_alloc;
	}

317
	d->irq_chip = regmap_irq_chip;
318
	d->irq_chip.name = chip->name;
319 320 321 322 323
	if (!chip->wake_base) {
		d->irq_chip.irq_set_wake = NULL;
		d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND |
				     IRQCHIP_SKIP_SET_WAKE;
	}
324
	d->irq = irq;
325 326 327
	d->map = map;
	d->chip = chip;
	d->irq_base = irq_base;
328 329 330 331 332 333

	if (chip->irq_reg_stride)
		d->irq_reg_stride = chip->irq_reg_stride;
	else
		d->irq_reg_stride = 1;

334 335 336
	mutex_init(&d->lock);

	for (i = 0; i < chip->num_irqs; i++)
337
		d->mask_buf_def[chip->irqs[i].reg_offset / map->reg_stride]
338 339 340 341 342
			|= chip->irqs[i].mask;

	/* Mask all the interrupts by default */
	for (i = 0; i < chip->num_regs; i++) {
		d->mask_buf[i] = d->mask_buf_def[i];
343 344
		reg = chip->mask_base +
			(i * map->reg_stride * d->irq_reg_stride);
345 346 347 348 349
		if (chip->mask_invert)
			ret = regmap_update_bits(map, reg,
					 d->mask_buf[i], ~d->mask_buf[i]);
		else
			ret = regmap_update_bits(map, reg,
350
					 d->mask_buf[i], d->mask_buf[i]);
351 352
		if (ret != 0) {
			dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
353
				reg, ret);
354 355 356 357
			goto err_alloc;
		}
	}

358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373
	/* Wake is disabled by default */
	if (d->wake_buf) {
		for (i = 0; i < chip->num_regs; i++) {
			d->wake_buf[i] = d->mask_buf_def[i];
			reg = chip->wake_base +
				(i * map->reg_stride * d->irq_reg_stride);
			ret = regmap_update_bits(map, reg, d->wake_buf[i],
						 d->wake_buf[i]);
			if (ret != 0) {
				dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
					reg, ret);
				goto err_alloc;
			}
		}
	}

374 375 376 377 378 379 380 381 382 383 384 385
	if (irq_base)
		d->domain = irq_domain_add_legacy(map->dev->of_node,
						  chip->num_irqs, irq_base, 0,
						  &regmap_domain_ops, d);
	else
		d->domain = irq_domain_add_linear(map->dev->of_node,
						  chip->num_irqs,
						  &regmap_domain_ops, d);
	if (!d->domain) {
		dev_err(map->dev, "Failed to create IRQ domain\n");
		ret = -ENOMEM;
		goto err_alloc;
386 387 388 389 390 391
	}

	ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
				   chip->name, d);
	if (ret != 0) {
		dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
392
		goto err_domain;
393 394 395 396
	}

	return 0;

397 398
err_domain:
	/* Should really dispose of the domain but... */
399
err_alloc:
400
	kfree(d->wake_buf);
401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_add_irq_chip);

/**
 * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
 *
 * @irq: Primary IRQ for the device
 * @d:   regmap_irq_chip_data allocated by regmap_add_irq_chip()
 */
void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
{
	if (!d)
		return;

	free_irq(irq, d);
	/* We should unmap the domain but... */
	kfree(d->wake_buf);
	kfree(d->mask_buf_def);
	kfree(d->mask_buf);
	kfree(d->status_buf);
	kfree(d);
}
EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
429 430 431 432 433 434 435 436 437 438

/**
 * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 */
int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
{
439
	WARN_ON(!data->irq_base);
440 441 442
	return data->irq_base;
}
EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
443 444 445 446 447 448 449 450 451 452 453

/**
 * regmap_irq_get_virq(): Map an interrupt on a chip to a virtual IRQ
 *
 * Useful for drivers to request their own IRQs.
 *
 * @data: regmap_irq controller to operate on.
 * @irq: index of the interrupt requested in the chip IRQs
 */
int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq)
{
454 455 456 457
	/* Handle holes in the IRQ list */
	if (!data->chip->irqs[irq].mask)
		return -EINVAL;

458 459 460
	return irq_create_mapping(data->domain, irq);
}
EXPORT_SYMBOL_GPL(regmap_irq_get_virq);