/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <asm/gpio.h>
#include <asm/mach/irq.h>

#define OFF_MODE	1

static LIST_HEAD(omap_gpio_list);

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	u16 irq;
	int irq_base;
	struct irq_domain *domain;
	u32 suspend_wakeup;
	u32 saved_wakeup;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

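/*
 * GPIO numbers handed to these helpers may be global gpiolib numbers;
 * GPIO_INDEX() reduces them to an offset within this bank and GPIO_BIT()
 * turns that offset into a single-bit register mask.
 */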
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
#define GPIO_MOD_CTRL_BIT	BIT(0)

static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
	return gpio_irq - bank->irq_base + bank->chip.base;
}

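/*
 * Program the direction (OE) register: a set bit configures the line as
 * input, a cleared bit as output.  The cached context.oe is refreshed so
 * the value can be restored after a context loss.
 */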
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
	bank->context.oe = l;
}


/* set data out value using dedicated set/clear register */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	if (enable)
		reg += bank->regs->set_dataout;
	else
		reg += bank->regs->clr_dataout;

	__raw_writel(l, reg);
}

/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
	bank->context.dataout = l;
}

static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = __raw_readl(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	__raw_writel(l, base + reg);
}

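/*
 * The debounce functional clock is kept running only while at least one
 * GPIO in the bank has debouncing enabled (dbck_enable_mask != 0).
 */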
static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;
	}
}

static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

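/*
 * Program level/edge detection for one GPIO and refresh the cached trigger
 * context.  Edge-triggered GPIOs affected by the RET/OFF-mode behaviour are
 * recorded in enabled_non_wakeup_gpios so the runtime PM hooks below can
 * replay edges that would otherwise be lost.
 */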
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

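/*
 * Route the trigger request to whichever edge/level control scheme this
 * bank provides: leveldetect/wkup_en registers, a single IRQ control
 * register, or a pair of edge control registers.
 */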
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
							unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}

static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio;
	int retval;
	unsigned long flags;

	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = irq_to_gpio(bank, d->irq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

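/* Return a mask of the IRQ lines currently enabled in this bank. */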
static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (1 << bank->width) - 1;

	reg += bank->regs->irqenable;
	l = __raw_readl(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

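/*
 * Enable the given IRQ lines, using the dedicated set register where the
 * bank has one, otherwise by read-modify-write of the irqenable register
 * (honouring irqenable_inv on banks whose enable bits are active-low).
 */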
static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

/*
 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, the system will always wake up on GPIO events. While
 * system is running all registered GPIO interrupts need to have wake-up
 * enabled. When system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->suspend_wakeup |= gpio_bit;
	else
		bank->suspend_wakeup &= ~gpio_bit;

	__raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	return _set_gpio_wakeup(bank, gpio, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
618 619 620 621 622 623 624 625 626 627 628
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt that occurs while
		executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if only edge sensitive GPIO pin interrupts are
		configured, we can unmask the GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		gpio_irq = bank->irq_base;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			int gpio = irq_to_gpio(bank, gpio_irq);

			if (!(isr & 1))
				continue;

			gpio_index = GPIO_INDEX(bank, gpio);

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);

			generic_handle_irq(gpio_irq);
		}
	}
	/* if the bank has any level sensitive GPIO pin interrupts
	configured, we must unmask the bank interrupt only after the
	handler(s) have executed, in order to avoid a spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}

static void gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	_clear_gpio_irqstatus(bank, gpio);
}

static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_is_input(struct gpio_bank *bank, int mask)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return __raw_readl(reg) & mask;
}

static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	void __iomem *reg;
	int gpio;
	u32 mask;

	gpio = chip->base + offset;
	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base;
	mask = GPIO_BIT(bank, gpio);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, gpio);
	else
		return _get_gpio_dataout(bank, gpio);
}

static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

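/*
 * gpiolib ->set_debounce() hook; the bank's debounce functional clock
 * ("dbclk") is looked up lazily the first time debouncing is requested.
 */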
static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);
	return bank->irq_base + offset;
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

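/*
 * One-time hardware init for a bank: mask and clear all of its interrupts
 * before the gpio_chip and IRQ handlers are registered.
 */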
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		__raw_writel(l, bank->base + bank->regs->irqenable);
		return;
	}

	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
	_gpio_rmw(base, bank->regs->irqstatus, l,
					bank->regs->irqenable_inv == false);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
	if (bank->regs->debounce_en)
		_gpio_rmw(base, bank->regs->debounce_en, 0, 1);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
	 /* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		_gpio_rmw(base, bank->regs->ctrl, 0, 1);
}

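/*
 * MPUIO interrupts are handled through a generic irq chip: mask and unmask
 * are plain bit set/clear operations and no explicit ack is needed.
 */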
static __devinit void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;

	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

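/*
 * Register the gpio_chip for this bank and set up an IRQ descriptor
 * (chip data, handler, lockdep class) for each line, all chained off the
 * bank's parent interrupt.
 */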
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

static const struct of_device_id omap_gpio_match[];

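/*
 * Probe one GPIO bank: take configuration from the matched device-tree
 * entry or from platform data, map the register space, allocate an IRQ
 * range and register the chip.
 */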
static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (bank->irq_base < 0) {
		dev_err(dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
					     0, &irq_domain_simple_ops, NULL);

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM_SLEEP)
static int omap_gpio_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	void __iomem *wakeup_enable;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->suspend_wakeup)
		return 0;

	wakeup_enable = bank->base + bank->regs->wkup_en;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(wakeup_enable);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->saved_wakeup)
		return 0;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	if (!(bank->enabled_non_wakeup_gpios))
		goto update_gpio_context_count;

	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
	l2 = __raw_readl(bank->base + bank->regs->risingdetect);

	bank->saved_fallingdetect = l1;
	bank->saved_risingdetect = l2;
	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios || !bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count ||
						!context_lost_cnt_after) {
			omap_gpio_restore_context(bank);
		} else {
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	__raw_writel(bank->saved_fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->saved_risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->saved_fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->saved_risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(old0 | l, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

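/*
 * Called by the OMAP2+ PM code around idle: dropping the runtime PM
 * reference lets ->runtime_suspend() apply the wake-up and errata
 * workarounds above; omap2_gpio_resume_after_idle() takes it back.
 */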
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
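/* Rewrite all cached registers after the bank has lost context (e.g. off-mode). */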
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
#define omap_gpio_suspend NULL
#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

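/* Register layouts and per-SoC platform data used for device-tree probing. */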
#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);