/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <asm/gpio.h>
#include <asm/mach/irq.h>

#define OFF_MODE	1

static LIST_HEAD(omap_gpio_list);

struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};
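
/*
 * omap_gpio_restore_context() writes this snapshot back to the hardware
 * registers when a bank comes back from a context loss (e.g. off-mode).
 */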

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	u16 irq;
	int irq_base;
	struct irq_domain *domain;
	u32 suspend_wakeup;
	u32 saved_wakeup;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
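/*
 * e.g. on a 32-bit wide bank, GPIO 37 maps to index 37 % 32 = 5 and
 * bit mask 1 << 5 = 0x20.
 */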
#define GPIO_MOD_CTRL_BIT	BIT(0)

static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
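	/*
	 * A bank's Linux IRQs are allocated contiguously from bank->irq_base
	 * (see omap_gpio_probe()), so the GPIO number is the IRQ's offset
	 * within the bank added to the gpio_chip base.
	 */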
	return gpio_irq - bank->irq_base + bank->chip.base;
}

static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
	bank->context.oe = l;
}

/* set data out value using dedicated set/clear register */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	__raw_writel(l, reg);
}

/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
	bank->context.dataout = l;
}

static int _get_gpio_datain(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (__raw_readl(reg) & (1 << offset)) != 0;
}

static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (__raw_readl(reg) & (1 << offset)) != 0;
}

static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = __raw_readl(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	__raw_writel(l, base + reg);
}

static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;
	}
}

static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;
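	/*
	 * For example, a requested debounce of 100 us becomes
	 * (100 / 0x1f) - 1 = 2 in the debounce register, which the hardware
	 * then applies as (2 + 1) * 31 us = 93 us (assuming the usual
	 * (value + 1) * 31 us encoding of the debounce time register).
	 */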

	l = GPIO_BIT(bank, gpio);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the irq during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
							unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}

static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio;
	int retval;
	unsigned long flags;

	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = irq_to_gpio(bank, d->irq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (1 << bank->width) - 1;

	reg += bank->regs->irqenable;
	l = __raw_readl(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	__raw_writel(l, reg);
}

static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	__raw_writel(l, reg);
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

/*
 * Note that ENAWAKEUP needs to be enabled in the GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, the system will always wake up on GPIO events. While
 * the system is running, all registered GPIO interrupts need to have
 * wake-up enabled. When the system is suspended, only selected GPIO
 * interrupts need to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->suspend_wakeup |= gpio_bit;
	else
		bank->suspend_wakeup &= ~gpio_bit;

	__raw_writel(bank->suspend_wakeup, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	return _set_gpio_wakeup(bank, gpio, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before the handler(s) are
		called so that we don't miss any interrupt that occurs while
		they are executing */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if only edge sensitive GPIO pin interrupts are configured,
		we can unmask the GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		gpio_irq = bank->irq_base;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			int gpio = irq_to_gpio(bank, gpio_irq);

			if (!(isr & 1))
				continue;

			gpio_index = GPIO_INDEX(bank, gpio);

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);

			generic_handle_irq(gpio_irq);
		}
	}
	/* if the bank has any level sensitive GPIO pin interrupts
	configured, we must unmask the bank interrupt only after the
	handler(s) have run, in order to avoid a spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}

static void gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	_clear_gpio_irqstatus(bank, gpio);
}

static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_is_input(struct gpio_bank *bank, int mask)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return __raw_readl(reg) & mask;
}

static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	u32 mask;

	bank = container_of(chip, struct gpio_bank, chip);
	mask = (1 << offset);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, offset);
	else
		return _get_gpio_dataout(bank, offset);
}

static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);
	return bank->irq_base + offset;
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		__raw_writel(l, bank->base + bank->regs->irqenable);
		return;
	}

	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
	_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		__raw_writel(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		__raw_writel(0, base + bank->regs->ctrl);
}

static __devinit void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;

	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

	for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

static const struct of_device_id omap_gpio_match[];

static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (bank->irq_base < 0) {
		dev_err(dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
					     0, &irq_domain_simple_ops, NULL);

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM_SLEEP)
static int omap_gpio_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	void __iomem *wakeup_enable;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->suspend_wakeup)
		return 0;

	wakeup_enable = bank->base + bank->regs->wkup_en;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(wakeup_enable);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->saved_wakeup)
		return 0;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count ||
						!context_lost_cnt_after) {
			omap_gpio_restore_context(bank);
		} else {
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	__raw_writel(bank->context.fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(old0 | l, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
#define omap_gpio_suspend NULL
#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);