/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>

#include <mach/hardware.h>
#include <asm/irq.h>
#include <mach/irqs.h>
#include <asm/gpio.h>
#include <asm/mach/irq.h>

#define OFF_MODE	1

static LIST_HEAD(omap_gpio_list);

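/*
 * Per-bank register snapshot used to restore the bank after a context
 * loss (e.g. when the power domain hits off-mode); see
 * omap_gpio_restore_context().
 */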
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	struct list_head node;
	void __iomem *base;
	u16 irq;
	int irq_base;
	struct irq_domain *domain;
	u32 suspend_wakeup;
	u32 saved_wakeup;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 saved_fallingdetect;
	u32 saved_risingdetect;
	u32 level_mask;
	u32 toggle_mask;
	spinlock_t lock;
	struct gpio_chip chip;
	struct clk *dbck;
	u32 mod_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	struct device *dev;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	int stride;
	u32 width;
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
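/*
 * Worked example, assuming a 32-bit wide bank: for global GPIO 34,
 * GPIO_INDEX() yields 34 % 32 = 2 and GPIO_BIT() yields 1 << 2, i.e.
 * the third line of the second bank.
 */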
#define GPIO_MOD_CTRL_BIT	BIT(0)

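/*
 * Each bank owns a contiguous range of bank->width IRQ descriptors
 * starting at bank->irq_base; irq_to_gpio() below and gpio_2irq()
 * rely on this linear IRQ <-> GPIO mapping.
 */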
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
	return gpio_irq - bank->irq_base + bank->chip.base;
}

static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
	bank->context.oe = l;
}


/* set data out value using the dedicated set/clear registers */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	if (enable)
		reg += bank->regs->set_dataout;
	else
		reg += bank->regs->clr_dataout;

	__raw_writel(l, reg);
}

/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
	bank->context.dataout = l;
}

static int _get_gpio_datain(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->datain;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static int _get_gpio_dataout(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base + bank->regs->dataout;

	return (__raw_readl(reg) & GPIO_BIT(bank, gpio)) != 0;
}

static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	int l = __raw_readl(base + reg);

	if (set)
		l |= mask;
	else
		l &= ~mask;

	__raw_writel(l, base + reg);
}

static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;
	}
}

static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	if (!bank->dbck_flag)
		return;

	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;
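	/*
	 * Worked example of the conversion above: a request of 1000 us
	 * becomes (1000 / 0x1f) - 1 = 31 (0x1f); requests below 32 us are
	 * clamped to 0x01 and anything above 7936 us to 0xff.
	 */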

	l = GPIO_BIT(bank, gpio);

	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within
	 * the runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						int trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

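	/*
	 * Mirror the detect registers into bank->context so this trigger
	 * setup can be replayed by omap_gpio_restore_context() after a
	 * context loss.
	 */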
	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}

#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif

static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, int trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}

static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio;
	int retval;
	unsigned long flags;

	if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
	else
		gpio = irq_to_gpio(bank, d->irq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (1 << bank->width) - 1;

	reg += bank->regs->irqenable;
	l = __raw_readl(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
	}

	__raw_writel(l, reg);
	bank->context.irqenable1 = l;
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
}

/*
 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, system will wake up always on GPIO events. While
 * system is running all registered GPIO interrupts need to have wake-up
 * enabled. When system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->suspend_wakeup |= gpio_bit;
	else
		bank->suspend_wakeup &= ~gpio_bit;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	return _set_gpio_wakeup(bank, gpio, enable);
}

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int gpio_irq, gpio_index;
	struct gpio_bank *bank;
	u32 retrigger = 0;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	while (1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge-sensitive interrupts before the handler(s) are
		called so that we don't miss any interrupt that occurs while
		they are running */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if only edge-sensitive GPIO pin interrupts are configured,
		we can unmask the GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		isr |= retrigger;
		retrigger = 0;
		if (!isr)
			break;

		gpio_irq = bank->irq_base;
		for (; isr != 0; isr >>= 1, gpio_irq++) {
			int gpio = irq_to_gpio(bank, gpio_irq);

			if (!(isr & 1))
				continue;

			gpio_index = GPIO_INDEX(bank, gpio);

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << gpio_index))
				_toggle_gpio_edge_triggering(bank, gpio_index);

			generic_handle_irq(gpio_irq);
		}
	}
	/* if the bank has any level-sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after the
	handler(s) have run, in order to avoid a spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}

static void gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);

	_clear_gpio_irqstatus(bank, gpio);
}

static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->irq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(mask_reg);
	__raw_writel(0xffff & ~bank->suspend_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->saved_wakeup, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_is_input(struct gpio_bank *bank, int mask)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return __raw_readl(reg) & mask;
}

static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	void __iomem *reg;
	int gpio;
	u32 mask;

	gpio = chip->base + offset;
	bank = container_of(chip, struct gpio_bank, chip);
	reg = bank->base;
	mask = GPIO_BIT(bank, gpio);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, gpio);
	else
		return _get_gpio_dataout(bank, gpio);
}

static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
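	/*
	 * The debounce functional clock is looked up lazily below, on the
	 * first debounce request for this bank, and is afterwards managed
	 * via bank->dbck_enable_mask by _gpio_dbck_enable()/_disable().
	 */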

	if (!bank->dbck) {
		bank->dbck = clk_get(bank->dev, "dbclk");
		if (IS_ERR(bank->dbck))
			dev_err(bank->dev, "Could not get gpio dbck\n");
	}

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static int gpio_2irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;

	bank = container_of(chip, struct gpio_bank, chip);
	return bank->irq_base + offset;
}

/*---------------------------------------------------------------------*/

static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		__raw_writel(l, bank->base + bank->regs->irqenable);
		return;
	}

	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
	_gpio_rmw(base, bank->regs->irqstatus, l,
					bank->regs->irqenable_inv == false);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->debounce_en != 0);
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->ctrl != 0);
	if (bank->regs->debounce_en)
		_gpio_rmw(base, bank->regs->debounce_en, 0, 1);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		_gpio_rmw(base, bank->regs->ctrl, 0, 1);
}

static __init void
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;

	if (bank->regs->wkup_en)
		ct->chip.irq_set_wake = gpio_wake_enable;

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = gpio_2irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	gpiochip_add(&bank->chip);

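	/*
	 * Wire up one IRQ descriptor per GPIO line in the bank, then
	 * install gpio_irq_handler() as the chained handler for the bank's
	 * parent interrupt so it can demultiplex into these per-GPIO IRQs.
	 */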
	for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) {
		irq_set_lockdep_class(j, &gpio_lock_class);
		irq_set_chip_data(j, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, j, bank->width);
		} else {
			irq_set_chip(j, &gpio_irq_chip);
			irq_set_handler(j, handle_simple_irq);
			set_irq_flags(j, IRQF_VALID);
		}
	}
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

static const struct of_device_id omap_gpio_match[];

static int __devinit omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->get_context_loss_count = pdata->get_context_loss_count;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
	if (bank->irq_base < 0) {
		dev_err(dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base,
					     0, &irq_domain_simple_ops, NULL);

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM_SLEEP)
static int omap_gpio_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	void __iomem *wakeup_enable;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->suspend_wakeup)
		return 0;

	wakeup_enable = bank->base + bank->regs->wkup_en;

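	/*
	 * While suspended, keep wake-up enabled only for the lines selected
	 * through enable_irq_wake() (tracked in bank->suspend_wakeup); the
	 * full mask saved here is written back in omap_gpio_resume().
	 */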
	spin_lock_irqsave(&bank->lock, flags);
	bank->saved_wakeup = __raw_readl(wakeup_enable);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->suspend_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	void __iomem *base = bank->base;
	unsigned long flags;

	if (!bank->mod_usage || !bank->loses_context)
		return 0;

	if (!bank->regs->wkup_en || !bank->saved_wakeup)
		return 0;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_rmw(base, bank->regs->wkup_en, 0xffffffff, 0);
	_gpio_rmw(base, bank->regs->wkup_en, bank->saved_wakeup, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);

static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	if (!(bank->enabled_non_wakeup_gpios))
		goto update_gpio_context_count;

	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = __raw_readl(bank->base + bank->regs->fallingdetect);
	l2 = __raw_readl(bank->base + bank->regs->risingdetect);

	bank->saved_fallingdetect = l1;
	bank->saved_risingdetect = l2;
	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);
	if (!bank->enabled_non_wakeup_gpios || !bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count ||
						!context_lost_cnt_after) {
			omap_gpio_restore_context(bank);
		} else {
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	__raw_writel(bank->saved_fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->saved_risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->saved_fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->saved_risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->saved_fallingdetect) & ~(bank->saved_risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;
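	/*
	 * The transient leveldetect writes below, immediately undone by
	 * restoring the saved values, are what actually generate the
	 * "software" IRQs referred to above for the changed lines.
	 */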

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
			__raw_writel(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (cpu_is_omap44xx()) {
			__raw_writel(old0 | l, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
#define omap_gpio_suspend NULL
#define omap_gpio_resume NULL
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

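/*
 * Register-offset tables and platform data used when the banks are
 * described via device tree. OMAP3 shares the OMAP2 register layout and
 * differs only in having a debounce clock (dbck_flag).
 */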
#if defined(CONFIG_OF)
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver registration needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);