/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
18
#include <linux/syscore_ops.h>
19
#include <linux/err.h>
20
#include <linux/clk.h>
21
#include <linux/io.h>
22
#include <linux/device.h>
23
#include <linux/pm_runtime.h>
24
#include <linux/pm.h>
25 26 27
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/irqdomain.h>
28 29
#include <linux/gpio.h>
#include <linux/platform_data/gpio-omap.h>
30 31 32

#include <asm/mach/irq.h>

33 34
/* Power state identifier compared against bank->power_mode */
#define OFF_MODE	1

/* All registered GPIO banks, linked via gpio_bank.node */
static LIST_HEAD(omap_gpio_list);

37 38 39 40 41 42 43 44 45 46 47
/*
 * Shadow copy of the bank's registers.  The accessor functions below
 * keep these fields in sync with the hardware so the bank can be
 * restored after a context loss (see bank->context usage throughout).
 */
struct gpio_regs {
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

52
/*
 * Per-bank driver state: mapped registers, irq wiring, saved register
 * context and bookkeeping for the debounce clock and runtime PM.
 */
struct gpio_bank {
	struct list_head node;		/* entry in omap_gpio_list */
	void __iomem *base;		/* mapped register base */
	u16 irq;			/* parent (chained) interrupt line */
	struct irq_domain *domain;	/* per-bank irq domain */
	u32 non_wakeup_gpios;		/* lines that cannot wake the system */
	u32 enabled_non_wakeup_gpios;	/* edge-irq lines tracked across idle */
	struct gpio_regs context;	/* register shadow for context restore */
	u32 saved_datain;
	u32 level_mask;			/* lines configured level-sensitive */
	u32 toggle_mask;		/* lines using the OMAP1 edge-toggle workaround */
	spinlock_t lock;		/* protects registers and context shadow */
	struct gpio_chip chip;
	struct clk *dbck;		/* debounce functional clock */
	u32 mod_usage;			/* bitmask of lines claimed via gpio_request */
	u32 dbck_enable_mask;		/* lines with debounce enabled */
	bool dbck_enabled;		/* dbck currently running */
	struct device *dev;
	bool is_mpuio;			/* OMAP1 MPUIO bank (different registers) */
	bool dbck_flag;			/* bank supports debounce at all */
	bool loses_context;
	int stride;			/* register stride divisor (MPUIO) */
	u32 width;			/* number of GPIO lines in this bank */
	int context_loss_count;
	int power_mode;
	bool workaround_enabled;

	/* set/clear-register vs. read-modify-write dataout, chosen at probe */
	void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);

	struct omap_gpio_reg_offs *regs;
};

85 86
/* Convert a global GPIO number to a bank-relative index / single-bit mask */
#define GPIO_INDEX(bank, gpio) (gpio % bank->width)
#define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio))
/* GPIO_CTRL bit gating the module's interface clock */
#define GPIO_MOD_CTRL_BIT	BIT(0)
88

89 90
/*
 * Map a bank-relative hwirq offset to the global GPIO number.
 * (Despite the parameter name, @gpio_irq is an offset within the bank.)
 */
static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq)
{
	return bank->chip.base + gpio_irq;
}

/* gpio_chip.to_irq hook: translate a line offset to its Linux irq number */
static int omap_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);

	return irq_find_mapping(bank->domain, offset);
}

101 102
/*
 * Program one line as input (is_input != 0) or output via the bank's
 * direction (OE) register.  Callers hold bank->lock.
 */
static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input)
{
	void __iomem *reg = bank->base;
	u32 l;

	reg += bank->regs->direction;
	l = __raw_readl(reg);
	if (is_input)
		l |= 1 << gpio;
	else
		l &= ~(1 << gpio);
	__raw_writel(l, reg);
	/* keep the shadow copy in sync for context restore */
	bank->context.oe = l;
}

116 117 118

/* set data out value using dedicate set/clear register */
static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base;
	u32 l = GPIO_BIT(bank, gpio);

	/*
	 * Writing a 1 to set_dataout/clr_dataout affects only that bit,
	 * so no read-modify-write cycle is needed here.
	 */
	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	__raw_writel(l, reg);
}

134 135
/* set data out value using mask register */
static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable)
{
	void __iomem *reg = bank->base + bank->regs->dataout;
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	u32 l;

	/* Read-modify-write: only one dataout register, no set/clr pair */
	l = __raw_readl(reg);
	if (enable)
		l |= gpio_bit;
	else
		l &= ~gpio_bit;
	__raw_writel(l, reg);
	/* keep the shadow copy in sync for context restore */
	bank->context.dataout = l;
}

150
static int _get_gpio_datain(struct gpio_bank *bank, int offset)
151
{
152
	void __iomem *reg = bank->base + bank->regs->datain;
153

154
	return (__raw_readl(reg) & (1 << offset)) != 0;
155
}
156

157
static int _get_gpio_dataout(struct gpio_bank *bank, int offset)
158
{
159
	void __iomem *reg = bank->base + bank->regs->dataout;
160

161
	return (__raw_readl(reg) & (1 << offset)) != 0;
162 163
}

164 165 166 167
/* Read-modify-write helper: set (or clear) @mask bits in register @reg. */
static inline void _gpio_rmw(void __iomem *base, u32 reg, u32 mask, bool set)
{
	u32 val = __raw_readl(base + reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	__raw_writel(val, base + reg);
}
175

176 177 178 179 180
/*
 * Re-enable the debounce functional clock and re-program the per-line
 * debounce enables (they are zeroed by _gpio_dbck_disable() before the
 * clock is cut).
 */
static inline void _gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		__raw_writel(bank->dbck_enable_mask,
			     bank->base + bank->regs->debounce_en);
	}
}

/* Stop the debounce clock when any line in this bank is using debounce. */
static inline void _gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting it's clock. If debounce is
		 * enabled but the clock is not, GPIO module seems to be unable
		 * to detect events and generate interrupts at least on OMAP3.
		 */
		__raw_writel(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

202 203 204 205 206 207 208 209 210 211 212 213
/**
 * _set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps so we need
 * to convert and round up to the closest unit.
 */
static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio,
		unsigned debounce)
{
	void __iomem		*reg;
	u32			val;
	u32			l;

	/* Nothing to do on banks without debounce support */
	if (!bank->dbck_flag)
		return;

	/* Clamp to the 8-bit debounce register: 0x01..0xff units of ~31us */
	if (debounce < 32)
		debounce = 0x01;
	else if (debounce > 7936)
		debounce = 0xff;
	else
		debounce = (debounce / 0x1f) - 1;

	l = GPIO_BIT(bank, gpio);

	/* dbck must be running while the debounce registers are written */
	clk_enable(bank->dbck);
	reg = bank->base + bank->regs->debounce;
	__raw_writel(debounce, reg);

	reg = bank->base + bank->regs->debounce_en;
	val = __raw_readl(reg);

	if (debounce)
		val |= l;
	else
		val &= ~l;
	bank->dbck_enable_mask = val;

	__raw_writel(val, reg);
	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called,  _gpio_dbck_enable() within
	 * runtime callbck fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}
}

260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293
/**
 * _clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @gpio: the gpio number on this @gpio
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this function
 * if this is the only gpio in the bank using debounce.
 */
static void _clear_gpio_debounce(struct gpio_bank *bank, unsigned gpio)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);

	if (!bank->dbck_flag)
		return;

	/* Nothing to do if this line never had debounce enabled */
	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	__raw_writel(bank->context.debounce_en,
		     bank->base + bank->regs->debounce_en);

	/* Last user gone: zero the time and stop the debounce clock */
	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		__raw_writel(bank->context.debounce, bank->base +
			     bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

294
/*
 * Program the level/edge detect registers for one line according to
 * @trigger (IRQ_TYPE_* flags), update the context shadow, and maintain
 * the wake-up and idle-loss bookkeeping.  Callers hold bank->lock.
 */
static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio,
						unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;

	_gpio_rmw(base, bank->regs->leveldetect0, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_LOW);
	_gpio_rmw(base, bank->regs->leveldetect1, gpio_bit,
		  trigger & IRQ_TYPE_LEVEL_HIGH);
	_gpio_rmw(base, bank->regs->risingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_RISING);
	_gpio_rmw(base, bank->regs->fallingdetect, gpio_bit,
		  trigger & IRQ_TYPE_EDGE_FALLING);

	/* Refresh the context shadow from the hardware we just wrote */
	bank->context.leveldetect0 =
			__raw_readl(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			__raw_readl(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			__raw_readl(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			__raw_readl(bank->base + bank->regs->fallingdetect);

	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		_gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl) {
		/* On omap24xx proceed only when valid GPIO bit is set */
		if (bank->non_wakeup_gpios) {
			if (!(bank->non_wakeup_gpios & gpio_bit))
				goto exit;
		}

		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes
		 * to avoid irq lost during PER RET/OFF mode
		 * Applies for omap2 non-wakeup gpio and all omap3 gpios
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}

exit:
	bank->level_mask =
		__raw_readl(bank->base + bank->regs->leveldetect0) |
		__raw_readl(bank->base + bank->regs->leveldetect1);
}

350
#ifdef CONFIG_ARCH_OMAP1
/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once.  For all other chips, this function is a noop.
 */
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (!bank->regs->irqctrl)
		return;

	reg += bank->regs->irqctrl;

	/* Flip this line's edge-select bit (rising <-> falling) */
	l = __raw_readl(reg);
	if ((l >> gpio) & 1)
		l &= ~(1 << gpio);
	else
		l |= 1 << gpio;

	__raw_writel(l, reg);
}
#else
static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {}
#endif
376

377 378
/*
 * Apply @trigger to one line, dispatching on the register layout the
 * bank provides: level/edge detect registers (OMAP2+), the single
 * irqctrl edge register (OMAP1 15xx/16xx), or the 2-bit-per-line
 * edgectrl registers.  Returns 0 or -EINVAL for unsupported triggers.
 */
static int _set_gpio_triggering(struct gpio_bank *bank, int gpio,
							unsigned trigger)
{
	void __iomem *reg = bank->base;
	void __iomem *base = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = __raw_readl(reg);
		/* both-edge lines are serviced via the toggle workaround */
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= 1 << gpio;
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 1 << gpio;
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(1 << gpio);
		else
			return -EINVAL;

		__raw_writel(l, reg);
	} else if (bank->regs->edgectrl1) {
		/* two registers, 8 lines each, 2 bits per line */
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = __raw_readl(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= 1 << (gpio << 1);

		/* Enable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
		__raw_writel(l, reg);
	}
	return 0;
}

423
/*
 * irq_chip.irq_set_type hook: validate the requested trigger, program
 * the hardware, and switch between level/edge flow handlers.
 */
static int gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned gpio = 0;
	int retval;
	unsigned long flags;

#ifdef CONFIG_ARCH_OMAP1
	/* OMAP1 MPUIO irqs are numbered above IH_MPUIO_BASE */
	if (d->irq > IH_MPUIO_BASE)
		gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE);
#endif

	if (!gpio)
		gpio = irq_to_gpio(bank, d->hwirq);

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	/* level triggering requires leveldetect registers */
	if (!bank->regs->leveldetect0 &&
		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	spin_lock_irqsave(&bank->lock, flags);
	retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type);
	spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	return retval;
}

/* Acknowledge all interrupts in @gpio_mask by writing the status register. */
static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	__raw_writel(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		__raw_writel(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	__raw_readl(reg);
}

static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio)
{
476
	_clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
477 478
}

479 480 481
/*
 * Return the set of currently enabled interrupt lines in this bank,
 * normalising inverted-polarity enable registers and masking off bits
 * beyond the bank width.
 */
static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	u32 width_mask = (1 << bank->width) - 1;
	u32 enabled = __raw_readl(bank->base + bank->regs->irqenable);

	if (bank->regs->irqenable_inv)
		enabled = ~enabled;

	return enabled & width_mask;
}

493
/*
 * Enable interrupts for all lines in @gpio_mask, using the dedicated
 * set register when available, otherwise read-modify-write (honouring
 * inverted-polarity enable registers).  Updates the context shadow.
 */
static void _enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->set_irqenable) {
		reg += bank->regs->set_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 |= gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l &= ~gpio_mask;
		else
			l |= gpio_mask;
		bank->context.irqenable1 = l;
	}

	__raw_writel(l, reg);
}

/*
 * Disable interrupts for all lines in @gpio_mask; mirror image of
 * _enable_gpio_irqbank() using the clear register when available.
 */
static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;
	u32 l;

	if (bank->regs->clr_irqenable) {
		reg += bank->regs->clr_irqenable;
		l = gpio_mask;
		bank->context.irqenable1 &= ~gpio_mask;
	} else {
		reg += bank->regs->irqenable;
		l = __raw_readl(reg);
		if (bank->regs->irqenable_inv)
			l |= gpio_mask;
		else
			l &= ~gpio_mask;
		bank->context.irqenable1 = l;
	}

	__raw_writel(l, reg);
}

static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable)
{
539 540 541 542
	if (enable)
		_enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
	else
		_disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio));
543 544
}

545 546 547 548 549 550 551 552 553 554
/*
 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register.
 * 1510 does not seem to have a wake-up register. If JTAG is connected
 * to the target, system will wake up always on GPIO events. While
 * system is running all registered GPIO interrupts need to have wake-up
 * enabled. When system is suspended, only selected GPIO interrupts need
 * to have wake-up enabled.
 */
static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable)
{
	u32 gpio_bit = GPIO_BIT(bank, gpio);
	unsigned long flags;

	/* Reject lines the hardware cannot use as wake-up sources */
	if (bank->non_wakeup_gpios & gpio_bit) {
		dev_err(bank->dev,
			"Unable to modify wakeup on non-wakeup GPIO%d\n", gpio);
		return -EINVAL;
	}

	spin_lock_irqsave(&bank->lock, flags);
	if (enable)
		bank->context.wake_en |= gpio_bit;
	else
		bank->context.wake_en &= ~gpio_bit;

	__raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

576 577
/*
 * Return one line to its default state: input direction, interrupt
 * disabled and acknowledged, no trigger, no debounce.
 */
static void _reset_gpio(struct gpio_bank *bank, int gpio)
{
	_set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1);
	_set_gpio_irqenable(bank, gpio, 0);
	_clear_gpio_irqstatus(bank, gpio);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	_clear_gpio_debounce(bank, gpio);
}

585
/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
586
static int gpio_wake_enable(struct irq_data *d, unsigned int enable)
587
{
588
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
589
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
590

591
	return _set_gpio_wakeup(bank, gpio, enable);
592 593
}

594
/*
 * gpio_chip.request hook: power up the bank on first use, neutralise
 * the line's trigger, claim the pin for the MPU where the hardware
 * requires it, and ungate the module clocks on the first claimed line.
 */
static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	/*
	 * If this is the first gpio_request for the bank,
	 * enable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_get_sync(bank->dev);

	spin_lock_irqsave(&bank->lock, flags);
	/* Set trigger to none. You need to enable the desired trigger with
	 * request_irq() or set_irq_type().
	 */
	_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);

	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		__raw_writel(__raw_readl(reg) | (1 << offset), reg);
	}

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	bank->mod_usage |= 1 << offset;

	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

637
/*
 * gpio_chip.free hook: undo omap_gpio_request() — drop wake-up enable,
 * reset the line, gate the module clocks when the last line is freed,
 * and release the runtime PM reference.
 */
static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	void __iomem *base = bank->base;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);

	if (bank->regs->wkup_en) {
		/* Disable wake-up during idle for dynamic tick */
		_gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0);
		bank->context.wake_en =
			__raw_readl(bank->base + bank->regs->wkup_en);
	}

	bank->mod_usage &= ~(1 << offset);

	if (bank->regs->ctrl && !bank->mod_usage) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = __raw_readl(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		__raw_writel(ctrl, reg);
		bank->context.ctrl = ctrl;
	}

	_reset_gpio(bank, bank->chip.base + offset);
	spin_unlock_irqrestore(&bank->lock, flags);

	/*
	 * If this is the last gpio to be freed in the bank,
	 * disable the bank module.
	 */
	if (!bank->mod_usage)
		pm_runtime_put(bank->dev);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	void __iomem *isr_reg = NULL;
	u32 isr;
	unsigned int i;
	struct gpio_bank *bank;
	int unmasked = 0;
	struct irq_chip *chip = irq_desc_get_chip(desc);

	chained_irq_enter(chip, desc);

	bank = irq_get_handler_data(irq);
	isr_reg = bank->base + bank->regs->irqstatus;
	/* keep the bank powered while we service it */
	pm_runtime_get_sync(bank->dev);

	if (WARN_ON(!isr_reg))
		goto exit;

	/* Loop until no enabled line has its status bit set */
	while(1) {
		u32 isr_saved, level_mask = 0;
		u32 enabled;

		enabled = _get_gpio_irqbank_mask(bank);
		isr_saved = isr = __raw_readl(isr_reg) & enabled;

		if (bank->level_mask)
			level_mask = bank->level_mask & enabled;

		/* clear edge sensitive interrupts before handler(s) are
		called so that we don't miss any interrupt occurred while
		executing them */
		_disable_gpio_irqbank(bank, isr_saved & ~level_mask);
		_clear_gpio_irqbank(bank, isr_saved & ~level_mask);
		_enable_gpio_irqbank(bank, isr_saved & ~level_mask);

		/* if there is only edge sensitive GPIO pin interrupts
		configured, we could unmask GPIO bank interrupt immediately */
		if (!level_mask && !unmasked) {
			unmasked = 1;
			chained_irq_exit(chip, desc);
		}

		if (!isr)
			break;

		for (i = 0; isr != 0; isr >>= 1, i++) {
			if (!(isr & 1))
				continue;

			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time.  If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (1 << i))
				_toggle_gpio_edge_triggering(bank, i);

			generic_handle_irq(irq_find_mapping(bank->domain, i));
		}
	}
	/* if bank has any level sensitive GPIO pin interrupt
	configured, we must unmask the bank interrupt only after
	handler(s) are executed in order to avoid spurious bank
	interrupt */
exit:
	if (!unmasked)
		chained_irq_exit(chip, desc);
	pm_runtime_put(bank->dev);
}

757
static void gpio_irq_shutdown(struct irq_data *d)
758
{
759
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
760
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
761
	unsigned long flags;
762

763
	spin_lock_irqsave(&bank->lock, flags);
764
	_reset_gpio(bank, gpio);
765
	spin_unlock_irqrestore(&bank->lock, flags);
766 767
}

768
static void gpio_ack_irq(struct irq_data *d)
769
{
770
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
771
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
772 773 774 775

	_clear_gpio_irqstatus(bank, gpio);
}

776
/*
 * irq_chip.irq_mask hook: disable the line's interrupt and drop its
 * trigger so a masked line cannot latch new events.
 */
static void gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

788
/*
 * irq_chip.irq_unmask hook: restore the configured trigger (it was
 * cleared by gpio_mask_irq) and re-enable the line's interrupt.
 */
static void gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int gpio = irq_to_gpio(bank, d->hwirq);
	unsigned int irq_mask = GPIO_BIT(bank, gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);

	/* For level-triggered GPIOs, the clearing must be done after
	 * the HW source is cleared, thus after the handler has run */
	if (bank->level_mask & irq_mask) {
		_set_gpio_irqenable(bank, gpio, 0);
		_clear_gpio_irqstatus(bank, gpio);
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

811 812
/* irq_chip used for all non-MPUIO GPIO lines */
static struct irq_chip gpio_irq_chip = {
	.name		= "GPIO",
	.irq_shutdown	= gpio_irq_shutdown,
	.irq_ack	= gpio_ack_irq,
	.irq_mask	= gpio_mask_irq,
	.irq_unmask	= gpio_unmask_irq,
	.irq_set_type	= gpio_irq_type,
	.irq_set_wake	= gpio_wake_enable,
};

/*---------------------------------------------------------------------*/

823
/*
 * Mask all MPUIO interrupts except those enabled as wake-up sources
 * while the system is suspended (MASKIT bits are active-high masks).
 */
static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(0xffff & ~bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

838
/* Restore the pre-suspend MPUIO interrupt mask from the context shadow. */
static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank	*bank = platform_get_drvdata(pdev);
	void __iomem		*mask_reg = bank->base +
					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long		flags;

	spin_lock_irqsave(&bank->lock, flags);
	__raw_writel(bank->context.wake_en, mask_reg);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

853
/* noirq-phase PM hooks for the MPUIO pseudo-device */
static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

858
/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver		= {
		.name	= "mpuio",
		.pm	= &omap_mpuio_dev_pm_ops,
	},
};

/* Singleton pseudo-device carrying the MPUIO bank's PM callbacks */
static struct platform_device omap_mpuio_device = {
	.name		= "mpuio",
	.id		= -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

875
/* Register the MPUIO pseudo-device so its suspend/resume hooks run. */
static inline void mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

883
/*---------------------------------------------------------------------*/
884

D
David Brownell 已提交
885 886 887 888 889 890 891 892 893 894 895 896
/* gpio_chip.direction_input hook. */
static int gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_direction(bank, offset, 1);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

897 898
static int gpio_is_input(struct gpio_bank *bank, int mask)
{
899
	void __iomem *reg = bank->base + bank->regs->direction;
900 901 902 903

	return __raw_readl(reg) & mask;
}

D
David Brownell 已提交
904 905
/*
 * gpio_chip.get hook: read DATAIN for inputs, the latched DATAOUT
 * value for outputs.
 */
static int gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	u32 mask;

	bank = container_of(chip, struct gpio_bank, chip);
	mask = (1 << offset);

	if (gpio_is_input(bank, mask))
		return _get_gpio_datain(bank, offset);
	else
		return _get_gpio_dataout(bank, offset);
}

/*
 * gpio_chip.direction_output hook: latch the requested level first,
 * then switch the line to output so it never drives a stale value.
 */
static int gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	_set_gpio_direction(bank, offset, 0);
	spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

931 932 933 934 935 936 937
/* gpio_chip.set_debounce hook: forward to the low-level helper under lock. */
static int gpio_debounce(struct gpio_chip *chip, unsigned offset,
		unsigned debounce)
{
	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_debounce(bank, offset, debounce);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

D
David Brownell 已提交
946 947 948 949 950 951 952
/* gpio_chip.set hook: drive the output level via the bank's dataout method. */
static void gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = container_of(chip, struct gpio_bank, chip);
	spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

959
/*
 * Print the GPIO block revision once at boot.  Banks whose revision
 * offset is USHRT_MAX have no revision register and are skipped.
 */
static void __init omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;	/* ensure the message is printed only once */
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = __raw_readw(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

974 975 976 977 978
/* This lock class tells lockdep that GPIO irqs are in a different
 * category than their parents, so it won't report false recursion.
 */
static struct lock_class_key gpio_lock_class;

979
/*
 * One-time hardware initialisation for a bank: disable and acknowledge
 * all interrupts, clear debounce, record the reset OE value in the
 * context shadow, ungate the module and look up the debounce clock.
 */
static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		/* MPUIO irqenable is an active-high mask register */
		__raw_writel(l, bank->base + bank->regs->irqenable);
		return;
	}

	/* mask all interrupts and clear any pending status */
	_gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv);
	_gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		__raw_writel(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = __raw_readl(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		__raw_writel(0, base + bank->regs->ctrl);

	bank->dbck = clk_get(bank->dev, "dbclk");
	if (IS_ERR(bank->dbck))
		dev_err(bank->dev, "Could not get gpio dbck\n");
}

B
Bill Pemberton 已提交
1008
static void
1009 1010 1011 1012 1013 1014 1015 1016
omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
		    unsigned int num)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
				    handle_simple_irq);
1017 1018 1019 1020 1021
	if (!gc) {
		dev_err(bank->dev, "Memory alloc failed for gc\n");
		return;
	}

1022 1023 1024 1025 1026 1027
	ct = gc->chip_types;

	/* NOTE: No ack required, reading IRQ status clears it. */
	ct->chip.irq_mask = irq_gc_mask_set_bit;
	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
	ct->chip.irq_set_type = gpio_irq_type;
1028 1029

	if (bank->regs->wkup_en)
1030 1031 1032 1033 1034 1035 1036
		ct->chip.irq_set_wake = gpio_wake_enable,

	ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
	irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
}

/*
 * Register the bank with gpiolib and wire up its per-line IRQs.
 * GPIO numbers are handed out sequentially across banks via the
 * static 'gpio' counter (MPUIO banks use the fixed OMAP_MPUIO base).
 */
static void omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;	/* next free global GPIO number */

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.direction_input = gpio_input;
	bank->chip.get = gpio_get;
	bank->chip.direction_output = gpio_output;
	bank->chip.set_debounce = gpio_debounce;
	bank->chip.set = gpio_set;
	bank->chip.to_irq = omap_gpio_to_irq;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.dev = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		bank->chip.label = "gpio";
		bank->chip.base = gpio;
		gpio += bank->width;
	}
	bank->chip.ngpio = bank->width;

	/* NOTE(review): gpiochip_add() return value is not checked. */
	gpiochip_add(&bank->chip);

	/* Map each hardware line into the bank's IRQ domain. */
	for (j = 0; j < bank->width; j++) {
		int irq = irq_create_mapping(bank->domain, j);
		irq_set_lockdep_class(irq, &gpio_lock_class);
		irq_set_chip_data(irq, bank);
		if (bank->is_mpuio) {
			omap_mpuio_alloc_gc(bank, irq, bank->width);
		} else {
			irq_set_chip_and_handler(irq, &gpio_irq_chip,
						 handle_simple_irq);
			set_irq_flags(irq, IRQF_VALID);
		}
	}
	/* The bank's upstream interrupt demuxes to the per-line IRQs. */
	irq_set_chained_handler(bank->irq, gpio_irq_handler);
	irq_set_handler_data(bank->irq, bank);
}

/* Defined below, after the OF register-offset tables it references. */
static const struct of_device_id omap_gpio_match[];

/*
 * Probe one GPIO bank: pick up platform data (from DT match or board
 * code), map the MMIO region, create the IRQ domain, initialize the
 * hardware and register the gpio_chip.  The bank is appended to
 * omap_gpio_list for the omap2_gpio_*_idle() PM hooks.
 */
static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct of_device_id *match;
	const struct omap_gpio_platform_data *pdata;
	struct resource *res;
	struct gpio_bank *bank;
	int ret = 0;

	match = of_match_device(of_match_ptr(omap_gpio_match), dev);

	/* DT match data takes precedence over legacy platform data. */
	pdata = match ? match->data : dev->platform_data;
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(struct gpio_bank), GFP_KERNEL);
	if (!bank) {
		dev_err(dev, "Memory alloc failed\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid IRQ resource\n");
		return -ENODEV;
	}

	bank->irq = res->start;
	bank->dev = dev;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->loses_context = pdata->loses_context;
	bank->regs = pdata->regs;
#ifdef CONFIG_OF_GPIO
	bank->chip.of_node = of_node_get(node);
#endif

	/*
	 * NOTE(review): on this error path the of_node_get() reference
	 * taken above is not dropped — verify whether that leak matters.
	 */
	bank->domain = irq_domain_add_linear(node, bank->width,
					     &irq_domain_simple_ops, NULL);
	if (!bank->domain)
		return -ENODEV;

	/* Prefer the atomic set/clear dataout registers when present. */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = _set_gpio_dataout_reg;
	else
		bank->set_dataout = _set_gpio_dataout_mask;

	spin_lock_init(&bank->lock);

	/* Static mapping, never released */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!res)) {
		dev_err(dev, "Invalid mem resource\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     pdev->name)) {
		dev_err(dev, "Region already claimed\n");
		return -EBUSY;
	}

	bank->base = devm_ioremap(dev, res->start, resource_size(res));
	if (!bank->base) {
		dev_err(dev, "Could not ioremap\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, bank);

	/* Hardware must be powered for the register init below. */
	pm_runtime_enable(bank->dev);
	pm_runtime_irq_safe(bank->dev);
	pm_runtime_get_sync(bank->dev);

	if (bank->is_mpuio)
		mpuio_init(bank);

	omap_gpio_mod_init(bank);
	omap_gpio_chip_init(bank);
	omap_gpio_show_rev(bank);

	if (bank->loses_context)
		bank->get_context_loss_count = pdata->get_context_loss_count;

	pm_runtime_put(bank->dev);

	list_add_tail(&bank->node, &omap_gpio_list);

	return ret;
}

#ifdef CONFIG_ARCH_OMAP2PLUS

#if defined(CONFIG_PM_RUNTIME)
static void omap_gpio_restore_context(struct gpio_bank *bank);

/*
 * Runtime-suspend a bank.  Converts level wakeups to edges (the PRCM
 * only wakes on edges), applies the OMAP2420 errata 1.101 workaround
 * when heading to OFF mode, snapshots the context-loss counter, and
 * gates the debounce clock.  All register writes happen under
 * bank->lock; ->runtime_resume() undoes the temporary changes.
 */
static int omap_gpio_runtime_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	u32 l1 = 0, l2 = 0;
	unsigned long flags;
	u32 wake_low, wake_hi;

	spin_lock_irqsave(&bank->lock, flags);

	/*
	 * Only edges can generate a wakeup event to the PRCM.
	 *
	 * Therefore, ensure any wake-up capable GPIOs have
	 * edge-detection enabled before going idle to ensure a wakeup
	 * to the PRCM is generated on a GPIO transition. (c.f. 34xx
	 * NDA TRM 25.5.3.1)
	 *
	 * The normal values will be restored upon ->runtime_resume()
	 * by writing back the values saved in bank->context.
	 */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* The workaround below is only needed when entering OFF mode. */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	/* Tells ->runtime_resume() to replay/compensate the above. */
	bank->workaround_enabled = true;

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/*
 * Runtime-resume a bank.  Re-enables the debounce clock, restores the
 * edge-detect settings changed by ->runtime_suspend(), replays the
 * full register context if it was lost, and — when the OFF-mode
 * errata workaround was armed — software-generates IRQs for any
 * non-wakeup GPIOs that changed state while triggering was removed.
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			/* Context intact: nothing else to repair. */
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	/*
	 * NOTE(review): these two writes duplicate the restore done at
	 * the top of this function — presumably kept so the detect
	 * registers are correct even after a full context restore;
	 * confirm before removing.
	 */
	__raw_writel(bank->context.fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		/*
		 * Briefly force level-detect on the changed lines so the
		 * hardware latches an IRQ, then restore the old values.
		 */
		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | gen, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
						bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | l, bank->base +
						bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
						bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

/*
 * Called by OMAP PM core before entering idle: for every in-use bank
 * that loses context, record the target power mode and drop the
 * runtime-PM usage count so the bank can be suspended.
 */
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *b;

	list_for_each_entry(b, &omap_gpio_list, node) {
		if (b->mod_usage && b->loses_context) {
			b->power_mode = pwr_mode;
			pm_runtime_put_sync_suspend(b->dev);
		}
	}
}

/*
 * Called by OMAP PM core after leaving idle: re-acquire the runtime-PM
 * reference dropped in omap2_gpio_prepare_for_idle() for every in-use,
 * context-losing bank, triggering ->runtime_resume().
 */
void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *b;

	list_for_each_entry(b, &omap_gpio_list, node) {
		if (b->mod_usage && b->loses_context)
			pm_runtime_get_sync(b->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
/*
 * Replay the saved register context into the hardware after a context
 * loss.  Called from ->runtime_resume() with bank->lock held.  Order
 * matters: dataout and direction are programmed before the interrupt
 * enables so no spurious IRQ fires mid-restore.
 */
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	/* Use the atomic set-dataout register when the bank has one. */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	/* Restore debounce settings only if debounce was in use. */
	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
					bank->base + bank->regs->debounce_en);
	}

	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
#define omap_gpio_runtime_suspend NULL
#define omap_gpio_runtime_resume NULL
#endif

/* Runtime-PM callbacks only; expands to no-ops when !CONFIG_PM_RUNTIME. */
static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
/* Register offsets shared by OMAP2 and OMAP3 GPIO banks. */
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

/* OMAP4 register layout; irqenable maps onto the IRQSTATUSSET regs. */
static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

/* OMAP2: no debounce clock. */
static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

/* OMAP3: same register layout as OMAP2, but with a debounce clock. */
static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

/* Ordered most-specific first; match data selects the pdata above. */
static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

/* No .remove: banks are never unbound once registered. */
static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);