/*
 * Texas Instruments AM35x "glue layer"
 *
 * Copyright (c) 2010, by Texas Instruments
 *
 * Based on the DA8xx "glue layer" code.
 * Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
 *
 * This file is part of the Inventra Controller Driver for Linux.
 *
 * The Inventra Controller Driver for Linux is free software; you
 * can redistribute it and/or modify it under the terms of the GNU
 * General Public License version 2 as published by the Free Software
 * Foundation.
 *
 * The Inventra Controller Driver for Linux is distributed in
 * the hope that it will be useful, but WITHOUT ANY WARRANTY;
 * without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
 * License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with The Inventra Controller Driver for Linux ; if not,
 * write to the Free Software Foundation, Inc., 59 Temple Place,
 * Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>

#include <plat/usb.h>

#include "musb_core.h"

/*
 * AM35x specific definitions
 */
/* USB 2.0 OTG module registers */
#define USB_REVISION_REG	0x00
#define USB_CTRL_REG		0x04
#define USB_STAT_REG		0x08
#define USB_EMULATION_REG	0x0c
/* 0x10 Reserved */
#define USB_AUTOREQ_REG		0x14
#define USB_SRP_FIX_TIME_REG	0x18
#define USB_TEARDOWN_REG	0x1c
#define EP_INTR_SRC_REG		0x20
#define EP_INTR_SRC_SET_REG	0x24
#define EP_INTR_SRC_CLEAR_REG	0x28
#define EP_INTR_MASK_REG	0x2c
#define EP_INTR_MASK_SET_REG	0x30
#define EP_INTR_MASK_CLEAR_REG	0x34
#define EP_INTR_SRC_MASKED_REG	0x38
#define CORE_INTR_SRC_REG	0x40
#define CORE_INTR_SRC_SET_REG	0x44
#define CORE_INTR_SRC_CLEAR_REG	0x48
#define CORE_INTR_MASK_REG	0x4c
#define CORE_INTR_MASK_SET_REG	0x50
#define CORE_INTR_MASK_CLEAR_REG 0x54
#define CORE_INTR_SRC_MASKED_REG 0x58
/* 0x5c Reserved */
#define USB_END_OF_INTR_REG	0x60

/* Control register bits */
#define AM35X_SOFT_RESET_MASK	1

/* USB interrupt register bits */
#define AM35X_INTR_USB_SHIFT	16
#define AM35X_INTR_USB_MASK	(0x1ff << AM35X_INTR_USB_SHIFT)
#define AM35X_INTR_DRVVBUS	0x100
#define AM35X_INTR_RX_SHIFT	16
#define AM35X_INTR_TX_SHIFT	0
#define AM35X_TX_EP_MASK	0xffff		/* EP0 + 15 Tx EPs */
#define AM35X_RX_EP_MASK	0xfffe		/* 15 Rx EPs */
#define AM35X_TX_INTR_MASK	(AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
#define AM35X_RX_INTR_MASK	(AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)

#define USB_MENTOR_CORE_OFFSET	0x400

struct am35x_glue {
	struct device		*dev;
	struct platform_device	*musb;
	struct clk		*phy_clk;
	struct clk		*clk;
};

#define glue_to_musb(g)		platform_get_drvdata(g->musb)

/*
 * am35x_musb_enable - enable interrupts
 */
static void am35x_musb_enable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;
	u32 epmask;

	/* Workaround: setup IRQs through both register sets. */
	epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
	       ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);

	musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
	musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);

	/* Force the DRVVBUS IRQ so we can start polling for ID change. */
	if (is_otg_enabled(musb))
		musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
			    AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
}

/*
 * am35x_musb_disable - disable HDRC and flush interrupts
 */
static void am35x_musb_disable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;

	musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
	musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
			 AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}

#define portstate(stmt)		stmt

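/*
 * am35x_musb_set_vbus - stub; VBUS appears to be switched outside this
 * glue layer on AM35x, so only warn if asked to drive VBUS while the
 * peripheral side is active.
 */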
static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
	WARN_ON(is_on && is_peripheral_active(musb));
}

#define	POLL_SECONDS	2

static struct timer_list otg_workaround;

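/*
 * otg_timer - workaround poll routine.
 *
 * Re-reads DEVCTL and nudges the OTG state machine, since the AM35x
 * transceiver does not raise interrupts for several ID/session changes
 * (see the comment in the function body).
 */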
static void otg_timer(unsigned long _musb)
{
	struct musb		*musb = (void *)_musb;
	void __iomem		*mregs = musb->mregs;
	u8			devctl;
	unsigned long		flags;

	/*
	 * We poll because the AM35x won't expose several OTG-critical
	 * status change events (from the transceiver) otherwise.
	 */
	devctl = musb_readb(mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
		otg_state_string(musb->xceiv->state));

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->state) {
	case OTG_STATE_A_WAIT_BCON:
		devctl &= ~MUSB_DEVCTL_SESSION;
		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE) {
			musb->xceiv->state = OTG_STATE_B_IDLE;
			MUSB_DEV_MODE(musb);
		} else {
			musb->xceiv->state = OTG_STATE_A_IDLE;
			MUSB_HST_MODE(musb);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
		musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
			    MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
		break;
	case OTG_STATE_B_IDLE:
		if (!is_peripheral_enabled(musb))
			break;

		devctl = musb_readb(mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE)
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
		else
			musb->xceiv->state = OTG_STATE_A_IDLE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}

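/*
 * am35x_musb_try_idle - (re)arm or cancel the OTG workaround timer
 * depending on whether the controller is active; a no-op unless OTG
 * mode is enabled.
 */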
static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
{
	static unsigned long last_timer;

	if (!is_otg_enabled(musb))
		return;

	if (timeout == 0)
		timeout = jiffies + msecs_to_jiffies(3);

	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || (musb->a_wait_bcon == 0 &&
				musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
		dev_dbg(musb->controller, "%s active, deleting timer\n",
			otg_state_string(musb->xceiv->state));
		del_timer(&otg_workaround);
		last_timer = jiffies;
		return;
	}

	if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
		dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
		return;
	}
	last_timer = timeout;

	dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
		otg_state_string(musb->xceiv->state),
		jiffies_to_msecs(timeout - jiffies));
	mod_timer(&otg_workaround, timeout);
}

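/*
 * am35x_musb_interrupt - top level IRQ handler.
 *
 * Reads and acknowledges the AM35x endpoint and core interrupt sources,
 * maps DRVVBUS changes onto OTG state transitions, passes the decoded
 * status to musb_interrupt(), and writes the EOI register so the level
 * interrupt can be re-asserted.
 */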
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
{
	struct musb  *musb = hci;
	void __iomem *reg_base = musb->ctrl_base;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	struct usb_otg *otg = musb->xceiv->otg;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	u32 epintr, usbintr;

	spin_lock_irqsave(&musb->lock, flags);

	/* Get endpoint interrupts */
	epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);

	if (epintr) {
		musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);

		musb->int_rx =
			(epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
		musb->int_tx =
			(epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
	}

	/* Get usb core interrupts */
	usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
	if (!usbintr && !epintr)
		goto eoi;

	if (usbintr) {
		musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);

		musb->int_usb =
			(usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
	}
	/*
	 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
	 * AM35x's missing ID change IRQ.  We need an ID change IRQ to
	 * switch appropriately between halves of the OTG state machine.
	 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
	 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
	 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
	 */
	if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
		int drvvbus = musb_readl(reg_base, USB_STAT_REG);
		void __iomem *mregs = musb->mregs;
		u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
		int err;

		err = is_host_enabled(musb) && (musb->int_usb &
						MUSB_INTR_VBUSERROR);
		if (err) {
			/*
			 * The Mentor core doesn't debounce VBUS as needed
			 * to cope with device connect current spikes. This
			 * means it's not uncommon for bus-powered devices
			 * to get VBUS errors during enumeration.
			 *
			 * This is a workaround, but newer RTL from Mentor
			 * seems to allow a better one: "re"-starting sessions
			 * without waiting for VBUS to stop registering in
			 * devctl.
			 */
			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
			musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
			WARNING("VBUS error workaround (delay coming)\n");
		} else if (is_host_enabled(musb) && drvvbus) {
			MUSB_HST_MODE(musb);
			otg->default_a = 1;
			musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
			del_timer(&otg_workaround);
		} else {
			musb->is_active = 0;
			MUSB_DEV_MODE(musb);
			otg->default_a = 0;
			musb->xceiv->state = OTG_STATE_B_IDLE;
			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
		}

		/* NOTE: this must complete power-on within 100 ms. */
		dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
				drvvbus ? "on" : "off",
				otg_state_string(musb->xceiv->state),
				err ? " ERROR" : "",
				devctl);
		ret = IRQ_HANDLED;
	}

	if (musb->int_tx || musb->int_rx || musb->int_usb)
		ret |= musb_interrupt(musb);

eoi:
	/* EOI needs to be written for the IRQ to be re-asserted. */
	if (ret == IRQ_HANDLED || epintr || usbintr) {
		/* clear level interrupt */
		if (data->clear_irq)
			data->clear_irq();
		/* write EOI */
		musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
	}

	/* Poll for ID change */
	if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}

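/*
 * am35x_musb_set_mode - switch host/peripheral/OTG mode through the
 * board-specific callback; returns -EIO if the board provides none.
 */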
static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	int     retval = 0;

	if (data->set_mode)
		data->set_mode(musb_mode);
	else
		retval = -EIO;

	return retval;
}

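/*
 * am35x_musb_init - platform init hook: verify the core is clocked,
 * claim the NOP transceiver, reset the module, power up the on-chip
 * PHY/PLL and install the AM35x interrupt handler.
 */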
static int am35x_musb_init(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	void __iomem *reg_base = musb->ctrl_base;
	u32 rev;

	musb->mregs += USB_MENTOR_CORE_OFFSET;

	/* Returns zero if e.g. not clocked */
	rev = musb_readl(reg_base, USB_REVISION_REG);
	if (!rev)
		return -ENODEV;

	usb_nop_xceiv_register();
	musb->xceiv = usb_get_phy(USB_PHY_TYPE_USB2);
	if (IS_ERR_OR_NULL(musb->xceiv))
		return -ENODEV;

	if (is_host_enabled(musb))
		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

	/* Reset the musb */
	if (data->reset)
		data->reset();

	/* Reset the controller */
	musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	msleep(5);

	musb->isr = am35x_musb_interrupt;

	/* clear level interrupt */
	if (data->clear_irq)
		data->clear_irq();

	return 0;
}

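/*
 * am35x_musb_exit - undo am35x_musb_init(): stop the workaround timer,
 * power down the PHY and release the transceiver.
 */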
static int am35x_musb_exit(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;

	if (is_host_enabled(musb))
		del_timer_sync(&otg_workaround);

	/* Shutdown the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(0);

	usb_put_phy(musb->xceiv);
	usb_nop_xceiv_unregister();

	return 0;
}

/*
 * AM35x supports only 32-bit read operations on the USB FIFOs, so read
 * whole words and copy out any 1-3 byte remainder.
 */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	void __iomem *fifo = hw_ep->fifo;
	u32		val;
	int		i;

	/* Read for 32bit-aligned destination address */
	if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
		readsl(fifo, dst, len >> 2);
		dst += len & ~0x03;
		len &= 0x03;
	}
	/*
	 * Now read the remaining 1 to 3 bytes, or the complete length
	 * if the destination address is unaligned.
	 */
	if (len > 4) {
		for (i = 0; i < (len >> 2); i++) {
			*(u32 *) dst = musb_readl(fifo, 0);
			dst += 4;
		}
		len &= 0x03;
	}
	if (len > 0) {
		val = musb_readl(fifo, 0);
		memcpy(dst, &val, len);
	}
}

static const struct musb_platform_ops am35x_ops = {
	.init		= am35x_musb_init,
	.exit		= am35x_musb_exit,

	.enable		= am35x_musb_enable,
	.disable	= am35x_musb_disable,

	.set_mode	= am35x_musb_set_mode,
	.try_idle	= am35x_musb_try_idle,

	.set_vbus	= am35x_musb_set_vbus,
};

static u64 am35x_dmamask = DMA_BIT_MASK(32);

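/*
 * am35x_probe - allocate the glue structure, get and enable the "fck"
 * (PHY) and "ick" clocks, then create and register the "musb-hdrc"
 * child device that the core driver binds to.
 */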
static int __devinit am35x_probe(struct platform_device *pdev)
{
	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
	struct platform_device		*musb;
	struct am35x_glue		*glue;

	struct clk			*phy_clk;
	struct clk			*clk;

	int				ret = -ENOMEM;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pdev->dev, "failed to allocate glue context\n");
		goto err0;
	}

	musb = platform_device_alloc("musb-hdrc", -1);
	if (!musb) {
		dev_err(&pdev->dev, "failed to allocate musb device\n");
		goto err1;
	}

	phy_clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(phy_clk)) {
		dev_err(&pdev->dev, "failed to get PHY clock\n");
		ret = PTR_ERR(phy_clk);
		goto err2;
	}

	clk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err3;
	}

	ret = clk_enable(phy_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PHY clock\n");
		goto err4;
	}

	ret = clk_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto err5;
	}

	musb->dev.parent		= &pdev->dev;
	musb->dev.dma_mask		= &am35x_dmamask;
	musb->dev.coherent_dma_mask	= am35x_dmamask;

	glue->dev			= &pdev->dev;
	glue->musb			= musb;
	glue->phy_clk			= phy_clk;
	glue->clk			= clk;

	pdata->platform_ops		= &am35x_ops;

	platform_set_drvdata(pdev, glue);

	ret = platform_device_add_resources(musb, pdev->resource,
			pdev->num_resources);
	if (ret) {
		dev_err(&pdev->dev, "failed to add resources\n");
		goto err6;
	}

	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform_data\n");
		goto err6;
	}

	ret = platform_device_add(musb);
	if (ret) {
		dev_err(&pdev->dev, "failed to register musb device\n");
		goto err6;
	}

	return 0;

err6:
	clk_disable(clk);

err5:
	clk_disable(phy_clk);

err4:
	clk_put(clk);

err3:
	clk_put(phy_clk);

err2:
	platform_device_put(musb);

err1:
	kfree(glue);

err0:
	return ret;
}

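/*
 * am35x_remove - unregister the child musb device and release the
 * clocks and glue structure acquired in am35x_probe().
 */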
static int __devexit am35x_remove(struct platform_device *pdev)
{
	struct am35x_glue	*glue = platform_get_drvdata(pdev);

	platform_device_del(glue->musb);
	platform_device_put(glue->musb);
	clk_disable(glue->clk);
	clk_disable(glue->phy_clk);
	clk_put(glue->clk);
	clk_put(glue->phy_clk);
	kfree(glue);

	return 0;
}

#ifdef CONFIG_PM
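/*
 * System suspend/resume: power the PHY and its PLL down/up and gate
 * the PHY and interface clocks around it.
 */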
static int am35x_suspend(struct device *dev)
{
	struct am35x_glue	*glue = dev_get_drvdata(dev);
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;

	/* Shutdown the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(0);

	clk_disable(glue->phy_clk);
	clk_disable(glue->clk);

	return 0;
}

static int am35x_resume(struct device *dev)
{
	struct am35x_glue	*glue = dev_get_drvdata(dev);
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	int			ret;

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	ret = clk_enable(glue->phy_clk);
	if (ret) {
		dev_err(dev, "failed to enable PHY clock\n");
		return ret;
	}

	ret = clk_enable(glue->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		return ret;
	}

	return 0;
}

static struct dev_pm_ops am35x_pm_ops = {
	.suspend	= am35x_suspend,
	.resume		= am35x_resume,
};

#define DEV_PM_OPS	&am35x_pm_ops
#else
#define DEV_PM_OPS	NULL
#endif

static struct platform_driver am35x_driver = {
	.probe		= am35x_probe,
	.remove		= __devexit_p(am35x_remove),
	.driver		= {
		.name	= "musb-am35x",
		.pm	= DEV_PM_OPS,
	},
};

MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");

static int __init am35x_init(void)
{
	return platform_driver_register(&am35x_driver);
}
module_init(am35x_init);

static void __exit am35x_exit(void)
{
	platform_driver_unregister(&am35x_driver);
}
module_exit(am35x_exit);