/*
 * Blackfin On-Chip MAC Driver
 *
 * Copyright 2004-2010 Analog Devices Inc.
 *
 * Enter bugs at http://blackfin.uclinux.org/
 *
 * Licensed under the GPL-2 or later.
 */

#define DRV_VERSION	"1.1"
#define DRV_DESC	"Blackfin on-chip Ethernet MAC driver"

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/crc32.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <asm/dma.h>
#include <linux/dma-mapping.h>

#include <asm/div64.h>
#include <asm/dpmc.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/portmux.h>
#include <mach/pll.h>

#include "bfin_mac.h"

MODULE_AUTHOR("Bryan Wu, Luke Yang");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:bfin_mac");

#if defined(CONFIG_BFIN_MAC_USE_L1)
# define bfin_mac_alloc(dma_handle, size, num)  l1_data_sram_zalloc(size*num)
# define bfin_mac_free(dma_handle, ptr, num)    l1_data_sram_free(ptr)
#else
# define bfin_mac_alloc(dma_handle, size, num) \
	dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
# define bfin_mac_free(dma_handle, ptr, num) \
	dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
#endif
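
/*
 * Buffer placement note: with CONFIG_BFIN_MAC_USE_L1 the descriptor and
 * packet buffers live in on-chip L1 data SRAM (fast and not cached, so the
 * dma_handle argument is simply ignored); otherwise they come from coherent
 * DMA memory.  Either way the buffers should be safe to hand to the EMAC
 * DMA engines without additional cache maintenance.
 */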

#define PKT_BUF_SZ 1580

#define MAX_TIMEOUT_CNT	500

/* pointers to maintain transmit list */
static struct net_dma_desc_tx *tx_list_head;
static struct net_dma_desc_tx *tx_list_tail;
static struct net_dma_desc_rx *rx_list_head;
static struct net_dma_desc_rx *rx_list_tail;
static struct net_dma_desc_rx *current_rx_ptr;
static struct net_dma_desc_tx *current_tx_ptr;
static struct net_dma_desc_tx *tx_desc;
static struct net_dma_desc_rx *rx_desc;
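
/*
 * Each list element carries two chained DMA descriptors: desc_a moves the
 * frame data and desc_b writes the hardware status word back into the
 * element, so a non-zero status word marks a completed slot.  desc_list_init()
 * below closes the TX and RX lists into rings, letting the EMAC DMA engines
 * walk them without CPU intervention.
 */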

static void desc_list_free(void)
{
	struct net_dma_desc_rx *r;
	struct net_dma_desc_tx *t;
	int i;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	dma_addr_t dma_handle = 0;
#endif

	if (tx_desc) {
		t = tx_list_head;
		for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
			if (t) {
				if (t->skb) {
					dev_kfree_skb(t->skb);
					t->skb = NULL;
				}
				t = t->next;
			}
		}
		bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
	}

	if (rx_desc) {
		r = rx_list_head;
		for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
			if (r) {
				if (r->skb) {
					dev_kfree_skb(r->skb);
					r->skb = NULL;
				}
				r = r->next;
			}
		}
		bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
	}
}

static int desc_list_init(struct net_device *dev)
{
	int i;
	struct sk_buff *new_skb;
#if !defined(CONFIG_BFIN_MAC_USE_L1)
	/*
	 * This dma_handle is useless in Blackfin dma_alloc_coherent().
	 * The real dma handler is the return value of dma_alloc_coherent().
	 */
	dma_addr_t dma_handle;
#endif

	tx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_tx),
				CONFIG_BFIN_TX_DESC_NUM);
	if (tx_desc == NULL)
		goto init_error;

	rx_desc = bfin_mac_alloc(&dma_handle,
				sizeof(struct net_dma_desc_rx),
				CONFIG_BFIN_RX_DESC_NUM);
	if (rx_desc == NULL)
		goto init_error;

	/* init tx_list */
	tx_list_head = tx_list_tail = tx_desc;

	for (i = 0; i < CONFIG_BFIN_TX_DESC_NUM; i++) {
		struct net_dma_desc_tx *t = tx_desc + i;
		struct dma_descriptor *a = &(t->desc_a);
		struct dma_descriptor *b = &(t->desc_b);

		/*
		 * disable DMA
		 * read from memory WNR = 0
		 * wordsize is 32 bits
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		a->start_addr = (unsigned long)t->packet;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(t->status));
		b->x_count = 0;

		t->skb = NULL;
		tx_list_tail->desc_b.next_dma_desc = a;
		tx_list_tail->next = t;
		tx_list_tail = t;
	}
	tx_list_tail->next = tx_list_head;	/* tx_list is a circle */
	tx_list_tail->desc_b.next_dma_desc = &(tx_list_head->desc_a);
	current_tx_ptr = tx_list_head;

	/* init rx_list */
	rx_list_head = rx_list_tail = rx_desc;

	for (i = 0; i < CONFIG_BFIN_RX_DESC_NUM; i++) {
		struct net_dma_desc_rx *r = rx_desc + i;
		struct dma_descriptor *a = &(r->desc_a);
		struct dma_descriptor *b = &(r->desc_b);

		/* allocate a new skb for next time receive */
		new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
		if (!new_skb)
			goto init_error;

		skb_reserve(new_skb, NET_IP_ALIGN);
		/* Invalidate the data cache over the skb->data range when the
		 * cache is write-back, so stale lines cannot overwrite the new
		 * data arriving from DMA.
		 */
		blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);
		r->skb = new_skb;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * disable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		a->config = DMAEN | WNR | WDSIZE_32 | NDSIZE_6 | DMAFLOW_LARGE;
		/* since RXDWA is enabled */
		a->start_addr = (unsigned long)new_skb->data - 2;
		a->x_count = 0;
		a->next_dma_desc = b;

		/*
		 * enabled DMA
		 * write to memory WNR = 1
		 * wordsize is 32 bits
		 * enable interrupt
		 * 6 half words is desc size
		 * large desc flow
		 */
		b->config = DMAEN | WNR | WDSIZE_32 | DI_EN |
				NDSIZE_6 | DMAFLOW_LARGE;
		b->start_addr = (unsigned long)(&(r->status));
		b->x_count = 0;

		rx_list_tail->desc_b.next_dma_desc = a;
		rx_list_tail->next = r;
		rx_list_tail = r;
	}
	rx_list_tail->next = rx_list_head;	/* rx_list is a circle */
	rx_list_tail->desc_b.next_dma_desc = &(rx_list_head->desc_a);
	current_rx_ptr = rx_list_head;

	return 0;

init_error:
	desc_list_free();
	pr_err("descriptor list allocation failed\n");
	return -ENOMEM;
}


/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/

/*
 * MII operations
 */
/* Wait until the previous MDC/MDIO transaction has completed */
static int bfin_mdio_poll(void)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	/* poll the STABUSY bit */
	while ((bfin_read_EMAC_STAADD()) & STABUSY) {
		udelay(1);
		if (timeout_cnt-- < 0) {
			pr_err("timed out waiting for MDC/MDIO transaction to complete\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}

/* Read an off-chip register in a PHY through the MDC/MDIO port */
static int bfin_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
{
	int ret;

	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* read mode */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
				SET_REGAD((u16) regnum) |
				STABUSY);

	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	return (int) bfin_read_EMAC_STADAT();
}

/* Write an off-chip register in a PHY through the MDC/MDIO port */
static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
			      u16 value)
{
	int ret;

	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	bfin_write_EMAC_STADAT((u32) value);

	/* write mode */
	bfin_write_EMAC_STAADD(SET_PHYAD((u16) phy_addr) |
				SET_REGAD((u16) regnum) |
				STAOP |
				STABUSY);

	return bfin_mdio_poll();
}

static int bfin_mdiobus_reset(struct mii_bus *bus)
{
	return 0;
}

static void bfin_mac_adjust_link(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = lp->phydev;
	unsigned long flags;
	int new_state = 0;

	spin_lock_irqsave(&lp->lock, flags);
	if (phydev->link) {
		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode. */
		if (phydev->duplex != lp->old_duplex) {
			u32 opmode = bfin_read_EMAC_OPMODE();
			new_state = 1;

			if (phydev->duplex)
				opmode |= FDMODE;
			else
				opmode &= ~(FDMODE);

			bfin_write_EMAC_OPMODE(opmode);
			lp->old_duplex = phydev->duplex;
		}

		if (phydev->speed != lp->old_speed) {
			if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
				u32 opmode = bfin_read_EMAC_OPMODE();
				switch (phydev->speed) {
				case 10:
					opmode |= RMII_10;
					break;
				case 100:
					opmode &= ~RMII_10;
					break;
				default:
					netdev_warn(dev,
						"Ack! Speed (%d) is not 10/100!\n",
						phydev->speed);
					break;
				}
				bfin_write_EMAC_OPMODE(opmode);
			}

			new_state = 1;
			lp->old_speed = phydev->speed;
		}

		if (!lp->old_link) {
			new_state = 1;
			lp->old_link = 1;
		}
	} else if (lp->old_link) {
		new_state = 1;
		lp->old_link = 0;
		lp->old_speed = 0;
		lp->old_duplex = -1;
	}

	if (new_state) {
		u32 opmode = bfin_read_EMAC_OPMODE();
		phy_print_status(phydev);
		pr_debug("EMAC_OPMODE = 0x%08x\n", opmode);
	}

	spin_unlock_irqrestore(&lp->lock, flags);
}

/* MDC  = 2.5 MHz */
#define MDC_CLK 2500000
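
/*
 * The divider computed in mii_probe() below assumes
 *   MDC = SCLK / (2 * (mdc_div + 1))
 * i.e. mdc_div = SCLK / (2 * MDC_CLK) - 1.  As a worked example (assuming a
 * typical 100 MHz system clock): 100000000 / (2 * 2500000) - 1 = 19, which
 * gives an MDC of exactly 2.5 MHz.
 */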

static int mii_probe(struct net_device *dev, int phy_mode)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	unsigned short sysctl;
	int i;
	u32 sclk, mdc_div;

	/* Enable PHY output early */
	if (!(bfin_read_VR_CTL() & CLKBUFOE))
		bfin_write_VR_CTL(bfin_read_VR_CTL() | CLKBUFOE);

	sclk = get_sclk();
	mdc_div = ((sclk / MDC_CLK) / 2) - 1;

	sysctl = bfin_read_EMAC_SYSCTL();
	sysctl = (sysctl & ~MDCDIV) | SET_MDCDIV(mdc_div);
	bfin_write_EMAC_SYSCTL(sysctl);

	/* search for connected PHY device */
	for (i = 0; i < PHY_MAX_ADDR; ++i) {
		struct phy_device *const tmp_phydev = lp->mii_bus->phy_map[i];

		if (!tmp_phydev)
			continue; /* no PHY here... */

		phydev = tmp_phydev;
		break; /* found it */
	}

	/* we should now have a proper phydev to attach to */
	if (!phydev) {
		netdev_err(dev, "no phy device found\n");
		return -ENODEV;
	}

	if (phy_mode != PHY_INTERFACE_MODE_RMII &&
		phy_mode != PHY_INTERFACE_MODE_MII) {
		netdev_err(dev, "invalid phy interface mode\n");
		return -EINVAL;
	}

	phydev = phy_connect(dev, dev_name(&phydev->dev),
			     &bfin_mac_adjust_link, phy_mode);

	if (IS_ERR(phydev)) {
		netdev_err(dev, "could not attach PHY\n");
		return PTR_ERR(phydev);
	}

	/* mask with MAC supported features */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_Pause | SUPPORTED_Asym_Pause
			      | SUPPORTED_MII
			      | SUPPORTED_TP);

	phydev->advertising = phydev->supported;

	lp->old_link = 0;
	lp->old_speed = 0;
	lp->old_duplex = -1;
	lp->phydev = phydev;

	pr_info("attached PHY driver [%s] "
	        "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
	        phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
	        MDC_CLK, mdc_div, sclk/1000000);

	return 0;
}

/*
 * Ethtool support
 */

/*
 * interrupt routine for magic packet wakeup
 */
static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int
bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	if (lp->phydev)
		return phy_ethtool_gset(lp->phydev, cmd);

	return -EINVAL;
}

static int
bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (lp->phydev)
		return phy_ethtool_sset(lp->phydev, cmd);

	return -EINVAL;
}

static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
					struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}

static void bfin_mac_ethtool_getwol(struct net_device *dev,
	struct ethtool_wolinfo *wolinfo)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	wolinfo->supported = WAKE_MAGIC;
	wolinfo->wolopts = lp->wol;
}

static int bfin_mac_ethtool_setwol(struct net_device *dev,
	struct ethtool_wolinfo *wolinfo)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int rc;

	if (wolinfo->wolopts & (WAKE_MAGICSECURE |
				WAKE_UCAST |
				WAKE_MCAST |
				WAKE_BCAST |
				WAKE_ARP))
		return -EOPNOTSUPP;

	lp->wol = wolinfo->wolopts;

	if (lp->wol && !lp->irq_wake_requested) {
		/* register wake irq handler */
		rc = request_irq(IRQ_MAC_WAKEDET, bfin_mac_wake_interrupt,
				 0, "EMAC_WAKE", dev);
		if (rc)
			return rc;
		lp->irq_wake_requested = true;
	}

	if (!lp->wol && lp->irq_wake_requested) {
		free_irq(IRQ_MAC_WAKEDET, dev);
		lp->irq_wake_requested = false;
	}

	/* Make sure the PHY driver doesn't suspend */
	device_init_wakeup(&dev->dev, lp->wol);

	return 0;
}

#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
	struct ethtool_ts_info *info)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = lp->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
	return 0;
}
#endif

static const struct ethtool_ops bfin_mac_ethtool_ops = {
	.get_settings = bfin_mac_ethtool_getsettings,
	.set_settings = bfin_mac_ethtool_setsettings,
	.get_link = ethtool_op_get_link,
	.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
	.get_wol = bfin_mac_ethtool_getwol,
	.set_wol = bfin_mac_ethtool_setwol,
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
	.get_ts_info = bfin_mac_ethtool_get_ts_info,
#endif
};

/**************************************************************************/
static void setup_system_regs(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int i;
	unsigned short sysctl;

	/*
	 * Odd word alignment for Receive Frame DMA word
	 * Configure checksum support and receive frame word alignment
	 */
	sysctl = bfin_read_EMAC_SYSCTL();
	/*
	 * check if interrupt is requested for any PHY,
	 * enable PHY interrupt only if needed
	 */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		if (lp->mii_bus->irq[i] != PHY_POLL)
			break;
	if (i < PHY_MAX_ADDR)
		sysctl |= PHYIE;
	sysctl |= RXDWA;
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	sysctl |= RXCKS;
#else
	sysctl &= ~RXCKS;
#endif
	bfin_write_EMAC_SYSCTL(sysctl);

	bfin_write_EMAC_MMC_CTL(RSTC | CROLL);

	/* Set vlan regs to let 1522 bytes long packets pass through */
	bfin_write_EMAC_VLAN1(lp->vlan1_mask);
	bfin_write_EMAC_VLAN2(lp->vlan2_mask);

	/* Initialize the TX DMA channel registers */
	bfin_write_DMA2_X_COUNT(0);
	bfin_write_DMA2_X_MODIFY(4);
	bfin_write_DMA2_Y_COUNT(0);
	bfin_write_DMA2_Y_MODIFY(0);

	/* Initialize the RX DMA channel registers */
	bfin_write_DMA1_X_COUNT(0);
	bfin_write_DMA1_X_MODIFY(4);
	bfin_write_DMA1_Y_COUNT(0);
	bfin_write_DMA1_Y_MODIFY(0);
}

static void setup_mac_addr(u8 *mac_addr)
{
	u32 addr_low = le32_to_cpu(*(__le32 *) & mac_addr[0]);
	u16 addr_hi = le16_to_cpu(*(__le16 *) & mac_addr[4]);

	/* this depends on a little-endian machine */
	bfin_write_EMAC_ADDRLO(addr_low);
	bfin_write_EMAC_ADDRHI(addr_hi);
}

static int bfin_mac_set_mac_address(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	if (netif_running(dev))
		return -EBUSY;
	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	setup_mac_addr(dev->dev_addr);
	return 0;
}

#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
#define bfin_mac_hwtstamp_is_none(cfg) ((cfg) == HWTSTAMP_FILTER_NONE)
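
/*
 * bfin_select_phc_clock() picks the coarsest power-of-two timestamp
 * resolution (2^shift ns per PHC tick) that is no finer than one period of
 * the input clock, so converting a raw counter value to nanoseconds is a
 * plain left shift.  Worked example (assuming a 100 MHz system clock): one
 * period is 10 ns, the loop stops at ppn = 16, so shift = 4 and the PHC
 * effectively runs at 62.5 MHz.
 */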

static u32 bfin_select_phc_clock(u32 input_clk, unsigned int *shift_result)
{
	u32 ipn = 1000000000UL / input_clk;
	u32 ppn = 1;
	unsigned int shift = 0;

	while (ppn <= ipn) {
		ppn <<= 1;
		shift++;
	}
	*shift_result = shift;
	return 1000000000UL / ppn;
}

static int bfin_mac_hwtstamp_set(struct net_device *netdev,
				 struct ifreq *ifr)
{
	struct hwtstamp_config config;
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u16 ptpctl;
	u32 ptpfv1, ptpfv2, ptpfv3, ptpfoff;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	pr_debug("%s config flag:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
			__func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if ((config.tx_type != HWTSTAMP_TX_OFF) &&
			(config.tx_type != HWTSTAMP_TX_ON))
		return -ERANGE;

	ptpctl = bfin_read_EMAC_PTP_CTL();

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		/*
		 * Don't allow any timestamping
		 */
		ptpfv3 = 0xFFFFFFFF;
		bfin_write_EMAC_PTP_FV3(ptpfv3);
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		/*
		 * Clear the five comparison mask bits (bits[12:8]) in EMAC_PTP_CTL
		 * to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register.
		 */
		ptpfoff = 0x4A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * The default value (0xFFFC) allows the timestamping of both
		 * received Sync messages and Delay_Req messages.
		 */
		ptpfv3 = 0xFFFFFFFC;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		/* Clear all five comparison mask bits (bits[12:8]) in the
		 * EMAC_PTP_CTL register to enable all the field matches.
		 */
		ptpctl &= ~0x1F00;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of the EMAC_PTP_FOFF register, except set
		 * the PTPCOF field to 0x2A.
		 */
		ptpfoff = 0x2A24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Keep the default values of the EMAC_PTP_FV1 and EMAC_PTP_FV2
		 * registers.
		 */
		ptpfv1 = 0x11040800;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp, set
		 * the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		/*
		 * Clear bits 8 and 12 of the EMAC_PTP_CTL register to enable only the
		 * EFTM and PTPCM field comparison.
		 */
		ptpctl &= ~0x1100;
		bfin_write_EMAC_PTP_CTL(ptpctl);
		/*
		 * Keep the default values of all the fields of the EMAC_PTP_FOFF
		 * register, except set the PTPCOF field to 0x0E.
		 */
		ptpfoff = 0x0E24170C;
		bfin_write_EMAC_PTP_FOFF(ptpfoff);
		/*
		 * Program bits [15:0] of the EMAC_PTP_FV1 register to 0x88F7, which
		 * corresponds to PTP messages on the MAC layer.
		 */
		ptpfv1 = 0x110488F7;
		bfin_write_EMAC_PTP_FV1(ptpfv1);
		ptpfv2 = 0x0140013F;
		bfin_write_EMAC_PTP_FV2(ptpfv2);
		/*
		 * To allow the timestamping of Pdelay_Req and Pdelay_Resp
		 * messages, set the value to 0xFFF0.
		 */
		ptpfv3 = 0xFFFFFFF0;
		bfin_write_EMAC_PTP_FV3(ptpfv3);

		config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	if (config.tx_type == HWTSTAMP_TX_OFF &&
	    bfin_mac_hwtstamp_is_none(config.rx_filter)) {
		ptpctl &= ~PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		SSYNC();
	} else {
		ptpctl |= PTP_EN;
		bfin_write_EMAC_PTP_CTL(ptpctl);

		/*
		 * clear any existing timestamp
		 */
		bfin_read_EMAC_PTP_RXSNAPLO();
		bfin_read_EMAC_PTP_RXSNAPHI();

		bfin_read_EMAC_PTP_TXSNAPLO();
		bfin_read_EMAC_PTP_TXSNAPHI();

		SSYNC();
	}

	lp->stamp_cfg = config;
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}

static int bfin_mac_hwtstamp_get(struct net_device *netdev,
				 struct ifreq *ifr)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	return copy_to_user(ifr->ifr_data, &lp->stamp_cfg,
			    sizeof(lp->stamp_cfg)) ?
		-EFAULT : 0;
}

static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			netdev_err(netdev, "timestamping the TX packet failed\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = regval << lp->shift;
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
	}
}

static void bfin_rx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u32 valid;
	u64 regval, ns;
	struct skb_shared_hwtstamps *shhwtstamps;

	if (bfin_mac_hwtstamp_is_none(lp->stamp_cfg.rx_filter))
		return;

	valid = bfin_read_EMAC_PTP_ISTAT() & RXEL;
	if (!valid)
		return;

	shhwtstamps = skb_hwtstamps(skb);

	regval = bfin_read_EMAC_PTP_RXSNAPLO();
	regval |= (u64)bfin_read_EMAC_PTP_RXSNAPHI() << 32;
	ns = regval << lp->shift;
	memset(shhwtstamps, 0, sizeof(*shhwtstamps));
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void bfin_mac_hwtstamp_init(struct net_device *netdev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	u64 addend, ppb;
	u32 input_clk, phc_clk;

	/* Initialize hardware timer */
	input_clk = get_sclk();
	phc_clk = bfin_select_phc_clock(input_clk, &lp->shift);
	addend = phc_clk * (1ULL << 32);
	do_div(addend, input_clk);
	bfin_write_EMAC_PTP_ADDEND((u32)addend);

	lp->addend = addend;
	ppb = 1000000000ULL * input_clk;
	do_div(ppb, phc_clk);
	lp->max_ppb = ppb - 1000000000ULL - 1ULL;

	/* Initialize hwstamp config */
	lp->stamp_cfg.rx_filter = HWTSTAMP_FILTER_NONE;
	lp->stamp_cfg.tx_type = HWTSTAMP_TX_OFF;
}
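
/*
 * Note on the ADDEND value programmed in bfin_mac_hwtstamp_init(): it is the
 * fixed-point ratio phc_clk / input_clk scaled by 2^32.  Presumably (per the
 * usual TSYNC scheme) the PTP block adds it to an internal accumulator every
 * input-clock cycle and the carry advances the time counter, so the counter
 * ticks at phc_clk and bfin_ptp_adjfreq() below can trim the frequency by
 * nudging ADDEND up or down.
 */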

static u64 bfin_ptp_time_read(struct bfin_mac_local *lp)
{
	u64 ns;
	u32 lo, hi;

	lo = bfin_read_EMAC_PTP_TIMELO();
	hi = bfin_read_EMAC_PTP_TIMEHI();

	ns = ((u64) hi) << 32;
	ns |= lo;
	ns <<= lp->shift;

	return ns;
}

static void bfin_ptp_time_write(struct bfin_mac_local *lp, u64 ns)
{
	u32 hi, lo;

	ns >>= lp->shift;
	hi = ns >> 32;
	lo = ns & 0xffffffff;

	bfin_write_EMAC_PTP_TIMELO(lo);
	bfin_write_EMAC_PTP_TIMEHI(hi);
}

/* PTP Hardware Clock operations */

static int bfin_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	u64 adj;
	u32 diff, addend;
	int neg_adj = 0;
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);

	if (ppb < 0) {
		neg_adj = 1;
		ppb = -ppb;
	}
	addend = lp->addend;
	adj = addend;
	adj *= ppb;
	diff = div_u64(adj, 1000000000ULL);

	addend = neg_adj ? addend - diff : addend + diff;

	bfin_write_EMAC_PTP_ADDEND(addend);

	return 0;
}

static int bfin_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	s64 now;
	unsigned long flags;
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);

	spin_lock_irqsave(&lp->phc_lock, flags);

	now = bfin_ptp_time_read(lp);
	now += delta;
	bfin_ptp_time_write(lp, now);

	spin_unlock_irqrestore(&lp->phc_lock, flags);

	return 0;
}

static int bfin_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
{
	u64 ns;
	u32 remainder;
	unsigned long flags;
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);

	spin_lock_irqsave(&lp->phc_lock, flags);

	ns = bfin_ptp_time_read(lp);

	spin_unlock_irqrestore(&lp->phc_lock, flags);

	ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
	ts->tv_nsec = remainder;
	return 0;
}

static int bfin_ptp_settime(struct ptp_clock_info *ptp,
			   const struct timespec *ts)
{
	u64 ns;
	unsigned long flags;
	struct bfin_mac_local *lp =
		container_of(ptp, struct bfin_mac_local, caps);

	ns = ts->tv_sec * 1000000000ULL;
	ns += ts->tv_nsec;

	spin_lock_irqsave(&lp->phc_lock, flags);

	bfin_ptp_time_write(lp, ns);

	spin_unlock_irqrestore(&lp->phc_lock, flags);

	return 0;
}

static int bfin_ptp_enable(struct ptp_clock_info *ptp,
			  struct ptp_clock_request *rq, int on)
{
	return -EOPNOTSUPP;
}

static struct ptp_clock_info bfin_ptp_caps = {
	.owner		= THIS_MODULE,
	.name		= "BF518 clock",
	.max_adj	= 0,
	.n_alarm	= 0,
	.n_ext_ts	= 0,
	.n_per_out	= 0,
	.pps		= 0,
	.adjfreq	= bfin_ptp_adjfreq,
	.adjtime	= bfin_ptp_adjtime,
	.gettime	= bfin_ptp_gettime,
	.settime	= bfin_ptp_settime,
	.enable		= bfin_ptp_enable,
};

static int bfin_phc_init(struct net_device *netdev, struct device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	lp->caps = bfin_ptp_caps;
	lp->caps.max_adj = lp->max_ppb;
	lp->clock = ptp_clock_register(&lp->caps, dev);
	if (IS_ERR(lp->clock))
		return PTR_ERR(lp->clock);

	lp->phc_index = ptp_clock_index(lp->clock);
	spin_lock_init(&lp->phc_lock);

	return 0;
}

static void bfin_phc_release(struct bfin_mac_local *lp)
{
	ptp_clock_unregister(lp->clock);
}

#else
# define bfin_mac_hwtstamp_is_none(cfg) 0
# define bfin_mac_hwtstamp_init(dev)
# define bfin_mac_hwtstamp_set(dev, ifr) (-EOPNOTSUPP)
# define bfin_mac_hwtstamp_get(dev, ifr) (-EOPNOTSUPP)
# define bfin_rx_hwtstamp(dev, skb)
# define bfin_tx_hwtstamp(dev, skb)
# define bfin_phc_init(netdev, dev) 0
# define bfin_phc_release(lp)
#endif

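/*
 * TX reclaim: the MAC DMA writes a non-zero status word back into each
 * element once its frame has been sent, so completed buffers can be freed by
 * walking tx_list_head forward until the first element whose status word is
 * still zero.
 */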
static inline void _tx_reclaim_skb(void)
{
	do {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;

	} while (tx_list_head->status.status_word != 0);
}

static void tx_reclaim_skb(struct bfin_mac_local *lp)
{
	int timeout_cnt = MAX_TIMEOUT_CNT;

	if (tx_list_head->status.status_word != 0)
		_tx_reclaim_skb();

	if (current_tx_ptr->next == tx_list_head) {
		while (tx_list_head->status.status_word == 0) {
			/* slow down polling to avoid too many queue stops */
			udelay(10);
			/* reclaim skb if DMA is not running. */
			if (!(bfin_read_DMA2_IRQ_STATUS() & DMA_RUN))
				break;
			if (timeout_cnt-- < 0)
				break;
		}

		if (timeout_cnt >= 0)
			_tx_reclaim_skb();
		else
			netif_stop_queue(lp->ndev);
	}

	if (current_tx_ptr->next != tx_list_head &&
		netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	if (tx_list_head != current_tx_ptr) {
		/* shorten the timer interval if tx queue is stopped */
		if (netif_queue_stopped(lp->ndev))
			lp->tx_reclaim_timer.expires =
				jiffies + (TX_RECLAIM_JIFFIES >> 4);
		else
			lp->tx_reclaim_timer.expires =
				jiffies + TX_RECLAIM_JIFFIES;

		mod_timer(&lp->tx_reclaim_timer,
			lp->tx_reclaim_timer.expires);
	}

	return;
}

static void tx_reclaim_skb_timeout(unsigned long lp)
{
	tx_reclaim_skb((struct bfin_mac_local *)lp);
}
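
/*
 * The EMAC wants a 16-bit DMA_Length_Word immediately in front of the frame
 * and the DMA buffer 32-bit aligned.  When skb->data happens to sit on a
 * half-word boundary (data_align == 2), the length word can be written into
 * the two bytes just before the payload and the skb transmitted in place;
 * otherwise the frame is copied into the descriptor's own packet buffer
 * behind its length word.
 */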

static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	u16 *data;
	u32 data_align = (unsigned long)(skb->data) & 0x3;

	current_tx_ptr->skb = skb;

	if (data_align == 0x2) {
		/* move skb->data to current_tx_ptr payload */
		data = (u16 *)(skb->data) - 1;
		*data = (u16)(skb->len);
		/*
		 * When transmitting an Ethernet packet, the PTP_TSYNC module requires
		 * a DMA_Length_Word field associated with the packet. The lower 12 bits
		 * of this field are the length of the packet payload in bytes and the higher
		 * 4 bits are the timestamping enable field.
		 */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*data |= 0x1000;

		current_tx_ptr->desc_a.start_addr = (u32)data;
		/* this is important! */
		blackfin_dcache_flush_range((u32)data,
				(u32)((u8 *)data + skb->len + 4));
	} else {
		*((u16 *)(current_tx_ptr->packet)) = (u16)(skb->len);
		/* enable timestamping for the sent packet */
		if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
			*((u16 *)(current_tx_ptr->packet)) |= 0x1000;
		memcpy((u8 *)(current_tx_ptr->packet + 2), skb->data,
			skb->len);
		current_tx_ptr->desc_a.start_addr =
			(u32)current_tx_ptr->packet;
		blackfin_dcache_flush_range(
			(u32)current_tx_ptr->packet,
			(u32)(current_tx_ptr->packet + skb->len + 2));
	}

	/* make sure the internal data buffers in the core are drained
	 * so that the DMA descriptors are completely written when the
	 * DMA engine goes to fetch them below
	 */
	SSYNC();

	/* always clear status buffer before start tx dma */
	current_tx_ptr->status.status_word = 0;

	/* enable this packet's dma */
	current_tx_ptr->desc_a.config |= DMAEN;

	/* tx dma is running, just return */
	if (bfin_read_DMA2_IRQ_STATUS() & DMA_RUN)
		goto out;

	/* tx dma is not running */
	bfin_write_DMA2_NEXT_DESC_PTR(&(current_tx_ptr->desc_a));
	/* dma enabled, read from memory, size is 6 */
	bfin_write_DMA2_CONFIG(current_tx_ptr->desc_a.config);
	/* Turn on the EMAC tx */
	bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);

out:
	bfin_tx_hwtstamp(dev, skb);

	current_tx_ptr = current_tx_ptr->next;
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += (skb->len);

	tx_reclaim_skb(lp);

	return NETDEV_TX_OK;
}

#define IP_HEADER_OFF  0
#define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
	RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)

static void bfin_mac_rx(struct net_device *dev)
{
	struct sk_buff *skb, *new_skb;
	unsigned short len;
	struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
#if defined(BFIN_MAC_CSUM_OFFLOAD)
	unsigned int i;
	unsigned char fcs[ETH_FCS_LEN + 1];
#endif

	/* check if frame status word reports an error condition
	 * in which case we simply drop the packet
	 */
	if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
		netdev_notice(dev, "rx: receive error - packet dropped\n");
		dev->stats.rx_dropped++;
		goto out;
	}

	/* allocate a new skb for next time receive */
	skb = current_rx_ptr->skb;

	new_skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN);
	if (!new_skb) {
		dev->stats.rx_dropped++;
		goto out;
	}
	/* reserve 2 bytes for RXDWA padding */
	skb_reserve(new_skb, NET_IP_ALIGN);
	/* Invalidate the data cache over the skb->data range when the cache
	 * is write-back, so stale lines cannot overwrite the new data
	 * arriving from DMA.
	 */
	blackfin_dcache_invalidate_range((unsigned long)new_skb->head,
					 (unsigned long)new_skb->end);

	current_rx_ptr->skb = new_skb;
	current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;

	len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
	/* Deduct the Ethernet FCS from the reported frame length */
	len -= ETH_FCS_LEN;
	skb_put(skb, len);

	skb->protocol = eth_type_trans(skb, dev);

	bfin_rx_hwtstamp(dev, skb);

#if defined(BFIN_MAC_CSUM_OFFLOAD)
	/* Checksum offloading only works for IPv4 packets with the standard IP header
	 * length of 20 bytes, because the blackfin MAC checksum calculation is
	 * based on that assumption. We must NOT use the calculated checksum if our
	 * IP version or header break that assumption.
	 */
	if (skb->data[IP_HEADER_OFF] == 0x45) {
		skb->csum = current_rx_ptr->status.ip_payload_csum;
		/*
		 * Deduct the Ethernet FCS from the hardware-generated IP payload
		 * checksum.  The IP checksum uses 16-bit one's complement
		 * arithmetic, so subtracting a value is the same as adding its
		 * inversion.  If the IP payload length is odd, the inverted FCS
		 * must also start at an odd offset, so the first byte is left
		 * zero.
		 */
		if (skb->len % 2) {
			fcs[0] = 0;
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i + 1] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN + 1, skb->csum);
		} else {
			for (i = 0; i < ETH_FCS_LEN; i++)
				fcs[i] = ~skb->data[skb->len + i];
			skb->csum = csum_partial(fcs, ETH_FCS_LEN, skb->csum);
		}
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
#endif

	netif_rx(skb);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
out:
	current_rx_ptr->status.status_word = 0x00000000;
	current_rx_ptr = current_rx_ptr->next;
}

/* interrupt routine to handle rx and error signal */
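/*
 * Completed frames are drained in a loop: each pass hands one descriptor to
 * bfin_mac_rx(), and the handler only clears DMA_DONE/DMA_ERR and returns
 * once it sees a descriptor whose status word is still zero, so several
 * back-to-back frames can be handled per interrupt.
 */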
static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	int number = 0;

get_one_packet:
	if (current_rx_ptr->status.status_word == 0) {
		/* no more new packet received */
		if (number == 0) {
			if (current_rx_ptr->next->status.status_word != 0) {
				current_rx_ptr = current_rx_ptr->next;
				goto real_rx;
			}
		}
		bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
					   DMA_DONE | DMA_ERR);
		return IRQ_HANDLED;
	}

real_rx:
	bfin_mac_rx(dev);
	number++;
	goto get_one_packet;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bfin_mac_poll(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	disable_irq(IRQ_MAC_RX);
	bfin_mac_interrupt(IRQ_MAC_RX, dev);
	tx_reclaim_skb(lp);
	enable_irq(IRQ_MAC_RX);
}
#endif				/* CONFIG_NET_POLL_CONTROLLER */

static void bfin_mac_disable(void)
{
	unsigned int opmode;

	opmode = bfin_read_EMAC_OPMODE();
	opmode &= (~RE);
	opmode &= (~TE);
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(opmode);
}

/*
 * Enable Interrupts, Receive, and Transmit
 */
static int bfin_mac_enable(struct phy_device *phydev)
{
	int ret;
	u32 opmode;

	pr_debug("%s\n", __func__);

	/* Set RX DMA */
	bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
	bfin_write_DMA1_CONFIG(rx_list_head->desc_a.config);

	/* Wait MII done */
	ret = bfin_mdio_poll();
	if (ret)
		return ret;

	/* We enable only RX here */
	/* ASTP   : Enable Automatic Pad Stripping
	   PR     : Promiscuous Mode for test
	   PSF    : Receive frames with total length less than 64 bytes.
	   FDMODE : Full Duplex Mode
	   LB     : Internal Loopback for test
	   RE     : Receiver Enable */
	opmode = bfin_read_EMAC_OPMODE();
	if (opmode & FDMODE)
		opmode |= PSF;
	else
		opmode |= DRO | DC | PSF;
	opmode |= RE;

	if (phydev->interface == PHY_INTERFACE_MODE_RMII) {
		opmode |= RMII; /* for now only 100 Mbit is supported */
#if defined(CONFIG_BF537) || defined(CONFIG_BF536)
		if (__SILICON_REVISION__ < 3) {
			/*
			 * This isn't publicly documented (fun times!), but in
			 * silicon <=0.2, the RX and TX pins are clocked together.
			 * So in order to recv, we must enable the transmit side
			 * as well.  This will cause a spurious TX interrupt too,
			 * but we can easily consume that.
			 */
			opmode |= TE;
		}
#endif
	}

	/* Turn on the EMAC rx */
	bfin_write_EMAC_OPMODE(opmode);

	return 0;
}

/* Our watchdog timed out. Called by the networking layer */
static void bfin_mac_timeout(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);

	pr_debug("%s: %s\n", dev->name, __func__);

	bfin_mac_disable();

	del_timer(&lp->tx_reclaim_timer);

	/* reset tx queue and free skb */
	while (tx_list_head != current_tx_ptr) {
		tx_list_head->desc_a.config &= ~DMAEN;
		tx_list_head->status.status_word = 0;
		if (tx_list_head->skb) {
			dev_kfree_skb(tx_list_head->skb);
			tx_list_head->skb = NULL;
		}
		tx_list_head = tx_list_head->next;
	}

	if (netif_queue_stopped(lp->ndev))
		netif_wake_queue(lp->ndev);

	bfin_mac_enable(lp->phydev);

	/* We can accept TX packets again */
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
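
/*
 * Imperfect multicast filtering: the top 6 bits of the Ethernet CRC-32 of
 * each address select one of 64 hash bins; bit 5 chooses between the HASHHI
 * and HASHLO registers and the low 5 bits pick the bit within it.  As with
 * any hash-based filter, the hardware may pass some extra multicast frames,
 * which the network stack then discards.
 */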

static void bfin_mac_multicast_hash(struct net_device *dev)
{
	u32 emac_hashhi, emac_hashlo;
	struct netdev_hw_addr *ha;
	u32 crc;

	emac_hashhi = emac_hashlo = 0;

	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc(ETH_ALEN, ha->addr);
		crc >>= 26;

		if (crc & 0x20)
			emac_hashhi |= 1 << (crc & 0x1f);
		else
			emac_hashlo |= 1 << (crc & 0x1f);
	}

	bfin_write_EMAC_HASHHI(emac_hashhi);
	bfin_write_EMAC_HASHLO(emac_hashlo);
}

/*
 * This routine will, depending on the values passed to it,
 * either make it accept multicast packets, go into
 * promiscuous mode (for TCPDUMP and cousins) or accept
 * a select set of multicast packets
 */
static void bfin_mac_set_multicast_list(struct net_device *dev)
{
	u32 sysctl;

	if (dev->flags & IFF_PROMISC) {
		netdev_info(dev, "set promisc mode\n");
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PR;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (dev->flags & IFF_ALLMULTI) {
		/* accept all multicast */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= PAM;
		bfin_write_EMAC_OPMODE(sysctl);
	} else if (!netdev_mc_empty(dev)) {
		/* set up multicast hash table */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl |= HM;
		bfin_write_EMAC_OPMODE(sysctl);
		bfin_mac_multicast_hash(dev);
	} else {
		/* clear promisc or multicast mode */
		sysctl = bfin_read_EMAC_OPMODE();
		sysctl &= ~(RAF | PAM);
		bfin_write_EMAC_OPMODE(sysctl);
	}
}

static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);

	if (!netif_running(netdev))
		return -EINVAL;

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return bfin_mac_hwtstamp_set(netdev, ifr);
	case SIOCGHWTSTAMP:
		return bfin_mac_hwtstamp_get(netdev, ifr);
	default:
		if (lp->phydev)
			return phy_mii_ioctl(lp->phydev, ifr, cmd);
		else
			return -EOPNOTSUPP;
	}
}

/*
 * this puts the device in an inactive state
 */
static void bfin_mac_shutdown(struct net_device *dev)
{
	/* Turn off the EMAC */
	bfin_write_EMAC_OPMODE(0x00000000);
	/* Turn off the EMAC RX DMA */
	bfin_write_DMA1_CONFIG(0x0000);
	bfin_write_DMA2_CONFIG(0x0000);
}

/*
 * Open and Initialize the interface
 *
 * Set up everything, reset the card, etc..
 */
static int bfin_mac_open(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	int ret;
	pr_debug("%s: %s\n", dev->name, __func__);

	/*
	 * Check that the address is valid.  If it's not, refuse
	 * to bring the device up.  The user must specify an
	 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
	 */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		netdev_warn(dev, "no valid ethernet hw addr\n");
		return -EINVAL;
	}

	/* initial rx and tx list */
	ret = desc_list_init(dev);
	if (ret)
		return ret;

	phy_start(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_RESET);
	setup_system_regs(dev);
	setup_mac_addr(dev->dev_addr);

	bfin_mac_disable();
	ret = bfin_mac_enable(lp->phydev);
	if (ret)
		return ret;
	pr_debug("hardware init finished\n");

	netif_start_queue(dev);
	netif_carrier_on(dev);

	return 0;
}

/*
 * this makes the board clean up everything that it can
 * and not talk to the outside world.   Caused by
 * an 'ifconfig ethX down'
 */
static int bfin_mac_close(struct net_device *dev)
{
	struct bfin_mac_local *lp = netdev_priv(dev);
	pr_debug("%s: %s\n", dev->name, __func__);

	netif_stop_queue(dev);
	netif_carrier_off(dev);

	phy_stop(lp->phydev);
	phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);

	/* clear everything */
	bfin_mac_shutdown(dev);

	/* free the rx/tx buffers */
	desc_list_free();

	return 0;
}

static const struct net_device_ops bfin_mac_netdev_ops = {
	.ndo_open		= bfin_mac_open,
	.ndo_stop		= bfin_mac_close,
	.ndo_start_xmit		= bfin_mac_hard_start_xmit,
	.ndo_set_mac_address	= bfin_mac_set_mac_address,
	.ndo_tx_timeout		= bfin_mac_timeout,
	.ndo_set_rx_mode	= bfin_mac_set_multicast_list,
	.ndo_do_ioctl           = bfin_mac_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bfin_mac_poll,
#endif
};
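
/*
 * The MAC and its MDIO bus are two separate platform devices: the board code
 * points the MAC's platform data at the bfin_mii_bus device, and that
 * device's drvdata (set in bfin_mii_bus_probe() below) is the registered
 * struct mii_bus which this probe picks up and hands to mii_probe().
 */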

static int bfin_mac_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct bfin_mac_local *lp;
	struct platform_device *pd;
	struct bfin_mii_bus_platform_data *mii_bus_data;
	int rc;

	ndev = alloc_etherdev(sizeof(struct bfin_mac_local));
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, &pdev->dev);
	platform_set_drvdata(pdev, ndev);
	lp = netdev_priv(ndev);
	lp->ndev = ndev;

	/* Grab the MAC address in the MAC */
	*(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO());
	*(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI());

	/* probe mac */
	/* todo: how to probe? which is the revision register */
	bfin_write_EMAC_ADDRLO(0x12345678);
	if (bfin_read_EMAC_ADDRLO() != 0x12345678) {
		dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}


	/*
	 * Is it valid? (Did bootloader initialize it?)
	 * Grab the MAC from the board somehow
	 * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c
	 */
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		if (bfin_get_ether_addr(ndev->dev_addr) ||
		     !is_valid_ether_addr(ndev->dev_addr)) {
			/* Still not valid, get a random one */
			netdev_warn(ndev, "Setting Ethernet MAC to a random one\n");
			eth_hw_addr_random(ndev);
		}
	}

	setup_mac_addr(ndev->dev_addr);

	if (!dev_get_platdata(&pdev->dev)) {
		dev_err(&pdev->dev, "Cannot get platform device bfin_mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	pd = dev_get_platdata(&pdev->dev);
	lp->mii_bus = platform_get_drvdata(pd);
	if (!lp->mii_bus) {
		dev_err(&pdev->dev, "Cannot get mii_bus!\n");
		rc = -ENODEV;
		goto out_err_probe_mac;
	}
	lp->mii_bus->priv = ndev;
	mii_bus_data = dev_get_platdata(&pd->dev);

	rc = mii_probe(ndev, mii_bus_data->phy_mode);
	if (rc) {
		dev_err(&pdev->dev, "MII Probe failed!\n");
		goto out_err_mii_probe;
	}

	lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
	lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	ndev->netdev_ops = &bfin_mac_netdev_ops;
	ndev->ethtool_ops = &bfin_mac_ethtool_ops;

	init_timer(&lp->tx_reclaim_timer);
	lp->tx_reclaim_timer.data = (unsigned long)lp;
	lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;

	spin_lock_init(&lp->lock);

	/* now, enable interrupts */
	/* register irq handler */
	rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt,
			0, "EMAC_RX", ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n");
		rc = -EBUSY;
		goto out_err_request_irq;
	}

	rc = register_netdev(ndev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device!\n");
		goto out_err_reg_ndev;
	}

	bfin_mac_hwtstamp_init(ndev);
	rc = bfin_phc_init(ndev, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register PHC device!\n");
		goto out_err_phc;
	}

	/* now, print out the card info, in a short format.. */
	netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);

	return 0;

out_err_phc:
out_err_reg_ndev:
	free_irq(IRQ_MAC_RX, ndev);
out_err_request_irq:
out_err_mii_probe:
	mdiobus_unregister(lp->mii_bus);
	mdiobus_free(lp->mii_bus);
out_err_probe_mac:
	free_netdev(ndev);

	return rc;
}

static int bfin_mac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(ndev);

	bfin_phc_release(lp);

	lp->mii_bus->priv = NULL;

	unregister_netdev(ndev);

	free_irq(IRQ_MAC_RX, ndev);

	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		bfin_write_EMAC_OPMODE((bfin_read_EMAC_OPMODE() & ~TE) | RE);
		bfin_write_EMAC_WKUP_CTL(MPKE);
		enable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_close(net_dev);
	}

	return 0;
}

static int bfin_mac_resume(struct platform_device *pdev)
{
	struct net_device *net_dev = platform_get_drvdata(pdev);
	struct bfin_mac_local *lp = netdev_priv(net_dev);

	if (lp->wol) {
		bfin_write_EMAC_OPMODE(bfin_read_EMAC_OPMODE() | TE);
		bfin_write_EMAC_WKUP_CTL(0);
		disable_irq_wake(IRQ_MAC_WAKEDET);
	} else {
		if (netif_running(net_dev))
			bfin_mac_open(net_dev);
	}

	return 0;
}
#else
#define bfin_mac_suspend NULL
#define bfin_mac_resume NULL
#endif	/* CONFIG_PM */

static int bfin_mii_bus_probe(struct platform_device *pdev)
{
	struct mii_bus *miibus;
	struct bfin_mii_bus_platform_data *mii_bus_pd;
	const unsigned short *pin_req;
	int rc, i;

	mii_bus_pd = dev_get_platdata(&pdev->dev);
	if (!mii_bus_pd) {
		dev_err(&pdev->dev, "No peripherals in platform data!\n");
		return -EINVAL;
	}

	/*
	 * We are setting up a network card,
	 * so set the GPIO pins to Ethernet mode
	 */
	pin_req = mii_bus_pd->mac_peripherals;
	rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
	if (rc) {
		dev_err(&pdev->dev, "Requesting peripherals failed!\n");
		return rc;
	}

	rc = -ENOMEM;
	miibus = mdiobus_alloc();
	if (miibus == NULL)
		goto out_err_alloc;
	miibus->read = bfin_mdiobus_read;
	miibus->write = bfin_mdiobus_write;
	miibus->reset = bfin_mdiobus_reset;

	miibus->parent = &pdev->dev;
	miibus->name = "bfin_mii_bus";
	miibus->phy_mask = mii_bus_pd->phy_mask;

	snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
		pdev->name, pdev->id);
	miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
	if (!miibus->irq)
		goto out_err_irq_alloc;

	/* default every PHY address to polling until platform data says otherwise */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		miibus->irq[i] = PHY_POLL;

	rc = clamp(mii_bus_pd->phydev_number, 0, PHY_MAX_ADDR);
	if (rc != mii_bus_pd->phydev_number)
		dev_err(&pdev->dev, "Invalid number (%i) of phydevs\n",
			mii_bus_pd->phydev_number);
	for (i = 0; i < rc; ++i) {
		unsigned short phyaddr = mii_bus_pd->phydev_data[i].addr;
		if (phyaddr < PHY_MAX_ADDR)
			miibus->irq[phyaddr] = mii_bus_pd->phydev_data[i].irq;
		else
			dev_err(&pdev->dev,
				"Invalid PHY address %i for phydev %i\n",
				phyaddr, i);
	}

	rc = mdiobus_register(miibus);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
		goto out_err_mdiobus_register;
	}

	platform_set_drvdata(pdev, miibus);
	return 0;

out_err_mdiobus_register:
	kfree(miibus->irq);
out_err_irq_alloc:
	mdiobus_free(miibus);
out_err_alloc:
	peripheral_free_list(pin_req);

	return rc;
}

static int bfin_mii_bus_remove(struct platform_device *pdev)
{
	struct mii_bus *miibus = platform_get_drvdata(pdev);
	struct bfin_mii_bus_platform_data *mii_bus_pd =
		dev_get_platdata(&pdev->dev);

	mdiobus_unregister(miibus);
	kfree(miibus->irq);
	mdiobus_free(miibus);
	peripheral_free_list(mii_bus_pd->mac_peripherals);

	return 0;
}

static struct platform_driver bfin_mii_bus_driver = {
	.probe = bfin_mii_bus_probe,
	.remove = bfin_mii_bus_remove,
	.driver = {
		.name = "bfin_mii_bus",
		.owner	= THIS_MODULE,
	},
};

static struct platform_driver bfin_mac_driver = {
	.probe = bfin_mac_probe,
	.remove = bfin_mac_remove,
	.resume = bfin_mac_resume,
	.suspend = bfin_mac_suspend,
	.driver = {
		.name = KBUILD_MODNAME,
		.owner	= THIS_MODULE,
	},
};

static int __init bfin_mac_init(void)
{
	int ret;
	ret = platform_driver_register(&bfin_mii_bus_driver);
	if (!ret)
		return platform_driver_register(&bfin_mac_driver);
	return -ENODEV;
}

module_init(bfin_mac_init);

static void __exit bfin_mac_cleanup(void)
{
	platform_driver_unregister(&bfin_mac_driver);
	platform_driver_unregister(&bfin_mii_bus_driver);
}

module_exit(bfin_mac_cleanup);