/*
 * Copyright (C) 2006-2007 PA Semi, Inc
 *
 * Driver for the PA Semi PWRficient onchip 1G/10G Ethernet MACs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <asm/dma-mapping.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>
#include <linux/inet_lro.h>

#include <asm/irq.h>
#include <asm/firmware.h>
#include <asm/pasemi_dma.h>

#include "pasemi_mac.h"

/* We have our own align, since ppc64 in general has it at 0 because
 * of design flaws in some of the server bridge chips. However, for
 * PWRficient doing the unaligned copies is more expensive than doing
 * unaligned DMA, so make sure the data is aligned instead.
 */
#define LOCAL_SKB_ALIGN	2

/* TODO list
 *
 * - Multicast support
 * - Large MTU support
 * - SW LRO
 * - Multiqueue RX/TX
 */


/* Must be a power of two */
#define RX_RING_SIZE 2048
#define TX_RING_SIZE 4096

#define LRO_MAX_AGGR 64

#define DEFAULT_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)

#define TX_DESC(tx, num)	((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)])
#define TX_DESC_INFO(tx, num)	((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
#define RX_DESC(rx, num)	((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)])
#define RX_DESC_INFO(rx, num)	((rx)->ring_info[(num) & (RX_RING_SIZE-1)])
#define RX_BUFF(rx, num)	((rx)->buffers[(num) & (RX_RING_SIZE-1)])

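/* Fill/clean bookkeeping for the rings; both helpers assume the ring sizes
 * above are powers of two so the index arithmetic can wrap with a mask.
 */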
#define RING_USED(ring)		(((ring)->next_to_fill - (ring)->next_to_clean) \
				 & ((ring)->size - 1))
#define RING_AVAIL(ring)	((ring->size) - RING_USED(ring))

#define BUF_SIZE 1646 /* 1500 MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */

MODULE_LICENSE("GPL");
MODULE_AUTHOR ("Olof Johansson <olof@lixom.net>");
MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver");

static int debug = -1;	/* -1 == use DEFAULT_MSG_ENABLE as value */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value");

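/* True when DMA address translation (IOMMU) is in effect, in which case the
 * RX/TX channel configuration below also has to set its translation bits.
 */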
static int translation_enabled(void)
{
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	return 1;
#else
	return firmware_has_feature(FW_FEATURE_LPAR);
#endif
}

static void write_iob_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_iob_reg(reg, val);
}

static unsigned int read_mac_reg(const struct pasemi_mac *mac, unsigned int reg)
{
	return pasemi_read_mac_reg(mac->dma_if, reg);
}

static void write_mac_reg(const struct pasemi_mac *mac, unsigned int reg,
			  unsigned int val)
{
	pasemi_write_mac_reg(mac->dma_if, reg, val);
}

static unsigned int read_dma_reg(unsigned int reg)
{
	return pasemi_read_dma_reg(reg);
}

static void write_dma_reg(unsigned int reg, unsigned int val)
{
	pasemi_write_dma_reg(reg, val);
}

static struct pasemi_mac_rxring *rx_ring(const struct pasemi_mac *mac)
{
	return mac->rx;
}

static struct pasemi_mac_txring *tx_ring(const struct pasemi_mac *mac)
{
	return mac->tx;
}

static inline void prefetch_skb(const struct sk_buff *skb)
{
	const void *d = skb;

	prefetch(d);
	prefetch(d+64);
	prefetch(d+128);
	prefetch(d+192);
}

static int mac_to_intf(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	u32 tmp;
	int nintf, off, i, j;
	int devfn = pdev->devfn;

	tmp = read_dma_reg(PAS_DMA_CAP_IFI);
	nintf = (tmp & PAS_DMA_CAP_IFI_NIN_M) >> PAS_DMA_CAP_IFI_NIN_S;
	off = (tmp & PAS_DMA_CAP_IFI_IOFF_M) >> PAS_DMA_CAP_IFI_IOFF_S;

	/* IOFF contains the offset to the registers containing the
	 * DMA interface-to-MAC-pci-id mappings, and NIN contains number
	 * of total interfaces. Each register contains 4 devfns.
	 * Just do a linear search until we find the devfn of the MAC
	 * we're trying to look up.
	 */

	for (i = 0; i < (nintf+3)/4; i++) {
		tmp = read_dma_reg(off+4*i);
		for (j = 0; j < 4; j++) {
			if (((tmp >> (8*j)) & 0xff) == devfn)
				return i*4 + j;
		}
	}
	return -1;
}

static int pasemi_get_mac_addr(struct pasemi_mac *mac)
{
	struct pci_dev *pdev = mac->pdev;
	struct device_node *dn = pci_device_to_OF_node(pdev);
	int len;
	const u8 *maddr;
	u8 addr[6];

	if (!dn) {
		dev_dbg(&pdev->dev,
			  "No device node for mac, not configuring\n");
		return -ENOENT;
	}

	maddr = of_get_property(dn, "local-mac-address", &len);

	if (maddr && len == 6) {
		memcpy(mac->mac_addr, maddr, 6);
		return 0;
	}

	/* Some old versions of firmware mistakenly use mac-address
	 * (and as a string) instead of a byte array in local-mac-address.
	 */

	if (maddr == NULL)
		maddr = of_get_property(dn, "mac-address", NULL);

	if (maddr == NULL) {
		dev_warn(&pdev->dev,
			 "no mac address in device tree, not configuring\n");
		return -ENOENT;
	}

	if (sscanf(maddr, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &addr[0],
		   &addr[1], &addr[2], &addr[3], &addr[4], &addr[5]) != 6) {
		dev_warn(&pdev->dev,
			 "can't parse mac address, not configuring\n");
		return -EINVAL;
	}

	memcpy(mac->mac_addr, addr, 6);

	return 0;
}

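/* inet_lro callback: locate the IP/TCP headers of a received frame and
 * reject anything the MAC did not already flag as valid IPv4 TCP, so that
 * only such flows get aggregated.
 */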
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *data)
{
	u64 macrx = (u64) data;
	unsigned int ip_len;
	struct iphdr *iph;

	/* IPv4 header checksum failed */
	if ((macrx & XCT_MACRX_HTY_M) != XCT_MACRX_HTY_IPV4_OK)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}

static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac,
				    const int nfrags,
				    struct sk_buff *skb,
				    const dma_addr_t *dmas)
{
	int f;
	struct pci_dev *pdev = mac->dma_pdev;

	pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE);

	for (f = 0; f < nfrags; f++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[f];

		pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE);
	}
	dev_kfree_skb_irq(skb);

	/* Freed descriptor slot + main SKB ptr + nfrags additional ptrs,
	 * rounded up to an even number of ring entries
	 */
	return (nfrags + 3) & ~1;
}

static int pasemi_mac_setup_rx_resources(const struct net_device *dev)
{
	struct pasemi_mac_rxring *ring;
	struct pasemi_mac *mac = netdev_priv(dev);
	int chno;
	unsigned int cfg;

	ring = pasemi_dma_alloc_chan(RXCHAN, sizeof(struct pasemi_mac_rxring),
				     offsetof(struct pasemi_mac_rxring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate RX channel\n");
		goto out_chan;
	}
	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = RX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  RX_RING_SIZE, GFP_KERNEL);

	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, RX_RING_SIZE))
		goto out_ring_desc;

	ring->buffers = dma_alloc_coherent(&mac->dma_pdev->dev,
					   RX_RING_SIZE * sizeof(u64),
					   &ring->buf_dma, GFP_KERNEL);
	if (!ring->buffers)
		goto out_ring_desc;

	memset(ring->buffers, 0, RX_RING_SIZE * sizeof(u64));

	write_dma_reg(PAS_DMA_RXCHAN_BASEL(chno),
		      PAS_DMA_RXCHAN_BASEL_BRBL(ring->chan.ring_dma));

	write_dma_reg(PAS_DMA_RXCHAN_BASEU(chno),
		      PAS_DMA_RXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32) |
		      PAS_DMA_RXCHAN_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXCHAN_CFG_HBU(2);

	if (translation_enabled())
		cfg |= PAS_DMA_RXCHAN_CFG_CTR;

	write_dma_reg(PAS_DMA_RXCHAN_CFG(chno), cfg);

	write_dma_reg(PAS_DMA_RXINT_BASEL(mac->dma_if),
		      PAS_DMA_RXINT_BASEL_BRBL(ring->buf_dma));

	write_dma_reg(PAS_DMA_RXINT_BASEU(mac->dma_if),
		      PAS_DMA_RXINT_BASEU_BRBH(ring->buf_dma >> 32) |
		      PAS_DMA_RXINT_BASEU_SIZ(RX_RING_SIZE >> 3));

	cfg = PAS_DMA_RXINT_CFG_DHL(2) | PAS_DMA_RXINT_CFG_L2 |
	      PAS_DMA_RXINT_CFG_LW | PAS_DMA_RXINT_CFG_RBP |
	      PAS_DMA_RXINT_CFG_HEN;

	if (translation_enabled())
		cfg |= PAS_DMA_RXINT_CFG_ITRR | PAS_DMA_RXINT_CFG_ITR;

	write_dma_reg(PAS_DMA_RXINT_CFG(mac->dma_if), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;
	mac->rx = ring;

	return 0;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return -ENOMEM;
}

static struct pasemi_mac_txring *
pasemi_mac_setup_tx_resources(const struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	u32 val;
	struct pasemi_mac_txring *ring;
	unsigned int cfg;
	int chno;

	ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_txring),
				     offsetof(struct pasemi_mac_txring, chan));

	if (!ring) {
		dev_err(&mac->pdev->dev, "Can't allocate TX channel\n");
		goto out_chan;
	}

	chno = ring->chan.chno;

	spin_lock_init(&ring->lock);

	ring->size = TX_RING_SIZE;
	ring->ring_info = kzalloc(sizeof(struct pasemi_mac_buffer) *
				  TX_RING_SIZE, GFP_KERNEL);
	if (!ring->ring_info)
		goto out_ring_info;

	/* Allocate descriptors */
	if (pasemi_dma_alloc_ring(&ring->chan, TX_RING_SIZE))
		goto out_ring_desc;

	write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno),
		      PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
	val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
	val |= PAS_DMA_TXCHAN_BASEU_SIZ(TX_RING_SIZE >> 3);

	write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val);

	cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE |
	      PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) |
	      PAS_DMA_TXCHAN_CFG_UP |
	      PAS_DMA_TXCHAN_CFG_WT(2);

	if (translation_enabled())
		cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR;

	write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg);

	ring->next_to_fill = 0;
	ring->next_to_clean = 0;
	ring->mac = mac;

	return ring;

out_ring_desc:
	kfree(ring->ring_info);
out_ring_info:
	pasemi_dma_free_chan(&ring->chan);
out_chan:
	return NULL;
}

static void pasemi_mac_free_tx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_txring *txring = tx_ring(mac);
	unsigned int i, j;
	struct pasemi_mac_buffer *info;
	dma_addr_t dmas[MAX_SKB_FRAGS+1];
	int freed, nfrags;
	int start, limit;

	start = txring->next_to_clean;
	limit = txring->next_to_fill;

	/* Compensate for when fill has wrapped and clean has not */
	if (start > limit)
		limit += TX_RING_SIZE;

	for (i = start; i < limit; i += freed) {
		info = &txring->ring_info[(i+1) & (TX_RING_SIZE-1)];
		if (info->dma && info->skb) {
			nfrags = skb_shinfo(info->skb)->nr_frags;
			for (j = 0; j <= nfrags; j++)
				dmas[j] = txring->ring_info[(i+1+j) &
						(TX_RING_SIZE-1)].dma;
			freed = pasemi_mac_unmap_tx_skb(mac, nfrags,
							info->skb, dmas);
		} else
			freed = 2;
	}

	kfree(txring->ring_info);
	pasemi_dma_free_chan(&txring->chan);

}

static void pasemi_mac_free_rx_resources(struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int i;
	struct pasemi_mac_buffer *info;

	for (i = 0; i < RX_RING_SIZE; i++) {
		info = &RX_DESC_INFO(rx, i);
		if (info->skb && info->dma) {
			pci_unmap_single(mac->dma_pdev,
					 info->dma,
					 info->skb->len,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(info->skb);
		}
		info->dma = 0;
		info->skb = NULL;
	}

	for (i = 0; i < RX_RING_SIZE; i++)
		RX_DESC(rx, i) = 0;

	dma_free_coherent(&mac->dma_pdev->dev, RX_RING_SIZE * sizeof(u64),
			  rx_ring(mac)->buffers, rx_ring(mac)->buf_dma);

	kfree(rx_ring(mac)->ring_info);
	pasemi_dma_free_chan(&rx_ring(mac)->chan);
	mac->rx = NULL;
}

static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
					 const int limit)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	int fill, count;

	if (limit <= 0)
		return;

	fill = rx_ring(mac)->next_to_fill;
	for (count = 0; count < limit; count++) {
		struct pasemi_mac_buffer *info = &RX_DESC_INFO(rx, fill);
		u64 *buff = &RX_BUFF(rx, fill);
		struct sk_buff *skb;
		dma_addr_t dma;

		/* Entry in use? */
		WARN_ON(*buff);

		skb = dev_alloc_skb(BUF_SIZE);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, LOCAL_SKB_ALIGN);

		dma = pci_map_single(mac->dma_pdev, skb->data,
				     BUF_SIZE - LOCAL_SKB_ALIGN,
				     PCI_DMA_FROMDEVICE);

		if (unlikely(dma_mapping_error(dma))) {
			dev_kfree_skb_irq(skb);
			break;
		}

		info->skb = skb;
		info->dma = dma;
		*buff = XCT_RXB_LEN(BUF_SIZE) | XCT_RXB_ADDR(dma);
		fill++;
	}

	wmb();

	write_dma_reg(PAS_DMA_RXINT_INCR(mac->dma_if), count);

	rx_ring(mac)->next_to_fill = (rx_ring(mac)->next_to_fill + count) &
				(RX_RING_SIZE - 1);
}

static void pasemi_mac_restart_rx_intr(const struct pasemi_mac *mac)
{
	struct pasemi_mac_rxring *rx = rx_ring(mac);
	unsigned int reg, pcnt;
	/* Re-enable packet count interrupts: finally
	 * ack the packet count interrupt we got in rx_intr.
	 */

	pcnt = *rx->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_RXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_RXCH_RESET_PINTC;

	if (*rx->chan.status & PAS_STATUS_TIMER)
		reg |= PAS_IOB_DMA_RXCH_RESET_TINTC;

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(mac->rx->chan.chno), reg);
}

static void pasemi_mac_restart_tx_intr(const struct pasemi_mac *mac)
{
	unsigned int reg, pcnt;

	/* Re-enable packet count interrupts */
	pcnt = *tx_ring(mac)->chan.status & PAS_STATUS_PCNT_M;

	reg = PAS_IOB_DMA_TXCH_RESET_PCNT(pcnt) | PAS_IOB_DMA_TXCH_RESET_PINTC;

	write_iob_reg(PAS_IOB_DMA_TXCH_RESET(tx_ring(mac)->chan.chno), reg);
}


static inline void pasemi_mac_rx_error(const struct pasemi_mac *mac,
				       const u64 macrx)
{
	unsigned int rcmdsta, ccmdsta;
	struct pasemi_dmachan *chan = &rx_ring(mac)->chan;

	if (!netif_msg_rx_err(mac))
		return;

	rcmdsta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	ccmdsta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: rx error. macrx %016lx, rx status %lx\n",
		macrx, *chan->status);

	printk(KERN_ERR "pasemi_mac: rcmdsta %08x ccmdsta %08x\n",
		rcmdsta, ccmdsta);
}

static inline void pasemi_mac_tx_error(const struct pasemi_mac *mac,
				       const u64 mactx)
{
	unsigned int cmdsta;
	struct pasemi_dmachan *chan = &tx_ring(mac)->chan;

	if (!netif_msg_tx_err(mac))
		return;

	cmdsta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(chan->chno));

	printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016lx, "
		"tx status 0x%016lx\n", mactx, *chan->status);

	printk(KERN_ERR "pasemi_mac: tcmdsta 0x%08x\n", cmdsta);
}

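/* RX cleanup: each completed packet occupies four 8-byte result descriptors
 * in the channel ring, and its 8B eval field points back at the buffer slot
 * that was posted by the replenish loop.
 */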
static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx,
			       const int limit)
{
	const struct pasemi_dmachan *chan = &rx->chan;
	struct pasemi_mac *mac = rx->mac;
	struct pci_dev *pdev = mac->dma_pdev;
	unsigned int n;
	int count, buf_index, tot_bytes, packets;
	struct pasemi_mac_buffer *info;
	struct sk_buff *skb;
	unsigned int len;
	u64 macrx, eval;
	dma_addr_t dma;

	tot_bytes = 0;
	packets = 0;

	spin_lock(&rx->lock);

	n = rx->next_to_clean;

	prefetch(&RX_DESC(rx, n));

	for (count = 0; count < limit; count++) {
		macrx = RX_DESC(rx, n);
		prefetch(&RX_DESC(rx, n+4));

		if ((macrx & XCT_MACRX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_rx_error(mac, macrx);

		if (!(macrx & XCT_MACRX_O))
			break;

		info = NULL;

		BUG_ON(!(macrx & XCT_MACRX_RR_8BRES));

		eval = (RX_DESC(rx, n+1) & XCT_RXRES_8B_EVAL_M) >>
			XCT_RXRES_8B_EVAL_S;
		buf_index = eval-1;

		dma = (RX_DESC(rx, n+2) & XCT_PTR_ADDR_M);
		info = &RX_DESC_INFO(rx, buf_index);

		skb = info->skb;

		prefetch_skb(skb);

		len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S;

		pci_unmap_single(pdev, dma, BUF_SIZE-LOCAL_SKB_ALIGN,
				 PCI_DMA_FROMDEVICE);

		if (macrx & XCT_MACRX_CRC) {
			/* CRC error flagged */
			mac->netdev->stats.rx_errors++;
			mac->netdev->stats.rx_crc_errors++;
			/* No need to free skb, it'll be reused */
			goto next;
		}

		info->skb = NULL;
		info->dma = 0;

		if (likely((macrx & XCT_MACRX_HTY_M) == XCT_MACRX_HTY_IPV4_OK)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum = (macrx & XCT_MACRX_CSUM_M) >>
					   XCT_MACRX_CSUM_S;
		} else
			skb->ip_summed = CHECKSUM_NONE;

		packets++;
		tot_bytes += len;

		/* Don't include CRC */
		skb_put(skb, len-4);

		skb->protocol = eth_type_trans(skb, mac->netdev);
		lro_receive_skb(&mac->lro_mgr, skb, (void *)macrx);

next:
		RX_DESC(rx, n) = 0;
		RX_DESC(rx, n+1) = 0;

		/* Need to zero it out since hardware doesn't, since the
		 * replenish loop uses it to tell when it's done.
		 */
		RX_BUFF(rx, buf_index) = 0;

		n += 4;
	}

	if (n > RX_RING_SIZE) {
		/* Errata 5971 workaround: L2 target of headers */
		write_iob_reg(PAS_IOB_COM_PKTHDRCNT, 0);
		n &= (RX_RING_SIZE-1);
	}

	rx_ring(mac)->next_to_clean = n;

	lro_flush_all(&mac->lro_mgr);

	/* Increase is in number of 16-byte entries, and since each descriptor
	 * with an 8BRES takes up 3x8 bytes (padded to 4x8), increase with
	 * count*2.
	 */
	write_dma_reg(PAS_DMA_RXCHAN_INCR(mac->rx->chan.chno), count << 1);

	pasemi_mac_replenish_rx_ring(mac->netdev, count);

	mac->netdev->stats.rx_bytes += tot_bytes;
	mac->netdev->stats.rx_packets += packets;

	spin_unlock(&rx_ring(mac)->lock);

	return count;
}

/* Can't make this too large or we blow the kernel stack limits */
#define TX_CLEAN_BATCHSIZE (128/MAX_SKB_FRAGS)

static int pasemi_mac_clean_tx(struct pasemi_mac_txring *txring)
{
	struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	int i, j;
	unsigned int start, descr_count, buf_count, batch_limit;
	unsigned int ring_limit;
	unsigned int total_count;
	unsigned long flags;
	struct sk_buff *skbs[TX_CLEAN_BATCHSIZE];
	dma_addr_t dmas[TX_CLEAN_BATCHSIZE][MAX_SKB_FRAGS+1];
	int nf[TX_CLEAN_BATCHSIZE];
	int nr_frags;

	total_count = 0;
	batch_limit = TX_CLEAN_BATCHSIZE;
restart:
	spin_lock_irqsave(&txring->lock, flags);

	start = txring->next_to_clean;
	ring_limit = txring->next_to_fill;

	prefetch(&TX_DESC_INFO(txring, start+1).skb);

	/* Compensate for when fill has wrapped but clean has not */
	if (start > ring_limit)
		ring_limit += TX_RING_SIZE;

	buf_count = 0;
	descr_count = 0;

	for (i = start;
	     descr_count < batch_limit && i < ring_limit;
	     i += buf_count) {
		u64 mactx = TX_DESC(txring, i);
		struct sk_buff *skb;

		skb = TX_DESC_INFO(txring, i+1).skb;
		nr_frags = TX_DESC_INFO(txring, i).dma;

		if ((mactx  & XCT_MACTX_E) ||
		    (*chan->status & PAS_STATUS_ERROR))
			pasemi_mac_tx_error(mac, mactx);

		if (unlikely(mactx & XCT_MACTX_O))
			/* Not yet transmitted */
			break;

		buf_count = 2 + nr_frags;
		/* Since we always fill with an even number of entries, make
		 * sure we skip any unused one at the end as well.
		 */
		if (buf_count & 1)
			buf_count++;

		for (j = 0; j <= nr_frags; j++)
			dmas[descr_count][j] = TX_DESC_INFO(txring, i+1+j).dma;

		skbs[descr_count] = skb;
		nf[descr_count] = nr_frags;

		TX_DESC(txring, i) = 0;
		TX_DESC(txring, i+1) = 0;

		descr_count++;
	}
	txring->next_to_clean = i & (TX_RING_SIZE-1);

	spin_unlock_irqrestore(&txring->lock, flags);
	netif_wake_queue(mac->netdev);

	for (i = 0; i < descr_count; i++)
		pasemi_mac_unmap_tx_skb(mac, nf[i], skbs[i], dmas[i]);

	total_count += descr_count;

	/* If the batch was full, try to clean more */
	if (descr_count == batch_limit)
		goto restart;

	return total_count;
}


static irqreturn_t pasemi_mac_rx_intr(int irq, void *data)
{
	const struct pasemi_mac_rxring *rxring = data;
	struct pasemi_mac *mac = rxring->mac;
	struct net_device *dev = mac->netdev;
	const struct pasemi_dmachan *chan = &rxring->chan;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	/* Don't reset packet count so it won't fire again but clear
	 * all others.
	 */

	reg = 0;
	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_RXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_RXCH_RESET_DINTC;

	netif_rx_schedule(dev, &mac->napi);

	write_iob_reg(PAS_IOB_DMA_RXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}

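/* Periodic TX reclaim: the timer below cleans the TX ring about once a
 * second so completed slots are freed even when TX interrupts are sparse;
 * the TX interrupt handler pushes the timer further out when it runs.
 */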
#define TX_CLEAN_INTERVAL HZ

static void pasemi_mac_tx_timer(unsigned long data)
{
	struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
	struct pasemi_mac *mac = txring->mac;

	pasemi_mac_clean_tx(txring);

	mod_timer(&txring->clean_timer, jiffies + TX_CLEAN_INTERVAL);

	pasemi_mac_restart_tx_intr(mac);
}

static irqreturn_t pasemi_mac_tx_intr(int irq, void *data)
{
	struct pasemi_mac_txring *txring = data;
	const struct pasemi_dmachan *chan = &txring->chan;
	struct pasemi_mac *mac = txring->mac;
	unsigned int reg;

	if (!(*chan->status & PAS_STATUS_CAUSE_M))
		return IRQ_NONE;

	reg = 0;

	if (*chan->status & PAS_STATUS_SOFT)
		reg |= PAS_IOB_DMA_TXCH_RESET_SINTC;
	if (*chan->status & PAS_STATUS_ERROR)
		reg |= PAS_IOB_DMA_TXCH_RESET_DINTC;

	mod_timer(&txring->clean_timer, jiffies + (TX_CLEAN_INTERVAL)*2);

	netif_rx_schedule(mac->netdev, &mac->napi);

	if (reg)
		write_iob_reg(PAS_IOB_DMA_TXCH_RESET(chan->chno), reg);

	return IRQ_HANDLED;
}

static void pasemi_mac_intf_disable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags &= ~PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

static void pasemi_mac_intf_enable(struct pasemi_mac *mac)
{
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	flags |= PAS_MAC_CFG_PCFG_PE;
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}

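/* phylib adjust_link callback: mirror the PHY's link/speed/duplex state
 * into the MAC's PCFG register and toggle the interface on link changes.
 */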
static void pasemi_adjust_link(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	int msg;
	unsigned int flags;
	unsigned int new_flags;

	if (!mac->phydev->link) {
		/* If no link, MAC speed settings don't matter. Just report
		 * link down and return.
		 */
		if (mac->link && netif_msg_link(mac))
			printk(KERN_INFO "%s: Link is down.\n", dev->name);

		netif_carrier_off(dev);
		pasemi_mac_intf_disable(mac);
		mac->link = 0;

		return;
	} else {
		pasemi_mac_intf_enable(mac);
		netif_carrier_on(dev);
	}

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);
	new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
			      PAS_MAC_CFG_PCFG_TSR_M);

	if (!mac->phydev->duplex)
		new_flags |= PAS_MAC_CFG_PCFG_HD;

	switch (mac->phydev->speed) {
	case 1000:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
			     PAS_MAC_CFG_PCFG_TSR_1G;
		break;
	case 100:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_100M |
			     PAS_MAC_CFG_PCFG_TSR_100M;
		break;
	case 10:
		new_flags |= PAS_MAC_CFG_PCFG_SPD_10M |
			     PAS_MAC_CFG_PCFG_TSR_10M;
		break;
	default:
		printk("Unsupported speed %d\n", mac->phydev->speed);
	}

	/* Print on link or speed/duplex change */
	msg = mac->link != mac->phydev->link || flags != new_flags;

	mac->duplex = mac->phydev->duplex;
	mac->speed = mac->phydev->speed;
	mac->link = mac->phydev->link;

	if (new_flags != flags)
		write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);

	if (msg && netif_msg_link(mac))
		printk(KERN_INFO "%s: Link is up at %d Mbps, %s duplex.\n",
		       dev->name, mac->speed, mac->duplex ? "full" : "half");
}

static int pasemi_mac_phy_init(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct device_node *dn, *phy_dn;
	struct phy_device *phydev;
	unsigned int phy_id;
	const phandle *ph;
	const unsigned int *prop;
	struct resource r;
	int ret;

	dn = pci_device_to_OF_node(mac->pdev);
	ph = of_get_property(dn, "phy-handle", NULL);
	if (!ph)
		return -ENODEV;
	phy_dn = of_find_node_by_phandle(*ph);

	prop = of_get_property(phy_dn, "reg", NULL);
	ret = of_address_to_resource(phy_dn->parent, 0, &r);
	if (ret)
		goto err;

	phy_id = *prop;
	snprintf(mac->phy_id, BUS_ID_SIZE, PHY_ID_FMT, (int)r.start, phy_id);

	of_node_put(phy_dn);

	mac->link = 0;
	mac->speed = 0;
	mac->duplex = -1;

	phydev = phy_connect(dev, mac->phy_id, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII);

	if (IS_ERR(phydev)) {
		printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
		return PTR_ERR(phydev);
	}

	mac->phydev = phydev;

	return 0;

err:
	of_node_put(phy_dn);
	return -ENODEV;
}


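/* Bring-up order: enable the common DMA RX/TX sections, program TX pause
 * parameters, allocate the RX/TX channels, set IOB timeout and interrupt
 * thresholds, bind the MAC to its RX channel, enable the RX interface and
 * both channels, prefill the RX buffer ring, enable the MAC, then attach
 * the PHY, request the channel IRQs and start the TX clean timer.
 */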
static int pasemi_mac_open(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;
	int ret;

	/* enable rx section */
	write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN);

	/* enable tx section */
	write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN);

	flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) |
		PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) |
		PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12);

	write_mac_reg(mac, PAS_MAC_CFG_TXP, flags);

	ret = pasemi_mac_setup_rx_resources(dev);
	if (ret)
		goto out_rx_resources;

	mac->tx = pasemi_mac_setup_tx_resources(dev);

	if (!mac->tx)
		goto out_tx_ring;

	/* 0x3ff with 33MHz clock is about 31us */
	write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG,
		      PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff));

	write_iob_reg(PAS_IOB_DMA_RXCH_CFG(mac->rx->chan.chno),
		      PAS_IOB_DMA_RXCH_CFG_CNTTH(256));

	write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno),
		      PAS_IOB_DMA_TXCH_CFG_CNTTH(32));

	write_mac_reg(mac, PAS_MAC_IPC_CHNL,
		      PAS_MAC_IPC_CHNL_DCHNO(mac->rx->chan.chno) |
		      PAS_MAC_IPC_CHNL_BCH(mac->rx->chan.chno));

	/* enable rx if */
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_EN |
		      PAS_DMA_RXINT_RCMDSTA_DROPS_M |
		      PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT);

	/* enable rx channel */
	pasemi_dma_start_chan(&rx_ring(mac)->chan, PAS_DMA_RXCHAN_CCMDSTA_DU |
						   PAS_DMA_RXCHAN_CCMDSTA_OD |
						   PAS_DMA_RXCHAN_CCMDSTA_FD |
						   PAS_DMA_RXCHAN_CCMDSTA_DT);

	/* enable tx channel */
	pasemi_dma_start_chan(&tx_ring(mac)->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ |
						   PAS_DMA_TXCHAN_TCMDSTA_DB |
						   PAS_DMA_TXCHAN_TCMDSTA_DE |
						   PAS_DMA_TXCHAN_TCMDSTA_DA);

	pasemi_mac_replenish_rx_ring(dev, RX_RING_SIZE);

	write_dma_reg(PAS_DMA_RXCHAN_INCR(rx_ring(mac)->chan.chno),
		      RX_RING_SIZE>>1);

	/* Clear out any residual packet count state from firmware */
	pasemi_mac_restart_rx_intr(mac);
	pasemi_mac_restart_tx_intr(mac);

	flags = PAS_MAC_CFG_PCFG_S1 | PAS_MAC_CFG_PCFG_PR | PAS_MAC_CFG_PCFG_CE;

	if (mac->type == MAC_TYPE_GMAC)
		flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
	else
		flags |= PAS_MAC_CFG_PCFG_TSR_10G | PAS_MAC_CFG_PCFG_SPD_10G;

	/* Enable interface in MAC */
	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);

	ret = pasemi_mac_phy_init(dev);
	if (ret) {
		/* Since we won't get link notification, just enable RX */
		pasemi_mac_intf_enable(mac);
		if (mac->type == MAC_TYPE_GMAC) {
			/* Warn for missing PHY on SGMII (1Gig) ports */
			dev_warn(&mac->pdev->dev,
				 "PHY init failed: %d.\n", ret);
			dev_warn(&mac->pdev->dev,
				 "Defaulting to 1Gbit full duplex\n");
		}
	}

	netif_start_queue(dev);
	napi_enable(&mac->napi);

	snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx",
		 dev->name);

	ret = request_irq(mac->tx->chan.irq, &pasemi_mac_tx_intr, IRQF_DISABLED,
			  mac->tx_irq_name, mac->tx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->tx->chan.irq, ret);
		goto out_tx_int;
	}

	snprintf(mac->rx_irq_name, sizeof(mac->rx_irq_name), "%s rx",
		 dev->name);

	ret = request_irq(mac->rx->chan.irq, &pasemi_mac_rx_intr, IRQF_DISABLED,
			  mac->rx_irq_name, mac->rx);
	if (ret) {
		dev_err(&mac->pdev->dev, "request_irq of irq %d failed: %d\n",
			mac->rx->chan.irq, ret);
		goto out_rx_int;
	}

	if (mac->phydev)
		phy_start(mac->phydev);

	init_timer(&mac->tx->clean_timer);
	mac->tx->clean_timer.function = pasemi_mac_tx_timer;
	mac->tx->clean_timer.data = (unsigned long)mac->tx;
	mac->tx->clean_timer.expires = jiffies+HZ;
	add_timer(&mac->tx->clean_timer);

	return 0;

out_rx_int:
	free_irq(mac->tx->chan.irq, mac->tx);
out_tx_int:
	napi_disable(&mac->napi);
	netif_stop_queue(dev);
out_tx_ring:
	if (mac->tx)
		pasemi_mac_free_tx_resources(mac);
	pasemi_mac_free_rx_resources(mac);
out_rx_resources:

	return ret;
}

#define MAX_RETRIES 5000

static int pasemi_mac_close(struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int sta;
	int retries;
	int rxch, txch;

	rxch = rx_ring(mac)->chan.chno;
	txch = tx_ring(mac)->chan.chno;

	if (mac->phydev) {
		phy_stop(mac->phydev);
		phy_disconnect(mac->phydev);
	}

	del_timer_sync(&mac->tx->clean_timer);

	netif_stop_queue(dev);
	napi_disable(&mac->napi);

	sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
	if (sta & (PAS_DMA_RXINT_RCMDSTA_BP |
		      PAS_DMA_RXINT_RCMDSTA_OO |
		      PAS_DMA_RXINT_RCMDSTA_BT))
		printk(KERN_DEBUG "pasemi_mac: rcmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
	if (sta & (PAS_DMA_RXCHAN_CCMDSTA_DU |
		     PAS_DMA_RXCHAN_CCMDSTA_OD |
		     PAS_DMA_RXCHAN_CCMDSTA_FD |
		     PAS_DMA_RXCHAN_CCMDSTA_DT))
		printk(KERN_DEBUG "pasemi_mac: ccmdsta error: 0x%08x\n", sta);

	sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
	if (sta & (PAS_DMA_TXCHAN_TCMDSTA_SZ | PAS_DMA_TXCHAN_TCMDSTA_DB |
		      PAS_DMA_TXCHAN_TCMDSTA_DE | PAS_DMA_TXCHAN_TCMDSTA_DA))
		printk(KERN_DEBUG "pasemi_mac: tcmdsta error: 0x%08x\n", sta);

	/* Clean out any pending buffers */
	pasemi_mac_clean_tx(tx_ring(mac));
	pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE);

	/* Disable interface */
	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch),
		      PAS_DMA_TXCHAN_TCMDSTA_ST);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if),
		      PAS_DMA_RXINT_RCMDSTA_ST);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch),
		      PAS_DMA_RXCHAN_CCMDSTA_ST);

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch));
		if (!(sta & PAS_DMA_TXCHAN_TCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_TXCHAN_TCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop tx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch));
		if (!(sta & PAS_DMA_RXCHAN_CCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXCHAN_CCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx channel\n");

	for (retries = 0; retries < MAX_RETRIES; retries++) {
		sta = read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if));
		if (!(sta & PAS_DMA_RXINT_RCMDSTA_ACT))
			break;
		cond_resched();
	}

	if (sta & PAS_DMA_RXINT_RCMDSTA_ACT)
		dev_err(&mac->dma_pdev->dev, "Failed to stop rx interface\n");

	/* Then, disable the channel. This must be done separately from
	 * stopping, since you can't disable when active.
	 */

	write_dma_reg(PAS_DMA_TXCHAN_TCMDSTA(txch), 0);
	write_dma_reg(PAS_DMA_RXCHAN_CCMDSTA(rxch), 0);
	write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), 0);

	free_irq(mac->tx->chan.irq, mac->tx);
	free_irq(mac->rx->chan.irq, mac->rx);

	/* Free resources */
	pasemi_mac_free_rx_resources(mac);
	pasemi_mac_free_tx_resources(mac);

	return 0;
}
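
/* TX descriptor layout used by pasemi_mac_start_tx()/pasemi_mac_clean_tx():
 * slot i holds the XCT_MACTX descriptor (with ring_info[i].dma doubling as
 * the fragment count), slot i+1 holds the first buffer pointer and owns the
 * skb (ring_info[i+1].skb), and the remaining fragment pointers follow,
 * padded to an even number of ring entries.
 */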

static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct pasemi_mac *mac = netdev_priv(dev);
	struct pasemi_mac_txring *txring;
	u64 dflags, mactx;
	dma_addr_t map[MAX_SKB_FRAGS+1];
	unsigned int map_size[MAX_SKB_FRAGS+1];
	unsigned long flags;
	int i, nfrags;
	int fill;

	dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const unsigned char *nh = skb_network_header(skb);

		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			dflags |= XCT_MACTX_CSUM_TCP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		case IPPROTO_UDP:
			dflags |= XCT_MACTX_CSUM_UDP;
			dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2);
			dflags |= XCT_MACTX_IPO(nh - skb->data);
			break;
		}
	}

	nfrags = skb_shinfo(skb)->nr_frags;

	map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
				PCI_DMA_TODEVICE);
	map_size[0] = skb_headlen(skb);
	if (dma_mapping_error(map[0]))
		goto out_err_nolock;

	for (i = 0; i < nfrags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		map[i+1] = pci_map_page(mac->dma_pdev, frag->page,
					frag->page_offset, frag->size,
					PCI_DMA_TODEVICE);
		map_size[i+1] = frag->size;
		if (dma_mapping_error(map[i+1])) {
			nfrags = i;
			goto out_err_nolock;
		}
	}

	mactx = dflags | XCT_MACTX_LLEN(skb->len);

	txring = tx_ring(mac);

	spin_lock_irqsave(&txring->lock, flags);

	fill = txring->next_to_fill;

	/* Avoid stepping on the same cache line that the DMA controller
	 * is currently about to send, so leave at least 8 words available.
	 * Total free space needed is mactx + fragments + 8
	 */
	if (RING_AVAIL(txring) < nfrags + 10) {
		/* no room -- stop the queue and wait for tx intr */
		netif_stop_queue(dev);
		goto out_err;
	}

	TX_DESC(txring, fill) = mactx;
	TX_DESC_INFO(txring, fill).dma = nfrags;
	fill++;
	TX_DESC_INFO(txring, fill).skb = skb;
	for (i = 0; i <= nfrags; i++) {
		TX_DESC(txring, fill+i) =
			XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]);
		TX_DESC_INFO(txring, fill+i).dma = map[i];
	}

	/* We have to add an even number of 8-byte entries to the ring
	 * even if the last one is unused. That means always an odd number
	 * of pointers + one mactx descriptor.
	 */
	if (nfrags & 1)
		nfrags++;

	txring->next_to_fill = (fill + nfrags + 1) & (TX_RING_SIZE-1);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;

	spin_unlock_irqrestore(&txring->lock, flags);

	write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), (nfrags+2) >> 1);

	return NETDEV_TX_OK;

out_err:
	spin_unlock_irqrestore(&txring->lock, flags);
out_err_nolock:
	while (nfrags--)
		pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags],
				 PCI_DMA_TODEVICE);

	return NETDEV_TX_BUSY;
}

static void pasemi_mac_set_rx_mode(struct net_device *dev)
{
	const struct pasemi_mac *mac = netdev_priv(dev);
	unsigned int flags;

	flags = read_mac_reg(mac, PAS_MAC_CFG_PCFG);

	/* Set promiscuous */
	if (dev->flags & IFF_PROMISC)
		flags |= PAS_MAC_CFG_PCFG_PR;
	else
		flags &= ~PAS_MAC_CFG_PCFG_PR;

	write_mac_reg(mac, PAS_MAC_CFG_PCFG, flags);
}


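/* NAPI poll callback: reclaim completed TX slots first, then receive up to
 * 'budget' packets; if the budget is not exhausted, complete NAPI and
 * re-arm the RX/TX packet count interrupts.
 */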
static int pasemi_mac_poll(struct napi_struct *napi, int budget)
{
	struct pasemi_mac *mac = container_of(napi, struct pasemi_mac, napi);
	struct net_device *dev = mac->netdev;
	int pkts;

	pasemi_mac_clean_tx(tx_ring(mac));
	pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
	if (pkts < budget) {
		/* all done, no more packets present */
		netif_rx_complete(dev, napi);

		pasemi_mac_restart_rx_intr(mac);
		pasemi_mac_restart_tx_intr(mac);
	}
	return pkts;
}

static int __devinit
pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct pasemi_mac *mac;
	int err;
	DECLARE_MAC_BUF(mac_buf);

	err = pci_enable_device(pdev);
	if (err)
		return err;

	dev = alloc_etherdev(sizeof(struct pasemi_mac));
	if (dev == NULL) {
		dev_err(&pdev->dev,
			"pasemi_mac: Could not allocate ethernet device.\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);

	mac = netdev_priv(dev);

	mac->pdev = pdev;
	mac->netdev = dev;

	netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64);

	dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG |
			NETIF_F_HIGHDMA;

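	/* Configure the software LRO manager: aggregate at most LRO_MAX_AGGR
	 * frames per descriptor, run from NAPI context, and rely on the MAC's
	 * hardware checksum verdict for aggregated sessions.
	 */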
	mac->lro_mgr.max_aggr = LRO_MAX_AGGR;
	mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	mac->lro_mgr.lro_arr = mac->lro_desc;
	mac->lro_mgr.get_skb_header = get_skb_hdr;
	mac->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	mac->lro_mgr.dev = mac->netdev;
	mac->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	mac->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;


	mac->dma_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa007, NULL);
	if (!mac->dma_pdev) {
		dev_err(&mac->pdev->dev, "Can't find DMA Controller\n");
		err = -ENODEV;
		goto out;
	}

	mac->iob_pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa001, NULL);
	if (!mac->iob_pdev) {
		dev_err(&mac->pdev->dev, "Can't find I/O Bridge\n");
		err = -ENODEV;
		goto out;
	}

	/* get mac addr from device tree */
	if (pasemi_get_mac_addr(mac) || !is_valid_ether_addr(mac->mac_addr)) {
		err = -ENODEV;
		goto out;
	}
	memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));

	mac->dma_if = mac_to_intf(mac);
	if (mac->dma_if < 0) {
		dev_err(&mac->pdev->dev, "Can't map DMA interface\n");
		err = -ENODEV;
		goto out;
	}

	switch (pdev->device) {
	case 0xa005:
		mac->type = MAC_TYPE_GMAC;
		break;
	case 0xa006:
		mac->type = MAC_TYPE_XAUI;
		break;
	default:
		err = -ENODEV;
		goto out;
	}

	dev->open = pasemi_mac_open;
	dev->stop = pasemi_mac_close;
	dev->hard_start_xmit = pasemi_mac_start_tx;
	dev->set_multicast_list = pasemi_mac_set_rx_mode;

	if (err)
		goto out;

	mac->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	/* Enable most messages by default */
	mac->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;

	err = register_netdev(dev);

	if (err) {
		dev_err(&mac->pdev->dev, "register_netdev failed with error %d\n",
			err);
		goto out;
	} else if (netif_msg_probe(mac))
		printk(KERN_INFO "%s: PA Semi %s: intf %d, hw addr %s\n",
		       dev->name, mac->type == MAC_TYPE_GMAC ? "GMAC" : "XAUI",
		       mac->dma_if, print_mac(mac_buf, dev->dev_addr));

	return err;

out:
	if (mac->iob_pdev)
		pci_dev_put(mac->iob_pdev);
	if (mac->dma_pdev)
		pci_dev_put(mac->dma_pdev);

	free_netdev(dev);
out_disable_device:
	pci_disable_device(pdev);
	return err;

}

static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct pasemi_mac *mac;

	if (!netdev)
		return;

	mac = netdev_priv(netdev);

	unregister_netdev(netdev);

	pci_disable_device(pdev);
	pci_dev_put(mac->dma_pdev);
	pci_dev_put(mac->iob_pdev);

	pasemi_dma_free_chan(&mac->tx->chan);
	pasemi_dma_free_chan(&mac->rx->chan);

	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);
}

static struct pci_device_id pasemi_mac_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
	{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
	{ },
};

MODULE_DEVICE_TABLE(pci, pasemi_mac_pci_tbl);

static struct pci_driver pasemi_mac_driver = {
	.name		= "pasemi_mac",
	.id_table	= pasemi_mac_pci_tbl,
	.probe		= pasemi_mac_probe,
	.remove		= __devexit_p(pasemi_mac_remove),
};

static void __exit pasemi_mac_cleanup_module(void)
{
	pci_unregister_driver(&pasemi_mac_driver);
}

int pasemi_mac_init_module(void)
{
	int err;

	err = pasemi_dma_init();
	if (err)
		return err;

	return pci_register_driver(&pasemi_mac_driver);
}

module_init(pasemi_mac_init_module);
module_exit(pasemi_mac_cleanup_module);