/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend the DMA TX ring first.
	 * bgmac_wait_value() can't wait for any one of several values, so
	 * implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
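	/* Note: "i" doubles as a timeout flag below; it is zeroed as soon
	 * as the ring reports a suspended/stopped state.
	 */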
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
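	/* The fields below appear to tune DMA burst length (BL),
	 * outstanding reads (MR) and prefetching (PC/PT); only cores
	 * flagged with BGMAC_FEAT_TX_MASK_SETUP need non-default values.
	 */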
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

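	/* The core doesn't offload TX checksums, so CHECKSUM_PARTIAL skbs
	 * are resolved in software before the buffer is handed to DMA.
	 */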
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start yields the number of used slots, even
	 * when ring->end has wrapped around (unsigned arithmetic)
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	/* Only the last slot of a frame keeps the skb pointer;
	 * bgmac_dma_tx_free() frees it once the hardware has consumed all
	 * of the frame's descriptors.
	 */
	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	/* Make the descriptor writes visible to the device before updating
	 * the index register.
	 */
	wmb();

	/* ring->end points past the last used slot; tell the hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet; the TX status
	 * register holds a byte offset relative to index_base, so convert
	 * it to a slot index.
	 */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
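	/* The frame offset seems to tell the hardware where to place the
	 * frame within each buffer, leaving room for the RX header it
	 * writes at the start (see bgmac_dma_rx_read()).
	 */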
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Alloc a buffer fragment (an skb is built around it on receive) */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
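	/* Ensure the descriptors are fully written before the index update
	 * hands them to the hardware.
	 */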
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		/* do { } while (0) is used so that error paths can bail out
		 * with "break" and still fall through to the descriptor
		 * re-arm below.
		 */
		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
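	/* Probe by writing a deliberately unaligned address to the ring
	 * register: if the core keeps it, unaligned descriptor rings are
	 * supported.
	 */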
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

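		/* With an unaligned ring the hardware apparently reports
		 * descriptor pointers relative to the full ring base, so
		 * bias every index read/write by index_base.
		 */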
		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

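		/* Unaligned rings are enabled only after the ring base
		 * registers are programmed; aligned rings are enabled first.
		 */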
		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}


/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all when
 * there is nothing to change? Try it once the driver has stabilized.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	/* The MAC is held in software reset around the update: assert SR,
	 * write the new value, then deassert SR.
	 */
	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 iost;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;

		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

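		/* An et_swtype NVRAM variable, when present, overrides the
		 * board's default switch type.
		 */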
		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * The specs don't mention BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit
	 * has to be kept set until the MAC is taken out of reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	/* Read back to flush the posted write, so interrupts really are
	 * masked when we return.
	 */
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
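		/* Presumably scales the MDP field with the backplane clock
		 * (bp_clk is in MHz here).
		 */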
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

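	/* TX0 and RX are the only interrupt sources we service; anything
	 * else is reported as unexpected.
	 */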
	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* The specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl           = bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
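		/* 64-bit counters are split across two registers: read the
		 * high word first, then OR in the low word.
		 */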
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);

int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);

struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;

	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);

int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);

	if (!is_valid_ether_addr(bgmac->mac_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			bgmac->mac_addr);
		eth_random_addr(bgmac->mac_addr);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 bgmac->mac_addr);
	}
	ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);

	/* This (reset &) enable is not present in the specs or reference
	 * driver, but Broadcom does it in arch PCI code when enabling the
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	/* This seems to fix the IRQ by assigning OOB #6 to the core */
	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
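	/* With TX interrupts masked, TX reclaim is driven from the NAPI
	 * poll, which always calls bgmac_dma_tx_free().
	 */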

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");