/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */


#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/bcm47xx_nvram.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>

#include "bgmac.h"

static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any one of several
	 * values, so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

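	/* EOT on the last slot tells the DMA engine to wrap back to slot 0 */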
	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

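	/* The MAC doesn't compute TX checksums for us here, so finish any
	 * partial checksum in software before mapping the buffer for DMA.
	 */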
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
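	/* (Example, assuming the indices wrap: start = 0xfffffffe and
	 * end = 0x00000002 still give end - start = 4 used slots.)
	 */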
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end points at the first empty slot; tell the hardware this
	 * is the first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));
	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
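	/* The status DPTR field is a byte offset into the descriptor ring
	 * (relative to index_base on unaligned rings); dividing by the
	 * descriptor size converts it to a slot index.
	 */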
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;
		if (slot_idx == empty_slot)
			break;
		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);
		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);

	/* preserve ONLY bits 16-17 from current hardware value */
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;

	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Allocate a page-fragment buffer; the skb is built later, in the RX path */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
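	/* Make sure the callers' descriptor writes are visible to the
	 * device before the new index is posted.
	 */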
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

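		/* do { } while (0): error paths break out early but still
		 * fall through to re-arm this slot's descriptor below
		 */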
		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}
			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);
			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);
		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
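	/* Write a non-zero offset to the ring-address low register and read
	 * it back; if it sticks, the engine accepts unaligned ring bases.
	 */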
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}
		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}


/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

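	/* The MAC is held in software reset (the SR bit) while CMDCFG is
	 * rewritten, then released ~2us later; the register appears to
	 * require the MAC to be in reset while written.
	 */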
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;
	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

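		/* Device-status interface modes 0 and 1 appear to mean an
		 * internal 100 Mbit/s link, so force the MAC speed for them.
		 */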
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 iost;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE) {
		u32 sw_type = BGMAC_CHIPCTL_4_IF_TYPE_MII |
			      BGMAC_CHIPCTL_4_SW_TYPE_EPHY;
		u8 et_swtype = 0;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			sw_type = (et_swtype & 0x0f) << 12;
		} else if (bgmac->feature_flags & BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_4_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_4_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 4, ~(BGMAC_CHIPCTL_4_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_4_SW_TYPE_MASK),
				      sw_type);
	} else if (bgmac->feature_flags & BGMAC_FEAT_CC7_IF_TYPE_RGMII) {
		bgmac_cco_ctl_maskset(bgmac, 7, ~BGMAC_CHIPCTL_7_IF_TYPE_MASK,
				      BGMAC_CHIPCTL_7_IF_TYPE_RGMII);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset, so the
	 * bit has to be kept until taking the MAC out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);
	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
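		/* Scale the RXQ MDP field with the backplane clock in MHz;
		 * e.g. a 125 MHz bus clock gives (125 * 128 / 1000) - 3 = 13.
		 */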
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* Clear any erroneously pending interrupts */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);
	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);
	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete_done(napi, handled);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs say to reclaim rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);
	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);
	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);
	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct sockaddr *sa = addr;
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;

	ether_addr_copy(net_dev->dev_addr, sa->sa_data);
	bgmac_write_mac_address(bgmac, net_dev->dev_addr);

	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl           = bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}
EXPORT_SYMBOL_GPL(bgmac_adjust_link);
int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
struct bgmac *bgmac_alloc(struct device *dev)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;

	/* Allocation and references */
	net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
	if (!net_dev)
		return NULL;
	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	bgmac->dev = dev;
	bgmac->net_dev = net_dev;

	return bgmac;
}
EXPORT_SYMBOL_GPL(bgmac_alloc);

int bgmac_enet_probe(struct bgmac *bgmac)
{
	struct net_device *net_dev = bgmac->net_dev;
	int err;

	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);
	dev_set_drvdata(bgmac->dev, bgmac);
	if (!is_valid_ether_addr(net_dev->dev_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			net_dev->dev_addr);
		eth_hw_addr_random(net_dev);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 net_dev->dev_addr);
	}

1494 1495 1496 1497
	/* This (reset &) enable is not present in specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling a
	 * fake PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);
	/* This seems to be fixing IRQ by assigning OOB #6 to the core */
	if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6)
		bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_out;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
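	/* If the "et0_no_txint" NVRAM variable is present, mask TX-complete
	 * interrupts; TX reclaim then happens from NAPI polls triggered by
	 * the remaining (RX/error) interrupts.
	 */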
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_out:

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);
void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);
int bgmac_enet_suspend(struct bgmac *bgmac)
{
	if (!netif_running(bgmac->net_dev))
		return 0;

	phy_stop(bgmac->net_dev->phydev);

	netif_stop_queue(bgmac->net_dev);

	napi_disable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_detach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	bgmac_chip_intrs_off(bgmac);
	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_suspend);

int bgmac_enet_resume(struct bgmac *bgmac)
{
	int rc;

	if (!netif_running(bgmac->net_dev))
		return 0;

	rc = bgmac_dma_init(bgmac);
	if (rc)
		return rc;

	bgmac_chip_init(bgmac);

	napi_enable(&bgmac->napi);

	netif_tx_lock(bgmac->net_dev);
	netif_device_attach(bgmac->net_dev);
	netif_tx_unlock(bgmac->net_dev);

	netif_start_queue(bgmac->net_dev);

	phy_start(bgmac->net_dev->phydev);

	return 0;
}
EXPORT_SYMBOL_GPL(bgmac_enet_resume);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");