/*
 * Driver for (BCM4706)? GBit MAC core on BCMA bus.
 *
 * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */


#define pr_fmt(fmt)		KBUILD_MODNAME ": " fmt

#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
#include <linux/bcm47xx_nvram.h>
#include "bgmac.h"

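/* Poll @reg every 10 us until (reg & mask) == value; returns false and logs
 * an error if @timeout (in microseconds) expires first.
 */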
static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
			     u32 value, int timeout)
{
	u32 val;
	int i;

	for (i = 0; i < timeout / 10; i++) {
		val = bgmac_read(bgmac, reg);
		if ((val & mask) == value)
			return true;
		udelay(10);
	}
	dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
	return false;
}

/**************************************************
 * DMA
 **************************************************/

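/* Stop a TX ring: suspend it, wait for the engine to go idle, then clear the
 * control register and wait for the disabled state.
 */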
static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	u32 val;
	int i;

	if (!ring->mmio_base)
		return;

	/* Suspend the DMA TX ring first.
	 * bgmac_wait_value doesn't support waiting for any one of several
	 * values, so implement the whole loop here.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL,
		    BGMAC_DMA_TX_SUSPEND);
	for (i = 0; i < 10000 / 10; i++) {
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		val &= BGMAC_DMA_TX_STAT;
		if (val == BGMAC_DMA_TX_STAT_DISABLED ||
		    val == BGMAC_DMA_TX_STAT_IDLEWAIT ||
		    val == BGMAC_DMA_TX_STAT_STOPPED) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
			ring->mmio_base, val);

	/* Remove SUSPEND bit */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_TX_STATUS,
			      BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
			      10000)) {
		dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
			 ring->mmio_base);
		udelay(300);
		val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
		if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
			dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
				ring->mmio_base);
	}
}

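/* Enable a TX ring; cores with BGMAC_FEAT_TX_MASK_SETUP also get burst
 * length, outstanding reads and prefetch control/threshold tuned first.
 */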
static void bgmac_dma_tx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_TX_BL_MASK;
		ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_TX_MR_MASK;
		ctl |= BGMAC_DMA_TX_MR_2 << BGMAC_DMA_TX_MR_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PC_MASK;
		ctl |= BGMAC_DMA_TX_PC_16 << BGMAC_DMA_TX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
}

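/* Fill TX descriptor @i from slot @i's mapped buffer; the last ring slot
 * also carries the end-of-table flag.
 */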
static void
bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
		     int i, int len, u32 ctl0)
{
	struct bgmac_slot_info *slot;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl1;

	if (i == BGMAC_TX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;

	ctl1 = len & BGMAC_DESC_CTL1_LEN;

	slot = &ring->slots[i];
	dma_desc = &ring->cpu_base[i];
	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}

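/* Map an skb (head and all fragments) onto the TX ring, write its
 * descriptors and kick the hardware by advancing the TX index register.
 */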
static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
	int index = ring->end % BGMAC_TX_RING_SLOTS;
	struct bgmac_slot_info *slot = &ring->slots[index];
	int nr_frags;
	u32 flags;
	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
		goto err_drop;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb_checksum_help(skb);

	nr_frags = skb_shinfo(skb)->nr_frags;

	/* ring->end - ring->start will return the number of valid slots,
	 * even when ring->end overflows
	 */
	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}

	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
		goto err_dma_head;

	flags = BGMAC_DESC_CTL0_SOF;
	if (!nr_frags)
		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
	flags = 0;

	for (i = 0; i < nr_frags; i++) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		index = (index + 1) % BGMAC_TX_RING_SLOTS;
		slot = &ring->slots[index];
		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
						  len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
			goto err_dma;

		if (i == nr_frags - 1)
			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
	}

	slot->skb = skb;
	ring->end += nr_frags + 1;
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* ring->end now points to the first empty slot; tell the hardware the
	 * first slot it should *not* read.
	 */
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
		    (ring->end % BGMAC_TX_RING_SLOTS) *
		    sizeof(struct bgmac_dma_desc));

	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

err_dma:
	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
			 DMA_TO_DEVICE);

	while (i-- > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
	}

err_dma_head:
	netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
		   ring->mmio_base);

err_drop:
	dev_kfree_skb(skb);
	net_dev->stats.tx_dropped++;
	net_dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != ring->end) {
		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
		u32 ctl0, ctl1;
		int len;

		if (slot_idx == empty_slot)
			break;

		ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
		len = ctl1 & BGMAC_DESC_CTL1_LEN;
		if (ctl0 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
			dma_unmap_single(dma_dev, slot->dma_addr, len,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr, len,
				       DMA_TO_DEVICE);

		if (slot->skb) {
			bgmac->net_dev->stats.tx_bytes += slot->skb->len;
			bgmac->net_dev->stats.tx_packets++;
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
		}

		slot->dma_addr = 0;
		ring->start++;
		freed = true;
	}

	if (!pkts_compl)
		return;

	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
}

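/* Disable an RX ring and wait for the engine to report the disabled state */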
static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
	if (!ring->mmio_base)
		return;

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
	if (!bgmac_wait_value(bgmac,
			      ring->mmio_base + BGMAC_DMA_RX_STATUS,
			      BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
			      10000))
		dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
			ring->mmio_base);
}

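/* Enable an RX ring, programming the offset of the hardware RX header that
 * precedes every received frame.
 */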
static void bgmac_dma_rx_enable(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring)
{
	u32 ctl;

	ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
	if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
		ctl &= ~BGMAC_DMA_RX_BL_MASK;
		ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PC_MASK;
		ctl |= BGMAC_DMA_RX_PC_8 << BGMAC_DMA_RX_PC_SHIFT;

		ctl &= ~BGMAC_DMA_RX_PT_MASK;
		ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT;
	}
	ctl &= BGMAC_DMA_RX_ADDREXT_MASK;
	ctl |= BGMAC_DMA_RX_ENABLE;
	ctl |= BGMAC_DMA_RX_PARITY_DISABLE;
	ctl |= BGMAC_DMA_RX_OVERFLOW_CONT;
	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
}

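/* Allocate and DMA-map a fresh RX buffer for @slot, poisoning its RX header
 * so frames not yet written by the hardware can be detected.
 */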
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
{
	struct device *dma_dev = bgmac->dma_dev;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
	void *buf;

	/* Allocate a new buffer for the frame */
	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
	rx = buf + BGMAC_RX_BUF_OFFSET;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map the buffer for DMA */
	dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		netdev_err(bgmac->net_dev, "DMA mapping error\n");
		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
}

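/* Make refilled RX descriptors visible to the device and advance the RX
 * index register.
 */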
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}

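/* Write RX descriptor @desc_idx from the slot's mapped buffer; the last ring
 * slot also carries the end-of-table flag.
 */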
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == BGMAC_RX_RING_SLOTS - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
	/* Is there any BGMAC device that requires extension? */
	/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
	 * B43_DMA64_DCTL1_ADDREXT_MASK;
	 */

	dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);

	ring->end = desc_idx;
}

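/* Re-poison the RX header of a buffer that stays on the ring, e.g. when
 * allocating its replacement failed.
 */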
static void bgmac_dma_rx_poison_buf(struct device *dma_dev,
				    struct bgmac_slot_info *slot)
{
	struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;

	dma_sync_single_for_cpu(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				DMA_FROM_DEVICE);
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);
	dma_sync_single_for_device(dma_dev, slot->dma_addr, BGMAC_RX_BUF_SIZE,
				   DMA_FROM_DEVICE);
}

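/* NAPI RX: handle up to @weight frames between ring->start and the current
 * hardware descriptor pointer, refilling each slot as it is consumed.
 */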
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}

/* Does ring support unaligned addressing? */
static bool bgmac_dma_unaligned(struct bgmac *bgmac,
				struct bgmac_dma_ring *ring,
				enum bgmac_dma_ring_type ring_type)
{
	switch (ring_type) {
	case BGMAC_DMA_RING_TX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO))
			return true;
		break;
	case BGMAC_DMA_RING_RX:
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
}

static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_TX_RING_SLOTS; i++) {
		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;

		slot = &ring->slots[i];
		dev_kfree_skb(slot->skb);

		if (!slot->dma_addr)
			continue;

		if (slot->skb)
			dma_unmap_single(dma_dev, slot->dma_addr,
					 len, DMA_TO_DEVICE);
		else
			dma_unmap_page(dma_dev, slot->dma_addr,
				       len, DMA_TO_DEVICE);
	}
}

static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_slot_info *slot;
	int i;

	for (i = 0; i < BGMAC_RX_RING_SLOTS; i++) {
		slot = &ring->slots[i];
		if (!slot->dma_addr)
			continue;

		dma_unmap_single(dma_dev, slot->dma_addr,
				 BGMAC_RX_BUF_SIZE,
				 DMA_FROM_DEVICE);
		put_page(virt_to_head_page(slot->buf));
		slot->dma_addr = 0;
	}
}

static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
				     struct bgmac_dma_ring *ring,
				     int num_slots)
{
	struct device *dma_dev = bgmac->dma_dev;
	int size;

	if (!ring->cpu_base)
		return;

	/* Free ring of descriptors */
	size = num_slots * sizeof(struct bgmac_dma_desc);
	dma_free_coherent(dma_dev, size, ring->cpu_base,
			  ring->dma_base);
}

static void bgmac_dma_cleanup(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
}

static void bgmac_dma_free(struct bgmac *bgmac)
{
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i],
					 BGMAC_TX_RING_SLOTS);

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i],
					 BGMAC_RX_RING_SLOTS);
}

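/* Allocate coherent descriptor rings for TX and RX; the RX buffers
 * themselves are attached later, in bgmac_dma_init().
 */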
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
		dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
				ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
				ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}

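/* Program ring base addresses, enable the DMA engines and post an initial
 * buffer for every RX slot.
 */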
static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}


/**************************************************
 * Chip ops
 **************************************************/

/* TODO: can we just drop @force? Can we avoid resetting the MAC at all if
 * there is nothing to change? Try it after stabilizing the driver.
 */
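/* CMDCFG updates are applied with the MAC held in software reset (SR) */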
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;
	u32 cmdcfg_sr;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
	udelay(2);

	if (new_val != cmdcfg || force)
		bgmac_write(bgmac, BGMAC_CMDCFG, new_val);

	bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
	udelay(2);
}

static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
{
	u32 tmp;

	tmp = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3];
	bgmac_write(bgmac, BGMAC_MACADDR_HIGH, tmp);
	tmp = (addr[4] << 8) | addr[5];
	bgmac_write(bgmac, BGMAC_MACADDR_LOW, tmp);
}

static void bgmac_set_rx_mode(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	if (net_dev->flags & IFF_PROMISC)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_PROM, true);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_PROM, 0, true);
}

#if 0 /* We don't use these regs yet */
static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
	int i;

	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
		for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
			bgmac->mib_tx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_TX_GOOD_OCTETS + (i * 4));
		for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
			bgmac->mib_rx_regs[i] =
				bgmac_read(bgmac,
					   BGMAC_RX_GOOD_OCTETS + (i * 4));
	}

	/* TODO: what else? how to handle BCM4706? Specs are needed */
}
#endif

static void bgmac_clear_mib(struct bgmac *bgmac)
{
	int i;

	if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
		return;

	bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
	for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
		bgmac_read(bgmac, BGMAC_TX_GOOD_OCTETS + (i * 4));
	for (i = 0; i < BGMAC_NUM_MIB_RX_REGS; i++)
		bgmac_read(bgmac, BGMAC_RX_GOOD_OCTETS + (i * 4));
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_speed */
static void bgmac_mac_speed(struct bgmac *bgmac)
{
	u32 mask = ~(BGMAC_CMDCFG_ES_MASK | BGMAC_CMDCFG_HD);
	u32 set = 0;

	switch (bgmac->mac_speed) {
	case SPEED_10:
		set |= BGMAC_CMDCFG_ES_10;
		break;
	case SPEED_100:
		set |= BGMAC_CMDCFG_ES_100;
		break;
	case SPEED_1000:
		set |= BGMAC_CMDCFG_ES_1000;
		break;
	case SPEED_2500:
		set |= BGMAC_CMDCFG_ES_2500;
		break;
	default:
		dev_err(bgmac->dev, "Unsupported speed: %d\n",
			bgmac->mac_speed);
	}

	if (bgmac->mac_duplex == DUPLEX_HALF)
		set |= BGMAC_CMDCFG_HD;

	bgmac_cmdcfg_maskset(bgmac, mask, set, true);
}

static void bgmac_miiconfig(struct bgmac *bgmac)
{
	if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
				BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		u8 imode;

		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 iost;
	int i;

	if (bgmac_clk_enabled(bgmac)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bgmac_idm_read(bgmac, BCMA_IOST);
	if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
		u32 flags = 0;
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bgmac_clk_enable(bgmac, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
					buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
						  BGMAC_CHIPCTL_1_SW_TYPE_MASK),
				      sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bgmac_idm_write(bgmac, BCMA_IOCTL,
				bgmac_idm_read(bgmac, BCMA_IOCTL) &
				~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't mention using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in a reset, so the
	 * bit has to be kept set until taking the MAC out of the reset.
	 */
	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     cmdcfg_sr,
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
		bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
				    BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	if (bgmac->mii_bus)
		bgmac->mii_bus->reset(bgmac->mii_bus);

	netdev_reset_queue(bgmac->net_dev);
}

static void bgmac_chip_intrs_on(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, bgmac->int_mask);
}

static void bgmac_chip_intrs_off(struct bgmac *bgmac)
{
	bgmac_write(bgmac, BGMAC_INT_MASK, 0);
	bgmac_read(bgmac, BGMAC_INT_MASK);
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
	u32 cmdcfg_sr;
	u32 cmdcfg;
	u32 mode;

	if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
	else
		cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;

	cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
			     cmdcfg_sr, true);
	udelay(2);
	cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
	bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);

	mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
		BGMAC_DS_MM_SHIFT;
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
		bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
	if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
		bgmac_cco_ctl_maskset(bgmac, 1, ~0,
				      BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);

	if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
				    BGMAC_FEAT_FLW_CTRL2)) {
		u32 fl_ctl;

		if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
			fl_ctl = 0x2300e1;
		else
			fl_ctl = 0x03cb04cb;

		bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
		bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
	}

	if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
		u32 rxq_ctl;
		u16 bp_clk;
		u8 mdp;

		rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
		rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
		bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
		mdp = (bp_clk * 128 / 1000) - 3;
		rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
		bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
	}
}

/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac)
{
	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	bgmac_chip_intrs_on(bgmac);

	bgmac_enable(bgmac);
}

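/* IRQ handler: mask further interrupts and defer all work to NAPI */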
static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
{
	struct bgmac *bgmac = netdev_priv(dev_id);

	u32 int_status = bgmac_read(bgmac, BGMAC_INT_STATUS);
	int_status &= bgmac->int_mask;

	if (!int_status)
		return IRQ_NONE;

	int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
	if (int_status)
		dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);

	/* Disable new interrupts until handling existing ones */
	bgmac_chip_intrs_off(bgmac);

	napi_schedule(&bgmac->napi);

	return IRQ_HANDLED;
}

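/* NAPI poll: reclaim finished TX slots, receive up to @weight frames and
 * re-enable interrupts when no more work is pending.
 */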
static int bgmac_poll(struct napi_struct *napi, int weight)
{
	struct bgmac *bgmac = container_of(napi, struct bgmac, napi);
	int handled = 0;

	/* Ack */
	bgmac_write(bgmac, BGMAC_INT_STATUS, ~0);

	bgmac_dma_tx_free(bgmac, &bgmac->tx_ring[0]);
	handled += bgmac_dma_rx_read(bgmac, &bgmac->rx_ring[0], weight);

	/* Poll again if more events arrived in the meantime */
	if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
		return weight;

	if (handled < weight) {
		napi_complete(napi);
		bgmac_chip_intrs_on(bgmac);
	}

	return handled;
}

/**************************************************
 * net_device_ops
 **************************************************/

static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_init(bgmac);
	if (err)
		return err;

	/* Specs mention reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac);

	err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
		bgmac_dma_cleanup(bgmac);
		return err;
	}
	napi_enable(&bgmac->napi);

	phy_start(net_dev->phydev);

	netif_start_queue(net_dev);

	return 0;
}

static int bgmac_stop(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	netif_carrier_off(net_dev);

	phy_stop(net_dev->phydev);

	napi_disable(&bgmac->napi);
	bgmac_chip_intrs_off(bgmac);
	free_irq(bgmac->irq, net_dev);

	bgmac_chip_reset(bgmac);
	bgmac_dma_cleanup(bgmac);

	return 0;
}

static netdev_tx_t bgmac_start_xmit(struct sk_buff *skb,
				    struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct bgmac_dma_ring *ring;

	/* No QOS support yet */
	ring = &bgmac->tx_ring[0];
	return bgmac_dma_tx_add(bgmac, ring, skb);
}

static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int ret;

	ret = eth_prepare_mac_addr_change(net_dev, addr);
	if (ret < 0)
		return ret;
	bgmac_write_mac_address(bgmac, (u8 *)addr);
	eth_commit_mac_addr_change(net_dev, addr);
	return 0;
}

static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(net_dev))
		return -EINVAL;

	return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}

static const struct net_device_ops bgmac_netdev_ops = {
	.ndo_open		= bgmac_open,
	.ndo_stop		= bgmac_stop,
	.ndo_start_xmit		= bgmac_start_xmit,
	.ndo_set_rx_mode	= bgmac_set_rx_mode,
	.ndo_set_mac_address	= bgmac_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl           = bgmac_ioctl,
};

/**************************************************
 * ethtool_ops
 **************************************************/

struct bgmac_stat {
	u8 size;
	u32 offset;
	const char *name;
};

static struct bgmac_stat bgmac_get_strings_stats[] = {
	{ 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
	{ 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
	{ 8, BGMAC_TX_OCTETS, "tx_octets" },
	{ 4, BGMAC_TX_PKTS, "tx_pkts" },
	{ 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
	{ 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
	{ 4, BGMAC_TX_LEN_64, "tx_64" },
	{ 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
	{ 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
	{ 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
	{ 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
	{ 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
	{ 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
	{ 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
	{ 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
	{ 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
	{ 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
	{ 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
	{ 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
	{ 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
	{ 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
	{ 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
	{ 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
	{ 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
	{ 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
	{ 4, BGMAC_TX_DEFERED, "tx_defered" },
	{ 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
	{ 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
	{ 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
	{ 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
	{ 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
	{ 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
	{ 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
	{ 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
	{ 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
	{ 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
	{ 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
	{ 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
	{ 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
	{ 8, BGMAC_RX_OCTETS, "rx_octets" },
	{ 4, BGMAC_RX_PKTS, "rx_pkts" },
	{ 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
	{ 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
	{ 4, BGMAC_RX_LEN_64, "rx_64" },
	{ 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
	{ 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
	{ 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
	{ 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
	{ 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
	{ 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
	{ 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
	{ 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
	{ 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
	{ 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
	{ 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
	{ 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
	{ 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
	{ 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
	{ 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
	{ 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
	{ 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
	{ 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
	{ 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
	{ 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
	{ 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
	{ 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
};

#define BGMAC_STATS_LEN	ARRAY_SIZE(bgmac_get_strings_stats)

static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BGMAC_STATS_LEN;
	}

	return -EOPNOTSUPP;
}

static void bgmac_get_strings(struct net_device *dev, u32 stringset,
			      u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}

static void bgmac_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *ss, uint64_t *data)
{
	struct bgmac *bgmac = netdev_priv(dev);
	const struct bgmac_stat *s;
	unsigned int i;
	u64 val;

	if (!netif_running(dev))
		return;

	for (i = 0; i < BGMAC_STATS_LEN; i++) {
		s = &bgmac_get_strings_stats[i];
		val = 0;
		if (s->size == 8)
			val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
		val |= bgmac_read(bgmac, s->offset);
		data[i] = val;
	}
}

static void bgmac_get_drvinfo(struct net_device *net_dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}

static const struct ethtool_ops bgmac_ethtool_ops = {
	.get_strings		= bgmac_get_strings,
	.get_sset_count		= bgmac_get_sset_count,
	.get_ethtool_stats	= bgmac_get_ethtool_stats,
	.get_drvinfo		= bgmac_get_drvinfo,
	.get_link_ksettings     = phy_ethtool_get_link_ksettings,
	.set_link_ksettings     = phy_ethtool_set_link_ksettings,
};

/**************************************************
 * MII
 **************************************************/

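/* PHY state-change callback: propagate the negotiated speed/duplex to the
 * MAC configuration.
 */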
static void bgmac_adjust_link(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	struct phy_device *phy_dev = net_dev->phydev;
	bool update = false;

	if (phy_dev->link) {
		if (phy_dev->speed != bgmac->mac_speed) {
			bgmac->mac_speed = phy_dev->speed;
			update = true;
		}

		if (phy_dev->duplex != bgmac->mac_duplex) {
			bgmac->mac_duplex = phy_dev->duplex;
			update = true;
		}
	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
}

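/* Without an MDIO bus, register and attach to a fixed 1000/full PHY so the
 * link is always reported up.
 */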
static int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
	struct fixed_phy_status fphy_status = {
		.link = 1,
		.speed = SPEED_1000,
		.duplex = DUPLEX_FULL,
	};
	struct phy_device *phy_dev;
	int err;

	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
	if (!phy_dev || IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
		return -ENODEV;
	}

	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
				 PHY_INTERFACE_MODE_MII);
	if (err) {
		dev_err(bgmac->dev, "Connecting PHY failed\n");
		return err;
	}

	return err;
}

static int bgmac_phy_connect(struct bgmac *bgmac)
{
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];

	/* Connect to the PHY */
	snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
		 bgmac->phyaddr);
	phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
			      PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phy_dev)) {
		dev_err(bgmac->dev, "PHY connection failed\n");
		return PTR_ERR(phy_dev);
	}

	return 0;
}

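/* The bus front-end (e.g. bgmac-bcma) fills a temporary struct bgmac with
 * its device pointers, IRQ, MAC address and register accessors and passes it
 * here; it is copied into the private area of the newly allocated netdev.
 */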
int bgmac_enet_probe(struct bgmac *info)
{
	struct net_device *net_dev;
	struct bgmac *bgmac;
	int err;

	/* Allocation and references */
	net_dev = alloc_etherdev(sizeof(*bgmac));
	if (!net_dev)
		return -ENOMEM;

	net_dev->netdev_ops = &bgmac_netdev_ops;
	net_dev->ethtool_ops = &bgmac_ethtool_ops;
	bgmac = netdev_priv(net_dev);
	memcpy(bgmac, info, sizeof(*bgmac));
	bgmac->net_dev = net_dev;
	net_dev->irq = bgmac->irq;
	SET_NETDEV_DEV(net_dev, bgmac->dev);

	if (!is_valid_ether_addr(bgmac->mac_addr)) {
		dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
			bgmac->mac_addr);
		eth_random_addr(bgmac->mac_addr);
		dev_warn(bgmac->dev, "Using random MAC: %pM\n",
			 bgmac->mac_addr);
	}
	ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);

	/* This (reset &) enable is not present in the specs or the reference
	 * driver, but Broadcom does it in arch PCI code when enabling a fake
	 * PCI device.
	 */
	bgmac_clk_enable(bgmac, 0);

	bgmac_chip_reset(bgmac);

	err = bgmac_dma_alloc(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
		goto err_netdev_free;
	}

	bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
	if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
		bgmac->int_mask &= ~BGMAC_IS_TX_MASK;

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	if (!bgmac->mii_bus)
		err = bgmac_phy_connect_direct(bgmac);
	else
		err = bgmac_phy_connect(bgmac);
	if (err) {
		dev_err(bgmac->dev, "Cannot connect to phy\n");
		goto err_dma_free;
	}

	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	net_dev->hw_features = net_dev->features;
	net_dev->vlan_features = net_dev->features;

	err = register_netdev(bgmac->net_dev);
	if (err) {
		dev_err(bgmac->dev, "Cannot register net device\n");
		goto err_phy_disconnect;
	}

	netif_carrier_off(net_dev);

	return 0;

err_phy_disconnect:
	phy_disconnect(net_dev->phydev);
err_dma_free:
	bgmac_dma_free(bgmac);
err_netdev_free:
	free_netdev(net_dev);

	return err;
}
EXPORT_SYMBOL_GPL(bgmac_enet_probe);

void bgmac_enet_remove(struct bgmac *bgmac)
{
	unregister_netdev(bgmac->net_dev);
	phy_disconnect(bgmac->net_dev->phydev);
	netif_napi_del(&bgmac->napi);
	bgmac_dma_free(bgmac);
	free_netdev(bgmac->net_dev);
}
EXPORT_SYMBOL_GPL(bgmac_enet_remove);

MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");