// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2014-2016 Freescale Semiconductor Inc.
 * Copyright 2016-2020 NXP
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/of_net.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/kthread.h>
#include <linux/iommu.h>
#include <linux/net_tstamp.h>
#include <linux/fsl/mc.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <net/sock.h>

#include "dpaa2-eth.h"

/* CREATE_TRACE_POINTS only needs to be defined once. Other dpa files
 * using trace events only need to #include <trace/events/sched.h>
 */
#define CREATE_TRACE_POINTS
#include "dpaa2-eth-trace.h"

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver");

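/* Translate a buffer address from the hardware's IOVA space to a CPU
 * virtual address, going through the IOMMU domain when one is attached
 * to the device
 */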
static void *dpaa2_iova_to_virt(struct iommu_domain *domain,
				dma_addr_t iova_addr)
{
	phys_addr_t phys_addr;

	phys_addr = domain ? iommu_iova_to_phys(domain, iova_addr) : iova_addr;

	return phys_to_virt(phys_addr);
}

static void validate_rx_csum(struct dpaa2_eth_priv *priv,
			     u32 fd_status,
			     struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* HW checksum validation is disabled, nothing to do here */
	if (!(priv->net_dev->features & NETIF_F_RXCSUM))
		return;

	/* Read checksum validation bits */
	if (!((fd_status & DPAA2_FAS_L3CV) &&
	      (fd_status & DPAA2_FAS_L4CV)))
		return;

	/* Inform the stack there's no need to compute L3/L4 csum anymore */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Free a received FD.
 * Not to be used for Tx conf FDs or on any other paths.
 */
static void free_rx_fd(struct dpaa2_eth_priv *priv,
		       const struct dpaa2_fd *fd,
		       void *vaddr)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	struct dpaa2_sg_entry *sgt;
	void *sg_vaddr;
	int i;

	/* If single buffer frame, just free the data buffer */
	if (fd_format == dpaa2_fd_single)
		goto free_buf;
	else if (fd_format != dpaa2_fd_sg)
		/* We don't support any other format */
		return;

	/* For S/G frames, we first need to free all SG entries
	 * except the first one, which was taken care of already
	 */
	sgt = vaddr + dpaa2_fd_get_offset(fd);
	for (i = 1; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		addr = dpaa2_sg_get_addr(&sgt[i]);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		free_pages((unsigned long)sg_vaddr, 0);
		if (dpaa2_sg_is_final(&sgt[i]))
			break;
	}

free_buf:
	free_pages((unsigned long)vaddr, 0);
}

/* Build a linear skb based on a single-buffer frame descriptor */
static struct sk_buff *build_linear_skb(struct dpaa2_eth_channel *ch,
					const struct dpaa2_fd *fd,
					void *fd_vaddr)
{
	struct sk_buff *skb = NULL;
	u16 fd_offset = dpaa2_fd_get_offset(fd);
	u32 fd_length = dpaa2_fd_get_len(fd);

	ch->buf_count--;

	skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, fd_offset);
	skb_put(skb, fd_length);

	return skb;
}

/* Build a non-linear (fragmented) skb based on an S/G table */
static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv,
				      struct dpaa2_eth_channel *ch,
				      struct dpaa2_sg_entry *sgt)
{
	struct sk_buff *skb = NULL;
	struct device *dev = priv->net_dev->dev.parent;
	void *sg_vaddr;
	dma_addr_t sg_addr;
	u16 sg_offset;
	u32 sg_length;
	struct page *page, *head_page;
	int page_offset;
	int i;

	for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) {
		struct dpaa2_sg_entry *sge = &sgt[i];

		/* NOTE: We only support SG entries in dpaa2_sg_single format,
		 * but this is the only format we may receive from HW anyway
		 */

		/* Get the address and length from the S/G entry */
		sg_addr = dpaa2_sg_get_addr(sge);
		sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr);
		dma_unmap_page(dev, sg_addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);

		sg_length = dpaa2_sg_get_len(sge);

		if (i == 0) {
			/* We build the skb around the first data buffer */
			skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_RAW_SIZE);
			if (unlikely(!skb)) {
				/* Free the first SG entry now, since we already
				 * unmapped it and obtained the virtual address
				 */
				free_pages((unsigned long)sg_vaddr, 0);

				/* We still need to subtract the buffers used
				 * by this FD from our software counter
				 */
				while (i < DPAA2_ETH_MAX_SG_ENTRIES &&
				       !dpaa2_sg_is_final(&sgt[i]))
					i++;
				break;
			}

			sg_offset = dpaa2_sg_get_offset(sge);
			skb_reserve(skb, sg_offset);
			skb_put(skb, sg_length);
		} else {
			/* Rest of the data buffers are stored as skb frags */
			page = virt_to_page(sg_vaddr);
			head_page = virt_to_head_page(sg_vaddr);

			/* Offset in page (which may be compound).
			 * Data in subsequent SG entries is stored from the
			 * beginning of the buffer, so we don't need to add the
			 * sg_offset.
			 */
			page_offset = ((unsigned long)sg_vaddr &
				(PAGE_SIZE - 1)) +
				(page_address(page) - page_address(head_page));

			skb_add_rx_frag(skb, i - 1, head_page, page_offset,
					sg_length, priv->rx_buf_size);
		}

		if (dpaa2_sg_is_final(sge))
			break;
	}

	WARN_ONCE(i == DPAA2_ETH_MAX_SG_ENTRIES, "Final bit not set in SGT");

	/* Count all data buffers + SG table buffer */
	ch->buf_count -= i + 2;

	return skb;
}

/* Free buffers acquired from the buffer pool or which were meant to
 * be released in the pool
 */
static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *vaddr;
	int i;

	for (i = 0; i < count; i++) {
		vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]);
		dma_unmap_page(dev, buf_array[i], priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		free_pages((unsigned long)vaddr, 0);
	}
}

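/* Rx buffers to be recycled are gathered in a per-channel array and released
 * back to the pool in batches of DPAA2_ETH_BUFS_PER_CMD; if the release
 * command keeps failing, free the buffers to the system instead
 */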
static void xdp_release_buf(struct dpaa2_eth_priv *priv,
			    struct dpaa2_eth_channel *ch,
			    dma_addr_t addr)
{
	int retries = 0;
	int err;

	ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr;
	if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD)
		return;

	while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid,
					       ch->xdp.drop_bufs,
					       ch->xdp.drop_cnt)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	if (err) {
		free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt);
		ch->buf_count -= ch->xdp.drop_cnt;
	}

	ch->xdp.drop_cnt = 0;
}

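/* Enqueue a batch of XDP frame descriptors to the same FQ, retrying on
 * portal busy up to a per-frame retry budget; returns the number of frames
 * actually enqueued
 */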
static int dpaa2_eth_xdp_flush(struct dpaa2_eth_priv *priv,
			       struct dpaa2_eth_fq *fq,
			       struct dpaa2_eth_xdp_fds *xdp_fds)
{
	int total_enqueued = 0, retries = 0, enqueued;
	struct dpaa2_eth_drv_stats *percpu_extras;
	int num_fds, err, max_retries;
	struct dpaa2_fd *fds;

	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	/* try to enqueue all the FDs until the max number of retries is hit */
	fds = xdp_fds->fds;
	num_fds = xdp_fds->num;
	max_retries = num_fds * DPAA2_ETH_ENQUEUE_RETRIES;
	while (total_enqueued < num_fds && retries < max_retries) {
		err = priv->enqueue(priv, fq, &fds[total_enqueued],
				    0, num_fds - total_enqueued, &enqueued);
		if (err == -EBUSY) {
			percpu_extras->tx_portal_busy += ++retries;
			continue;
		}
		total_enqueued += enqueued;
	}
	xdp_fds->num = 0;

	return total_enqueued;
}

static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd,
		       void *buf_start, u16 queue_id)
{
	struct dpaa2_eth_fq *fq;
	struct dpaa2_faead *faead;
	u32 ctrl, frc;
	int i, err;

	/* Mark the egress frame hardware annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);
	dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL);

	/* Instruct hardware to release the FD buffer directly into
	 * the buffer pool once transmission is completed, instead of
	 * sending a Tx confirmation frame to us
	 */
	ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV;
	faead = dpaa2_get_faead(buf_start, false);
	faead->ctrl = cpu_to_le32(ctrl);
	faead->conf_fqid = 0;

	fq = &priv->fq[queue_id];
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, fd, 0, 1, NULL);
		if (err != -EBUSY)
			break;
	}

	return err;
}

static u32 run_xdp(struct dpaa2_eth_priv *priv,
		   struct dpaa2_eth_channel *ch,
		   struct dpaa2_eth_fq *rx_fq,
		   struct dpaa2_fd *fd, void *vaddr)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	struct rtnl_link_stats64 *percpu_stats;
	struct bpf_prog *xdp_prog;
	struct xdp_buff xdp;
	u32 xdp_act = XDP_PASS;
	int err;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	rcu_read_lock();

	xdp_prog = READ_ONCE(ch->xdp.prog);
	if (!xdp_prog)
		goto out;

	xdp.data = vaddr + dpaa2_fd_get_offset(fd);
	xdp.data_end = xdp.data + dpaa2_fd_get_len(fd);
	xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
	xdp_set_data_meta_invalid(&xdp);
	xdp.rxq = &ch->xdp_rxq;

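	/* frame_sz is counted from xdp.data_hard_start, which sits
	 * dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM bytes into the buffer
	 */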
	xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE -
		(dpaa2_fd_get_offset(fd) - XDP_PACKET_HEADROOM);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* xdp.data pointer may have changed */
	dpaa2_fd_set_offset(fd, xdp.data - vaddr);
	dpaa2_fd_set_len(fd, xdp.data_end - xdp.data);

	switch (xdp_act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid);
		if (err) {
			xdp_release_buf(priv, ch, addr);
			percpu_stats->tx_errors++;
			ch->stats.xdp_tx_err++;
		} else {
			percpu_stats->tx_packets++;
			percpu_stats->tx_bytes += dpaa2_fd_get_len(fd);
			ch->stats.xdp_tx++;
		}
		break;
	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		/* fall through */
	case XDP_ABORTED:
		trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act);
		/* fall through */
	case XDP_DROP:
		xdp_release_buf(priv, ch, addr);
		ch->stats.xdp_drop++;
		break;
	case XDP_REDIRECT:
		dma_unmap_page(priv->net_dev->dev.parent, addr,
			       priv->rx_buf_size, DMA_BIDIRECTIONAL);
		ch->buf_count--;

		/* Allow redirect use of full headroom */
		xdp.data_hard_start = vaddr;
		xdp.frame_sz = DPAA2_ETH_RX_BUF_RAW_SIZE;

		err = xdp_do_redirect(priv->net_dev, &xdp, xdp_prog);
		if (unlikely(err))
			ch->stats.xdp_drop++;
		else
			ch->stats.xdp_redirect++;
		break;
	}

	ch->xdp.res |= xdp_act;
out:
	rcu_read_unlock();
	return xdp_act;
}

/* Main Rx frame processing routine */
static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *ch,
			 const struct dpaa2_fd *fd,
			 struct dpaa2_eth_fq *fq)
{
	dma_addr_t addr = dpaa2_fd_get_addr(fd);
	u8 fd_format = dpaa2_fd_get_format(fd);
	void *vaddr;
	struct sk_buff *skb;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_fas *fas;
	void *buf_data;
	u32 status = 0;
	u32 xdp_act;

	/* Tracing point */
	trace_dpaa2_rx_fd(priv->net_dev, fd);

	vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr);
	dma_sync_single_for_cpu(dev, addr, priv->rx_buf_size,
				DMA_BIDIRECTIONAL);

	fas = dpaa2_get_fas(vaddr, false);
	prefetch(fas);
	buf_data = vaddr + dpaa2_fd_get_offset(fd);
	prefetch(buf_data);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	if (fd_format == dpaa2_fd_single) {
		xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr);
		if (xdp_act != XDP_PASS) {
			percpu_stats->rx_packets++;
			percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
			return;
		}

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = build_linear_skb(ch, fd, vaddr);
	} else if (fd_format == dpaa2_fd_sg) {
		WARN_ON(priv->xdp_prog);

		dma_unmap_page(dev, addr, priv->rx_buf_size,
			       DMA_BIDIRECTIONAL);
		skb = build_frag_skb(priv, ch, buf_data);
		free_pages((unsigned long)vaddr, 0);
		percpu_extras->rx_sg_frames++;
		percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd);
	} else {
		/* We don't support any other format */
		goto err_frame_format;
	}

	if (unlikely(!skb))
		goto err_build_skb;

	prefetch(skb->data);

	/* Get the timestamp value */
	if (priv->rx_tstamp) {
		struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
		__le64 *ts = dpaa2_get_ts(vaddr, false);
		u64 ns;

		memset(shhwtstamps, 0, sizeof(*shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps->hwtstamp = ns_to_ktime(ns);
	}

	/* Check if we need to validate the L3/L4 csum */
	if (likely(dpaa2_fd_get_frc(fd) & DPAA2_FD_FRC_FASV)) {
		status = le32_to_cpu(fas->status);
		validate_rx_csum(priv, status, skb);
	}

	skb->protocol = eth_type_trans(skb, priv->net_dev);
	skb_record_rx_queue(skb, fq->flowid);

	percpu_stats->rx_packets++;
	percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);

	list_add_tail(&skb->list, ch->rx_list);

	return;

err_build_skb:
	free_rx_fd(priv, fd, vaddr);
err_frame_format:
	percpu_stats->rx_dropped++;
}

/* Consume all frames pull-dequeued into the store. This is the simplest way to
 * make sure we don't accidentally issue another volatile dequeue which would
 * overwrite (leak) frames already in the store.
 *
 * Observance of NAPI budget is not our concern, leaving that to the caller.
 */
static int consume_frames(struct dpaa2_eth_channel *ch,
			  struct dpaa2_eth_fq **src)
{
	struct dpaa2_eth_priv *priv = ch->priv;
	struct dpaa2_eth_fq *fq = NULL;
	struct dpaa2_dq *dq;
	const struct dpaa2_fd *fd;
	int cleaned = 0, retries = 0;
	int is_last;

	do {
		dq = dpaa2_io_store_next(ch->store, &is_last);
		if (unlikely(!dq)) {
			/* If we're here, we *must* have placed a
			 * volatile dequeue command, so keep reading through
			 * the store until we get some sort of valid response
			 * token (either a valid frame or an "empty dequeue")
			 */
			if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES) {
				netdev_err_once(priv->net_dev,
						"Unable to read a valid dequeue response\n");
				return -ETIMEDOUT;
			}
			continue;
		}

		fd = dpaa2_dq_fd(dq);
		fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq);

		fq->consume(priv, ch, fd, fq);
		cleaned++;
		retries = 0;
	} while (!is_last);

	if (!cleaned)
		return 0;

	fq->stats.frames += cleaned;
	ch->stats.frames += cleaned;

	/* A dequeue operation only pulls frames from a single queue
	 * into the store. Return the frame queue as an out param.
	 */
	if (src)
		*src = fq;

	return cleaned;
}

/* Configure the egress frame annotation for timestamp update */
static void enable_tx_tstamp(struct dpaa2_fd *fd, void *buf_start)
{
	struct dpaa2_faead *faead;
	u32 ctrl, frc;

	/* Mark the egress frame annotation area as valid */
	frc = dpaa2_fd_get_frc(fd);
	dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV);

	/* Set hardware annotation size */
	ctrl = dpaa2_fd_get_ctrl(fd);
	dpaa2_fd_set_ctrl(fd, ctrl | DPAA2_FD_CTRL_ASAL);

	/* Enable UPD (update prepended data) bit in the FAEAD field of
	 * hardware frame annotation area
	 */
	ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD;
	faead = dpaa2_get_faead(buf_start, true);
	faead->ctrl = cpu_to_le32(ctrl);
}

/* Create a frame descriptor based on a fragmented skb */
static int build_sg_fd(struct dpaa2_eth_priv *priv,
		       struct sk_buff *skb,
		       struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	void *sgt_buf = NULL;
	dma_addr_t addr;
	int nr_frags = skb_shinfo(skb)->nr_frags;
	struct dpaa2_sg_entry *sgt;
	int i, err;
	int sgt_buf_size;
	struct scatterlist *scl, *crt_scl;
	int num_sg;
	int num_dma_bufs;
	struct dpaa2_eth_swa *swa;

	/* Create and map scatterlist.
	 * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have
	 * to go beyond nr_frags+1.
	 * Note: We don't support chained scatterlists
	 */
	if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1))
		return -EINVAL;

	scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC);
	if (unlikely(!scl))
		return -ENOMEM;

	sg_init_table(scl, nr_frags + 1);
	num_sg = skb_to_sgvec(skb, scl, 0, skb->len);
	num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
	if (unlikely(!num_dma_bufs)) {
		err = -ENOMEM;
		goto dma_map_sg_failed;
	}

	/* Prepare the HW SGT structure */
	sgt_buf_size = priv->tx_data_offset +
		       sizeof(struct dpaa2_sg_entry) * num_dma_bufs;
	sgt_buf = napi_alloc_frag(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN);
	if (unlikely(!sgt_buf)) {
		err = -ENOMEM;
		goto sgt_buf_alloc_failed;
	}
	sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN);
	memset(sgt_buf, 0, sgt_buf_size);

	sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset);

	/* Fill in the HW SGT structure.
	 *
	 * sgt_buf is zeroed out, so the following fields are implicit
	 * in all sgt entries:
	 *   - offset is 0
	 *   - format is 'dpaa2_sg_single'
	 */
	for_each_sg(scl, crt_scl, num_dma_bufs, i) {
		dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl));
		dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl));
	}
	dpaa2_sg_set_final(&sgt[i - 1], true);

	/* Store the skb backpointer in the SGT buffer.
	 * Fit the scatterlist and the number of buffers alongside the
	 * skb backpointer in the software annotation area. We'll need
	 * all of them on Tx Conf.
	 */
	swa = (struct dpaa2_eth_swa *)sgt_buf;
	swa->type = DPAA2_ETH_SWA_SG;
	swa->sg.skb = skb;
	swa->sg.scl = scl;
	swa->sg.num_sg = num_sg;
	swa->sg.sgt_size = sgt_buf_size;

	/* Separately map the SGT buffer */
	addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr))) {
		err = -ENOMEM;
		goto dma_map_single_failed;
	}
	dpaa2_fd_set_offset(fd, priv->tx_data_offset);
	dpaa2_fd_set_format(fd, dpaa2_fd_sg);
	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, sgt_buf);

	return 0;

dma_map_single_failed:
	skb_free_frag(sgt_buf);
sgt_buf_alloc_failed:
	dma_unmap_sg(dev, scl, num_sg, DMA_BIDIRECTIONAL);
dma_map_sg_failed:
	kfree(scl);
	return err;
}

/* Create a frame descriptor based on a linear skb */
static int build_single_fd(struct dpaa2_eth_priv *priv,
			   struct sk_buff *skb,
			   struct dpaa2_fd *fd)
{
	struct device *dev = priv->net_dev->dev.parent;
	u8 *buffer_start, *aligned_start;
	struct dpaa2_eth_swa *swa;
	dma_addr_t addr;

	buffer_start = skb->data - dpaa2_eth_needed_headroom(priv, skb);

	/* If there's enough room to align the FD address, do it.
	 * It will help hardware optimize accesses.
	 */
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= skb->head)
		buffer_start = aligned_start;

	/* Store a backpointer to the skb at the beginning of the buffer
	 * (in the private data area) such that we can release it
	 * on Tx confirm
	 */
	swa = (struct dpaa2_eth_swa *)buffer_start;
	swa->type = DPAA2_ETH_SWA_SINGLE;
	swa->single.skb = skb;

	addr = dma_map_single(dev, buffer_start,
			      skb_tail_pointer(skb) - buffer_start,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start));
	dpaa2_fd_set_len(fd, skb->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		enable_tx_tstamp(fd, buffer_start);

	return 0;
}

/* FD freeing routine on the Tx path
 *
 * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb
 * back-pointed to is also freed.
 * This can be called either from dpaa2_eth_tx_conf() or on the error path of
 * dpaa2_eth_tx().
 */
static void free_tx_fd(const struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_fq *fq,
		       const struct dpaa2_fd *fd, bool in_napi)
{
	struct device *dev = priv->net_dev->dev.parent;
	dma_addr_t fd_addr;
	struct sk_buff *skb = NULL;
	unsigned char *buffer_start;
	struct dpaa2_eth_swa *swa;
	u8 fd_format = dpaa2_fd_get_format(fd);
	u32 fd_len = dpaa2_fd_get_len(fd);

	fd_addr = dpaa2_fd_get_addr(fd);
	buffer_start = dpaa2_iova_to_virt(priv->iommu_domain, fd_addr);
	swa = (struct dpaa2_eth_swa *)buffer_start;

	if (fd_format == dpaa2_fd_single) {
		if (swa->type == DPAA2_ETH_SWA_SINGLE) {
			skb = swa->single.skb;
			/* Accessing the skb buffer is safe before dma unmap,
			 * because we didn't map the actual skb shell.
			 */
			dma_unmap_single(dev, fd_addr,
					 skb_tail_pointer(skb) - buffer_start,
					 DMA_BIDIRECTIONAL);
		} else {
			WARN_ONCE(swa->type != DPAA2_ETH_SWA_XDP, "Wrong SWA type");
			dma_unmap_single(dev, fd_addr, swa->xdp.dma_size,
					 DMA_BIDIRECTIONAL);
		}
	} else if (fd_format == dpaa2_fd_sg) {
		skb = swa->sg.skb;

		/* Unmap the scatterlist */
		dma_unmap_sg(dev, swa->sg.scl, swa->sg.num_sg,
			     DMA_BIDIRECTIONAL);
		kfree(swa->sg.scl);

		/* Unmap the SGT buffer */
		dma_unmap_single(dev, fd_addr, swa->sg.sgt_size,
				 DMA_BIDIRECTIONAL);
	} else {
		netdev_dbg(priv->net_dev, "Invalid FD format\n");
		return;
	}

	if (swa->type != DPAA2_ETH_SWA_XDP && in_napi) {
		fq->dq_frames++;
		fq->dq_bytes += fd_len;
	}

	if (swa->type == DPAA2_ETH_SWA_XDP) {
		xdp_return_frame(swa->xdp.xdpf);
		return;
	}

	/* Get the timestamp value */
	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		struct skb_shared_hwtstamps shhwtstamps;
		__le64 *ts = dpaa2_get_ts(buffer_start, true);
		u64 ns;

		memset(&shhwtstamps, 0, sizeof(shhwtstamps));

		ns = DPAA2_PTP_CLK_PERIOD_NS * le64_to_cpup(ts);
		shhwtstamps.hwtstamp = ns_to_ktime(ns);
		skb_tstamp_tx(skb, &shhwtstamps);
	}

	/* Free SGT buffer allocated on tx */
	if (fd_format != dpaa2_fd_single)
		skb_free_frag(buffer_start);

	/* Move on with skb release */
	napi_consume_skb(skb, in_napi);
}

static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_fd fd;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	struct dpaa2_eth_fq *fq;
	struct netdev_queue *nq;
	u16 queue_mapping;
	unsigned int needed_headroom;
	u32 fd_len;
	u8 prio = 0;
	int err, i;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	percpu_extras = this_cpu_ptr(priv->percpu_extras);

	needed_headroom = dpaa2_eth_needed_headroom(priv, skb);
	if (skb_headroom(skb) < needed_headroom) {
		struct sk_buff *ns;

		ns = skb_realloc_headroom(skb, needed_headroom);
		if (unlikely(!ns)) {
			percpu_stats->tx_dropped++;
			goto err_alloc_headroom;
		}
		percpu_extras->tx_reallocs++;

		if (skb->sk)
			skb_set_owner_w(ns, skb->sk);

		dev_kfree_skb(skb);
		skb = ns;
	}

	/* We'll be holding a back-reference to the skb until Tx Confirmation;
	 * we don't want that overwritten by a concurrent Tx with a cloned skb.
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (unlikely(!skb)) {
		/* skb_unshare() has already freed the skb */
		percpu_stats->tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	if (skb_is_nonlinear(skb)) {
		err = build_sg_fd(priv, skb, &fd);
		percpu_extras->tx_sg_frames++;
		percpu_extras->tx_sg_bytes += skb->len;
	} else {
		err = build_single_fd(priv, skb, &fd);
	}

	if (unlikely(err)) {
		percpu_stats->tx_dropped++;
		goto err_build_fd;
	}

	/* Tracing point */
	trace_dpaa2_tx_fd(net_dev, &fd);

	/* TxConf FQ selection relies on queue id from the stack.
	 * In case of a forwarded frame from another DPNI interface, we choose
	 * a queue affined to the same core that processed the Rx frame
	 */
	queue_mapping = skb_get_queue_mapping(skb);

	if (net_dev->num_tc) {
		prio = netdev_txq_to_tc(net_dev, queue_mapping);
		/* Hardware interprets priority level 0 as being the highest,
		 * so we need to do a reverse mapping to the netdev tc index
		 */
		prio = net_dev->num_tc - prio - 1;
		/* We have only one FQ array entry for all Tx hardware queues
		 * with the same flow id (but different priority levels)
		 */
		queue_mapping %= dpaa2_eth_queue_count(priv);
	}
	fq = &priv->fq[queue_mapping];

	fd_len = dpaa2_fd_get_len(&fd);
	nq = netdev_get_tx_queue(net_dev, queue_mapping);
	netdev_tx_sent_queue(nq, fd_len);

	/* Everything that happens after this enqueue might race with
	 * the Tx confirmation callback for this frame
	 */
	for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) {
		err = priv->enqueue(priv, fq, &fd, prio, 1, NULL);
		if (err != -EBUSY)
			break;
	}
	percpu_extras->tx_portal_busy += i;
	if (unlikely(err < 0)) {
		percpu_stats->tx_errors++;
		/* Clean up everything, including freeing the skb */
		free_tx_fd(priv, fq, &fd, false);
		netdev_tx_completed_queue(nq, 1, fd_len);
	} else {
		percpu_stats->tx_packets++;
		percpu_stats->tx_bytes += fd_len;
	}

	return NETDEV_TX_OK;

err_build_fd:
err_alloc_headroom:
	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
			      struct dpaa2_eth_channel *ch __always_unused,
			      const struct dpaa2_fd *fd,
			      struct dpaa2_eth_fq *fq)
{
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_drv_stats *percpu_extras;
	u32 fd_len = dpaa2_fd_get_len(fd);
	u32 fd_errors;

	/* Tracing point */
	trace_dpaa2_tx_conf_fd(priv->net_dev, fd);

	percpu_extras = this_cpu_ptr(priv->percpu_extras);
	percpu_extras->tx_conf_frames++;
	percpu_extras->tx_conf_bytes += fd_len;

	/* Check frame errors in the FD field */
	fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
	free_tx_fd(priv, fq, fd, true);

	if (likely(!fd_errors))
		return;

	if (net_ratelimit())
		netdev_dbg(priv->net_dev, "TX frame FD error: 0x%08x\n",
			   fd_errors);

	percpu_stats = this_cpu_ptr(priv->percpu_stats);
	/* Tx-conf logically pertains to the egress path. */
	percpu_stats->tx_errors++;
}

static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_RX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev,
			   "dpni_set_offload(RX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable)
{
	int err;

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L3_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L3_CSUM) failed\n");
		return err;
	}

	err = dpni_set_offload(priv->mc_io, 0, priv->mc_token,
			       DPNI_OFF_TX_L4_CSUM, enable);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_offload(TX_L4_CSUM) failed\n");
		return err;
	}

	return 0;
}

/* Perform a single release command to add buffers
 * to the specified buffer pool
 */
static int add_bufs(struct dpaa2_eth_priv *priv,
		    struct dpaa2_eth_channel *ch, u16 bpid)
{
	struct device *dev = priv->net_dev->dev.parent;
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	struct page *page;
	dma_addr_t addr;
	int retries = 0;
	int i, err;

	for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) {
		/* allocate one page for each Rx buffer. WRIOP sees
		 * the entire page except for a tailroom reserved for
		 * skb shared info
		 */
		page = dev_alloc_pages(0);
		if (!page)
			goto err_alloc;

		addr = dma_map_page(dev, page, 0, priv->rx_buf_size,
				    DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(dev, addr)))
			goto err_map;

		buf_array[i] = addr;

		/* tracing point */
		trace_dpaa2_eth_buf_seed(priv->net_dev,
					 page, DPAA2_ETH_RX_BUF_RAW_SIZE,
					 addr, priv->rx_buf_size,
					 bpid);
	}

release_bufs:
	/* In case the portal is busy, retry until successful */
	while ((err = dpaa2_io_service_release(ch->dpio, bpid,
					       buf_array, i)) == -EBUSY) {
		if (retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
			break;
		cpu_relax();
	}

	/* If release command failed, clean up and bail out;
	 * not much else we can do about it
	 */
	if (err) {
		free_bufs(priv, buf_array, i);
		return 0;
	}

	return i;

err_map:
	__free_pages(page, 0);
err_alloc:
	/* If we managed to allocate at least some buffers,
	 * release them to hardware
	 */
	if (i)
		goto release_bufs;

	return 0;
}

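/* Seed the buffer pool with DPAA2_ETH_NUM_BUFS buffers per channel */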
static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid)
{
	int i, j;
	int new_count;

	for (j = 0; j < priv->num_channels; j++) {
		for (i = 0; i < DPAA2_ETH_NUM_BUFS;
		     i += DPAA2_ETH_BUFS_PER_CMD) {
			new_count = add_bufs(priv, priv->channel[j], bpid);
			priv->channel[j]->buf_count += new_count;

			if (new_count < DPAA2_ETH_BUFS_PER_CMD)
				return -ENOMEM;
		}
	}

	return 0;
}

/* Drain the specified number of buffers from the DPNI's private buffer pool.
 * @count must not exceed DPAA2_ETH_BUFS_PER_CMD
 */
static void drain_bufs(struct dpaa2_eth_priv *priv, int count)
{
	u64 buf_array[DPAA2_ETH_BUFS_PER_CMD];
	int retries = 0;
	int ret;

	do {
		ret = dpaa2_io_service_acquire(NULL, priv->bpid,
					       buf_array, count);
		if (ret < 0) {
			if (ret == -EBUSY &&
			    retries++ >= DPAA2_ETH_SWP_BUSY_RETRIES)
				continue;
			netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n");
			return;
		}
		free_bufs(priv, buf_array, ret);
		retries = 0;
	} while (ret);
}

static void drain_pool(struct dpaa2_eth_priv *priv)
{
	int i;

	drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD);
	drain_bufs(priv, 1);

	for (i = 0; i < priv->num_channels; i++)
		priv->channel[i]->buf_count = 0;
}

/* Function is called from softirq context only, so we don't need to guard
 * the access to percpu count
 */
static int refill_pool(struct dpaa2_eth_priv *priv,
		       struct dpaa2_eth_channel *ch,
		       u16 bpid)
{
	int new_count;

	if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH))
		return 0;

	do {
		new_count = add_bufs(priv, ch, bpid);
		if (unlikely(!new_count)) {
			/* Out of memory; abort for now, we'll try later on */
			break;
		}
		ch->buf_count += new_count;
	} while (ch->buf_count < DPAA2_ETH_NUM_BUFS);

	if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS))
		return -ENOMEM;

	return 0;
}

static int pull_channel(struct dpaa2_eth_channel *ch)
{
	int err;
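	/* dequeues starts at -1 so it ends up counting only the -EBUSY
	 * pull attempts
	 */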
	int dequeues = -1;

	/* Retry while portal is busy */
	do {
		err = dpaa2_io_service_pull_channel(ch->dpio, ch->ch_id,
						    ch->store);
		dequeues++;
		cpu_relax();
	} while (err == -EBUSY && dequeues < DPAA2_ETH_SWP_BUSY_RETRIES);

	ch->stats.dequeue_portal_busy += dequeues;
	if (unlikely(err))
		ch->stats.pull_err++;

	return err;
}

/* NAPI poll routine
 *
 * Frames are dequeued from the QMan channel associated with this NAPI context.
 * Rx, Tx confirmation and (if configured) Rx error frames all count
 * towards the NAPI budget.
 */
static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
{
	struct dpaa2_eth_channel *ch;
	struct dpaa2_eth_priv *priv;
	int rx_cleaned = 0, txconf_cleaned = 0;
	struct dpaa2_eth_fq *fq, *txc_fq = NULL;
	struct netdev_queue *nq;
	int store_cleaned, work_done;
	struct list_head rx_list;
	int retries = 0;
	int err;

	ch = container_of(napi, struct dpaa2_eth_channel, napi);
	ch->xdp.res = 0;
	priv = ch->priv;

	INIT_LIST_HEAD(&rx_list);
	ch->rx_list = &rx_list;

	do {
		err = pull_channel(ch);
		if (unlikely(err))
			break;

		/* Refill pool if appropriate */
		refill_pool(priv, ch, priv->bpid);

		store_cleaned = consume_frames(ch, &fq);
		if (store_cleaned <= 0)
			break;
		if (fq->type == DPAA2_RX_FQ) {
			rx_cleaned += store_cleaned;
		} else {
			txconf_cleaned += store_cleaned;
			/* We have a single Tx conf FQ on this channel */
			txc_fq = fq;
		}

		/* If we either consumed the whole NAPI budget with Rx frames
		 * or we reached the Tx confirmations threshold, we're done.
		 */
		if (rx_cleaned >= budget ||
		    txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) {
			work_done = budget;
			goto out;
		}
	} while (store_cleaned);

	/* We didn't consume the entire budget, so finish napi and
	 * re-enable data availability notifications
	 */
	napi_complete_done(napi, rx_cleaned);
	do {
		err = dpaa2_io_service_rearm(ch->dpio, &ch->nctx);
		cpu_relax();
	} while (err == -EBUSY && retries++ < DPAA2_ETH_SWP_BUSY_RETRIES);
	WARN_ONCE(err, "CDAN notifications rearm failed on core %d",
		  ch->nctx.desired_cpu);

	work_done = max(rx_cleaned, 1);

out:
	netif_receive_skb_list(ch->rx_list);

	if (txc_fq && txc_fq->dq_frames) {
		nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid);
		netdev_tx_completed_queue(nq, txc_fq->dq_frames,
					  txc_fq->dq_bytes);
		txc_fq->dq_frames = 0;
		txc_fq->dq_bytes = 0;
	}

	if (ch->xdp.res & XDP_REDIRECT)
		xdp_do_flush_map();

	return work_done;
}

static void enable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_enable(&ch->napi);
	}
}

static void disable_ch_napi(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *ch;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		napi_disable(&ch->napi);
	}
}

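/* Configure taildrop on all Rx frame queues. Taildrop is only used while
 * Tx pause frame generation is disabled (see link_state_update())
 */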
static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
{
	struct dpni_taildrop td = {0};
	int i, err;

	if (priv->rx_td_enabled == enable)
		return;

	td.enable = enable;
	td.threshold = DPAA2_ETH_TAILDROP_THRESH;

	for (i = 0; i < priv->num_fqs; i++) {
		if (priv->fq[i].type != DPAA2_RX_FQ)
			continue;
		err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
					DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
					priv->fq[i].flowid, &td);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_taildrop() failed\n");
			break;
		}
	}

	priv->rx_td_enabled = enable;
}

static int link_state_update(struct dpaa2_eth_priv *priv)
{
	struct dpni_link_state state = {0};
	bool tx_pause;
	int err;

	err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state);
	if (unlikely(err)) {
		netdev_err(priv->net_dev,
			   "dpni_get_link_state() failed\n");
		return err;
	}

	/* If Tx pause frame settings have changed, we need to update
	 * Rx FQ taildrop configuration as well. We configure taildrop
	 * only when pause frame generation is disabled.
	 */
	tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
		   !!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
	dpaa2_eth_set_rx_taildrop(priv, !tx_pause);

	/* When we manage the MAC/PHY using phylink there is no need
	 * to manually update the netif_carrier.
	 */
	if (priv->mac)
		goto out;

	/* Check link state; speed / duplex changes are not treated yet */
	if (priv->link_state.up == state.up)
		goto out;

	if (state.up) {
		netif_carrier_on(priv->net_dev);
		netif_tx_start_all_queues(priv->net_dev);
	} else {
		netif_tx_stop_all_queues(priv->net_dev);
		netif_carrier_off(priv->net_dev);
	}

	netdev_info(priv->net_dev, "Link Event: state %s\n",
		    state.up ? "up" : "down");

out:
	priv->link_state = state;

	return 0;
}

static int dpaa2_eth_open(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = seed_pool(priv, priv->bpid);
	if (err) {
		/* Not much to do; the buffer pool, though not filled up,
		 * may still contain some buffers which would enable us
		 * to limp on.
		 */
		netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n",
			   priv->dpbp_dev->obj_desc.id, priv->bpid);
	}

	if (!priv->mac) {
		/* We'll only start the txqs when the link is actually ready;
		 * make sure we don't race against the link up notification,
		 * which may come immediately after dpni_enable();
		 */
		netif_tx_stop_all_queues(net_dev);

		/* Also, explicitly set carrier off, otherwise
		 * netif_carrier_ok() will return true and cause 'ip link show'
		 * to report the LOWER_UP flag, even though the link
		 * notification wasn't even received.
		 */
		netif_carrier_off(net_dev);
	}
	enable_ch_napi(priv);

	err = dpni_enable(priv->mc_io, 0, priv->mc_token);
	if (err < 0) {
		netdev_err(net_dev, "dpni_enable() failed\n");
		goto enable_err;
	}

	if (!priv->mac) {
		/* If the DPMAC object has already processed the link up
		 * interrupt, we have to learn the link state ourselves.
		 */
		err = link_state_update(priv);
		if (err < 0) {
			netdev_err(net_dev, "Can't update link state\n");
			goto link_state_err;
		}
	} else {
		phylink_start(priv->mac->phylink);
	}

	return 0;

link_state_err:
enable_err:
	disable_ch_napi(priv);
	drain_pool(priv);
	return err;
}

/* Total number of in-flight frames on ingress queues */
static u32 ingress_fq_count(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_fq *fq;
	u32 fcnt = 0, bcnt = 0, total = 0;
	int i, err;
	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt);
		if (err) {
			netdev_warn(priv->net_dev, "query_fq_count failed\n");
			break;
		}
		total += fcnt;
	}

	return total;
}

static void wait_for_ingress_fq_empty(struct dpaa2_eth_priv *priv)
{
	int retries = 10;
	u32 pending;

	do {
		pending = ingress_fq_count(priv);
		if (pending)
			msleep(100);
	} while (pending && --retries);
}

#define DPNI_TX_PENDING_VER_MAJOR	7
#define DPNI_TX_PENDING_VER_MINOR	13
static void wait_for_egress_fq_empty(struct dpaa2_eth_priv *priv)
{
	union dpni_statistics stats;
	int retries = 10;
	int err;

	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_TX_PENDING_VER_MAJOR,
				   DPNI_TX_PENDING_VER_MINOR) < 0)
		goto out;

	do {
		err = dpni_get_statistics(priv->mc_io, 0, priv->mc_token, 6,
					  &stats);
		if (err)
			goto out;
		if (stats.page_6.tx_pending_frames == 0)
			return;
	} while (--retries);

out:
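	/* Fallback: give in-flight Tx frames a fixed grace period to drain
	 * when the firmware can't report the pending frame count
	 */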
	msleep(500);
}

static int dpaa2_eth_stop(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int dpni_enabled = 0;
	int retries = 10;

	if (!priv->mac) {
		netif_tx_stop_all_queues(net_dev);
		netif_carrier_off(net_dev);
	} else {
		phylink_stop(priv->mac->phylink);
	}
	/* On dpni_disable(), the MC firmware will:
	 * - stop MAC Rx and wait for all Rx frames to be enqueued to software
	 * - cut off WRIOP dequeues from egress FQs and wait until transmission
	 * of all in flight Tx frames is finished (and corresponding Tx conf
	 * frames are enqueued back to software)
	 *
	 * Before calling dpni_disable(), we wait for all Tx frames to arrive
	 * on WRIOP. After it finishes, wait until all remaining frames on Rx
	 * and Tx conf queues are consumed on NAPI poll.
1481
	 */
1482
	wait_for_egress_fq_empty(priv);
1483

1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497
	do {
		dpni_disable(priv->mc_io, 0, priv->mc_token);
		dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled);
		if (dpni_enabled)
			/* Allow the hardware some slack */
			msleep(100);
	} while (dpni_enabled && --retries);
	if (!retries) {
		netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n");
		/* Must go on and disable NAPI nonetheless, so we don't crash at
		 * the next "ifconfig up"
		 */
	}

	wait_for_ingress_fq_empty(priv);
	disable_ch_napi(priv);

	/* Empty the buffer pool */
	drain_pool(priv);

	return 0;
}

static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	int err;

	err = eth_mac_addr(net_dev, addr);
	if (err < 0) {
		dev_err(dev, "eth_mac_addr() failed (%d)\n", err);
		return err;
	}

	err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					net_dev->dev_addr);
	if (err) {
		dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err);
		return err;
	}

	return 0;
}

/* Fill in counters maintained by the GPP driver. These may be different from
 * the hardware counters obtained by ethtool.
 */
static void dpaa2_eth_get_stats(struct net_device *net_dev,
				struct rtnl_link_stats64 *stats)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct rtnl_link_stats64 *percpu_stats;
	u64 *cpustats;
	u64 *netstats = (u64 *)stats;
	int i, j;
	int num = sizeof(struct rtnl_link_stats64) / sizeof(u64);

	for_each_possible_cpu(i) {
		percpu_stats = per_cpu_ptr(priv->percpu_stats, i);
		cpustats = (u64 *)percpu_stats;
		for (j = 0; j < num; j++)
			netstats[j] += cpustats[j];
	}
}

/* Copy mac unicast addresses from @net_dev to @priv.
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_uc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_uc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add ucast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

/* Copy mac multicast addresses from @net_dev to @priv
 * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable.
 */
static void add_mc_hw_addr(const struct net_device *net_dev,
			   struct dpaa2_eth_priv *priv)
{
	struct netdev_hw_addr *ha;
	int err;

	netdev_for_each_mc_addr(ha, net_dev) {
		err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token,
					ha->addr);
		if (err)
			netdev_warn(priv->net_dev,
				    "Could not add mcast MAC %pM to the filtering table (err %d)\n",
				    ha->addr, err);
	}
}

static void dpaa2_eth_set_rx_mode(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int uc_count = netdev_uc_count(net_dev);
	int mc_count = netdev_mc_count(net_dev);
	u8 max_mac = priv->dpni_attrs.mac_filter_entries;
	u32 options = priv->dpni_attrs.options;
	u16 mc_token = priv->mc_token;
	struct fsl_mc_io *mc_io = priv->mc_io;
	int err;

	/* Basic sanity checks; these probably indicate a misconfiguration */
	if (options & DPNI_OPT_NO_MAC_FILTER && max_mac != 0)
		netdev_info(net_dev,
			    "mac_filter_entries=%d, DPNI_OPT_NO_MAC_FILTER option must be disabled\n",
			    max_mac);

	/* Force promiscuous if the uc or mc counts exceed our capabilities. */
	if (uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count, max_mac);
		goto force_promisc;
	}
	if (mc_count + uc_count > max_mac) {
		netdev_info(net_dev,
			    "Unicast + multicast addr count reached %d, max allowed is %d; forcing promisc\n",
			    uc_count + mc_count, max_mac);
		goto force_mc_promisc;
	}

	/* Adjust promisc settings due to flag combinations */
	if (net_dev->flags & IFF_PROMISC)
		goto force_promisc;
	if (net_dev->flags & IFF_ALLMULTI) {
		/* First, rebuild unicast filtering table. This should be done
		 * in promisc mode, in order to avoid frame loss while we
		 * progressively add entries to the table.
		 * We don't know whether we had been in promisc already, and
		 * making an MC call to find out is expensive; so set uc promisc
		 * nonetheless.
		 */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
		if (err)
			netdev_warn(net_dev, "Can't set uc promisc\n");

		/* Actual uc table reconstruction. */
		err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc filters\n");
		add_uc_hw_addr(net_dev, priv);

		/* Finally, clear uc promisc and set mc promisc as requested. */
		err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
		if (err)
			netdev_warn(net_dev, "Can't clear uc promisc\n");
		goto force_mc_promisc;
	}

	/* Neither unicast, nor multicast promisc will be on... eventually.
	 * For now, rebuild mac filtering tables while forcing both of them on.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err);
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err);

	/* Actual mac filtering tables reconstruction */
	err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1);
	if (err)
		netdev_warn(net_dev, "Can't clear mac filters\n");
	add_mc_hw_addr(net_dev, priv);
	add_uc_hw_addr(net_dev, priv);

	/* Now we can clear both ucast and mcast promisc, without risking
	 * to drop legitimate frames anymore.
	 */
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear ucast promisc\n");
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0);
	if (err)
		netdev_warn(net_dev, "Can't clear mcast promisc\n");

	return;

force_promisc:
	err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set ucast promisc\n");
force_mc_promisc:
	err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1);
	if (err)
		netdev_warn(net_dev, "Can't set mcast promisc\n");
}

static int dpaa2_eth_set_features(struct net_device *net_dev,
				  netdev_features_t features)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	netdev_features_t changed = features ^ net_dev->features;
	bool enable;
	int err;

	if (changed & NETIF_F_RXCSUM) {
		enable = !!(features & NETIF_F_RXCSUM);
		err = set_rx_csum(priv, enable);
		if (err)
			return err;
	}

	if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) {
		enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
		err = set_tx_csum(priv, enable);
		if (err)
			return err;
	}

	return 0;
}

static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
		priv->tx_tstamp = false;
		break;
	case HWTSTAMP_TX_ON:
		priv->tx_tstamp = true;
		break;
	default:
		return -ERANGE;
	}

	if (config.rx_filter == HWTSTAMP_FILTER_NONE) {
		priv->rx_tstamp = false;
	} else {
		priv->rx_tstamp = true;
		/* TS is set for all frame types, not only those requested */
		config.rx_filter = HWTSTAMP_FILTER_ALL;
	}

	return copy_to_user(rq->ifr_data, &config, sizeof(config)) ?
			-EFAULT : 0;
}

static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	if (cmd == SIOCSHWTSTAMP)
		return dpaa2_eth_ts_ioctl(dev, rq, cmd);

	if (priv->mac)
		return phylink_mii_ioctl(priv->mac->phylink, rq, cmd);

	return -EOPNOTSUPP;
}

static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu)
{
	int mfl, linear_mfl;

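	/* With XDP the frame must fit in a single buffer: the full Rx buffer
	 * minus the hardware annotation area, the driver headroom and the
	 * headroom reserved for the XDP program
	 */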
	mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	linear_mfl = priv->rx_buf_size - DPAA2_ETH_RX_HWA_SIZE -
		     dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM;

	if (mfl > linear_mfl) {
		netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n",
			    linear_mfl - VLAN_ETH_HLEN);
		return false;
	}

	return true;
}

static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp)
{
	int mfl, err;

	/* We enforce a maximum Rx frame length based on MTU only if we have
	 * an XDP program attached (in order to avoid Rx S/G frames).
	 * Otherwise, we accept all incoming frames as long as they are not
	 * larger than maximum size supported in hardware
	 */
	if (has_xdp)
		mfl = DPAA2_ETH_L2_MAX_FRM(mtu);
	else
		mfl = DPAA2_ETH_MFL;

	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n");
		return err;
	}

	return 0;
}

static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->xdp_prog)
		goto out;

	if (!xdp_mtu_valid(priv, new_mtu))
		return -EINVAL;

	err = set_rx_mfl(priv, new_mtu, true);
	if (err)
		return err;

out:
	dev->mtu = new_mtu;
	return 0;
}

static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp)
{
	struct dpni_buffer_layout buf_layout = {0};
	int err;

	err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n");
		return err;
	}

	/* Reserve extra headroom for XDP header size changes */
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) +
				    (has_xdp ? XDP_PACKET_HEADROOM : 0);
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n");
		return err;
	}

	return 0;
}

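/* Install or remove an XDP program. Switching between XDP and non-XDP mode
 * requires reconfiguring the Rx frame length and buffer layout, so the
 * interface is briefly brought down if it was running.
 */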
static int setup_xdp(struct net_device *dev, struct bpf_prog *prog)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);
	struct dpaa2_eth_channel *ch;
	struct bpf_prog *old;
	bool up, need_update;
	int i, err;

	if (prog && !xdp_mtu_valid(priv, dev->mtu))
		return -EINVAL;

	if (prog)
		bpf_prog_add(prog, priv->num_channels);

	up = netif_running(dev);
	need_update = (!!priv->xdp_prog != !!prog);

	if (up)
		dpaa2_eth_stop(dev);

	/* While in xdp mode, enforce a maximum Rx frame size based on MTU.
	 * Also, when switching between xdp/non-xdp modes we need to reconfigure
	 * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop,
	 * so we are sure no old format buffers will be used from now on.
	 */
	if (need_update) {
		err = set_rx_mfl(priv, dev->mtu, !!prog);
		if (err)
			goto out_err;
		err = update_rx_buffer_headroom(priv, !!prog);
		if (err)
			goto out_err;
	}

	old = xchg(&priv->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		old = xchg(&ch->xdp.prog, prog);
		if (old)
			bpf_prog_put(old);
	}

	if (up) {
		err = dpaa2_eth_open(dev);
		if (err)
			return err;
	}

	return 0;

out_err:
	if (prog)
		bpf_prog_sub(prog, priv->num_channels);
	if (up)
		dpaa2_eth_open(dev);

	return err;
}

static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct dpaa2_eth_priv *priv = netdev_priv(dev);

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return setup_xdp(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = priv->xdp_prog ? priv->xdp_prog->aux->id : 0;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

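/* Build a Tx frame descriptor around an xdp_frame, reusing the frame's
 * own headroom for the software annotation and the hardware areas
 */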
static int dpaa2_eth_xdp_create_fd(struct net_device *net_dev,
				   struct xdp_frame *xdpf,
				   struct dpaa2_fd *fd)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct device *dev = net_dev->dev.parent;
	unsigned int needed_headroom;
	struct dpaa2_eth_swa *swa;
	void *buffer_start, *aligned_start;
	dma_addr_t addr;

	/* We require a minimum headroom to be able to transmit the frame.
	 * Otherwise return an error and let the original net_device handle it
	 */
	needed_headroom = dpaa2_eth_needed_headroom(priv, NULL);
	if (xdpf->headroom < needed_headroom)
		return -EINVAL;

	/* Setup the FD fields */
	memset(fd, 0, sizeof(*fd));

	/* Align FD address, if possible */
	buffer_start = xdpf->data - needed_headroom;
	aligned_start = PTR_ALIGN(buffer_start - DPAA2_ETH_TX_BUF_ALIGN,
				  DPAA2_ETH_TX_BUF_ALIGN);
	if (aligned_start >= xdpf->data - xdpf->headroom)
		buffer_start = aligned_start;

	swa = (struct dpaa2_eth_swa *)buffer_start;
	/* Fill in the software annotation fields we'll need on Tx conf,
	 * to unmap the buffer and free the xdp_frame
	 */
	swa->type = DPAA2_ETH_SWA_XDP;
	swa->xdp.dma_size = xdpf->data + xdpf->len - buffer_start;
	swa->xdp.xdpf = xdpf;

	addr = dma_map_single(dev, buffer_start,
			      swa->xdp.dma_size,
			      DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(dev, addr)))
		return -ENOMEM;

	dpaa2_fd_set_addr(fd, addr);
	dpaa2_fd_set_offset(fd, xdpf->data - buffer_start);
	dpaa2_fd_set_len(fd, xdpf->len);
	dpaa2_fd_set_format(fd, dpaa2_fd_single);
	dpaa2_fd_set_ctrl(fd, FD_CTRL_PTA);

	return 0;
}

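/* .ndo_xdp_xmit callback: enqueue redirected XDP frames in bulk on the
 * Tx queue affine to the current CPU
 */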
static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
			      struct xdp_frame **frames, u32 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpaa2_eth_xdp_fds *xdp_redirect_fds;
	struct rtnl_link_stats64 *percpu_stats;
	struct dpaa2_eth_fq *fq;
	struct dpaa2_fd *fds;
	int enqueued, i, err;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (!netif_running(net_dev))
		return -ENETDOWN;

	fq = &priv->fq[smp_processor_id()];
	xdp_redirect_fds = &fq->xdp_redirect_fds;
	fds = xdp_redirect_fds->fds;

	percpu_stats = this_cpu_ptr(priv->percpu_stats);

	/* create a FD for each xdp_frame in the list received */
	for (i = 0; i < n; i++) {
		err = dpaa2_eth_xdp_create_fd(net_dev, frames[i], &fds[i]);
		if (err)
			break;
	}
	xdp_redirect_fds->num = i;

	/* enqueue all the frame descriptors */
	enqueued = dpaa2_eth_xdp_flush(priv, fq, xdp_redirect_fds);

	/* update statistics */
	percpu_stats->tx_packets += enqueued;
	for (i = 0; i < enqueued; i++)
		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
	for (i = enqueued; i < n; i++)
		xdp_return_frame_rx_napi(frames[i]);

	return enqueued;
}

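/* Configure XPS so that each Tx netdev queue is mapped to the CPU on
 * which its confirmation frames are processed
 */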
static int update_xps(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct cpumask xps_mask;
	struct dpaa2_eth_fq *fq;
	int i, num_queues, netdev_queues;
	int err = 0;

	num_queues = dpaa2_eth_queue_count(priv);
	netdev_queues = (net_dev->num_tc ? : 1) * num_queues;

	/* The first <num_queues> entries in priv->fq array are Tx/Tx conf
	 * queues, so only process those
	 */
	for (i = 0; i < netdev_queues; i++) {
		fq = &priv->fq[i % num_queues];

		cpumask_clear(&xps_mask);
		cpumask_set_cpu(fq->target_cpu, &xps_mask);

		err = netif_set_xps_queue(net_dev, &xps_mask, i);
		if (err) {
			netdev_warn_once(net_dev, "Error setting XPS queue\n");
			break;
		}
	}

	return err;
}

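/* .ndo_setup_tc callback: offload MQPRIO by mapping each traffic class
 * onto its own contiguous range of hardware queues
 */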
static int dpaa2_eth_setup_tc(struct net_device *net_dev,
			      enum tc_setup_type type, void *type_data)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct tc_mqprio_qopt *mqprio = type_data;
	u8 num_tc, num_queues;
	int i;

	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
	num_queues = dpaa2_eth_queue_count(priv);
	num_tc = mqprio->num_tc;

	if (num_tc == net_dev->num_tc)
		return 0;

	if (num_tc > dpaa2_eth_tc_count(priv)) {
		netdev_err(net_dev, "Max %d traffic classes supported\n",
			   dpaa2_eth_tc_count(priv));
		return -EOPNOTSUPP;
	}

	if (!num_tc) {
		netdev_reset_tc(net_dev);
		netif_set_real_num_tx_queues(net_dev, num_queues);
		goto out;
	}

	netdev_set_num_tc(net_dev, num_tc);
	netif_set_real_num_tx_queues(net_dev, num_tc * num_queues);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(net_dev, i, num_queues, i * num_queues);

out:
	update_xps(priv);

	return 0;
}

static const struct net_device_ops dpaa2_eth_ops = {
	.ndo_open = dpaa2_eth_open,
	.ndo_start_xmit = dpaa2_eth_tx,
	.ndo_stop = dpaa2_eth_stop,
	.ndo_set_mac_address = dpaa2_eth_set_addr,
	.ndo_get_stats64 = dpaa2_eth_get_stats,
	.ndo_set_rx_mode = dpaa2_eth_set_rx_mode,
	.ndo_set_features = dpaa2_eth_set_features,
	.ndo_do_ioctl = dpaa2_eth_ioctl,
	.ndo_change_mtu = dpaa2_eth_change_mtu,
	.ndo_bpf = dpaa2_eth_xdp,
	.ndo_xdp_xmit = dpaa2_eth_xdp_xmit,
	.ndo_setup_tc = dpaa2_eth_setup_tc,
};

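/* Channel Data Availability Notification callback: frames are waiting on
 * this channel, so schedule the NAPI instance to process them
 */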
static void cdan_cb(struct dpaa2_io_notification_ctx *ctx)
{
	struct dpaa2_eth_channel *ch;

	ch = container_of(ctx, struct dpaa2_eth_channel, nctx);

	/* Update NAPI statistics */
	ch->stats.cdan++;

	napi_schedule_irqoff(&ch->napi);
}

/* Allocate and configure a DPCON object */
static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpcon;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev),
				     FSL_MC_POOL_DPCON, &dpcon);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_info(dev, "Not enough DPCONs, will go on as-is\n");
		return ERR_PTR(err);
	}

	err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_open() failed\n");
		goto free;
	}

	err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_reset() failed\n");
		goto close;
	}

	err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle);
	if (err) {
		dev_err(dev, "dpcon_enable() failed\n");
		goto close;
	}

	return dpcon;

close:
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
free:
	fsl_mc_object_free(dpcon);

	return ERR_PTR(err);
}

static void free_dpcon(struct dpaa2_eth_priv *priv,
		       struct fsl_mc_device *dpcon)
{
	dpcon_disable(priv->mc_io, 0, dpcon->mc_handle);
	dpcon_close(priv->mc_io, 0, dpcon->mc_handle);
	fsl_mc_object_free(dpcon);
}

static struct dpaa2_eth_channel *
alloc_channel(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_eth_channel *channel;
	struct dpcon_attr attr;
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->dpcon = setup_dpcon(priv);
	if (IS_ERR_OR_NULL(channel->dpcon)) {
		err = PTR_ERR_OR_ZERO(channel->dpcon);
		goto err_setup;
	}

	err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle,
				   &attr);
	if (err) {
		dev_err(dev, "dpcon_get_attributes() failed\n");
		goto err_get_attr;
	}

	channel->dpcon_id = attr.id;
	channel->ch_id = attr.qbman_ch_id;
	channel->priv = priv;

	return channel;

err_get_attr:
	free_dpcon(priv, channel->dpcon);
err_setup:
	kfree(channel);
	return ERR_PTR(err);
}

static void free_channel(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_channel *channel)
{
	free_dpcon(priv, channel->dpcon);
	kfree(channel);
}

/* DPIO setup: allocate and configure QBMan channels, setup core affinity
 * and register data availability notifications
 */
static int setup_dpio(struct dpaa2_eth_priv *priv)
{
	struct dpaa2_io_notification_ctx *nctx;
	struct dpaa2_eth_channel *channel;
	struct dpcon_notification_cfg dpcon_notif_cfg;
	struct device *dev = priv->net_dev->dev.parent;
	int i, err;

	/* We want the ability to spread ingress traffic (RX, TX conf) to as
	 * many cores as possible, so we need one channel for each core
	 * (unless there are fewer queues than cores, in which case the extra
	 * channels would be wasted).
	 * Allocate one channel per core and register it to the core's
	 * affine DPIO. If not enough channels are available for all cores
	 * or if some cores don't have an affine DPIO, there will be no
	 * ingress frame processing on those cores.
	 */
	cpumask_clear(&priv->dpio_cpumask);
	for_each_online_cpu(i) {
		/* Try to allocate a channel */
		channel = alloc_channel(priv);
		if (IS_ERR_OR_NULL(channel)) {
			err = PTR_ERR_OR_ZERO(channel);
			if (err != -EPROBE_DEFER)
				dev_info(dev,
					 "No affine channel for cpu %d and above\n", i);
			goto err_alloc_ch;
		}

		priv->channel[priv->num_channels] = channel;

		nctx = &channel->nctx;
		nctx->is_cdan = 1;
		nctx->cb = cdan_cb;
		nctx->id = channel->ch_id;
		nctx->desired_cpu = i;

		/* Register the new context */
		channel->dpio = dpaa2_io_service_select(i);
		err = dpaa2_io_service_register(channel->dpio, nctx, dev);
		if (err) {
			dev_dbg(dev, "No affine DPIO for cpu %d\n", i);
			/* If no affine DPIO for this core, there's probably
			 * none available for next cores either. Signal we want
			 * to retry later, in case the DPIO devices weren't
			 * probed yet.
			 */
			err = -EPROBE_DEFER;
			goto err_service_reg;
		}

		/* Register DPCON notification with MC */
		dpcon_notif_cfg.dpio_id = nctx->dpio_id;
		dpcon_notif_cfg.priority = 0;
		dpcon_notif_cfg.user_ctx = nctx->qman64;
		err = dpcon_set_notification(priv->mc_io, 0,
					     channel->dpcon->mc_handle,
					     &dpcon_notif_cfg);
		if (err) {
			dev_err(dev, "dpcon_set_notification failed()\n");
			goto err_set_cdan;
		}

		/* If we managed to allocate a channel and also found an affine
		 * DPIO for this core, add it to the final mask
		 */
		cpumask_set_cpu(i, &priv->dpio_cpumask);
		priv->num_channels++;

		/* Stop if we already have enough channels to accommodate all
		 * RX and TX conf queues
		 */
		if (priv->num_channels == priv->dpni_attrs.num_queues)
			break;
	}

	return 0;

err_set_cdan:
	dpaa2_io_service_deregister(channel->dpio, nctx, dev);
err_service_reg:
	free_channel(priv, channel);
err_alloc_ch:
	if (err == -EPROBE_DEFER) {
		for (i = 0; i < priv->num_channels; i++) {
			channel = priv->channel[i];
			nctx = &channel->nctx;
			dpaa2_io_service_deregister(channel->dpio, nctx, dev);
			free_channel(priv, channel);
		}
		priv->num_channels = 0;
		return err;
	}

	if (cpumask_empty(&priv->dpio_cpumask)) {
		dev_err(dev, "No cpu with an affine DPIO/DPCON\n");
		return -ENODEV;
	}

	dev_info(dev, "Cores %*pbl available for processing ingress traffic\n",
		 cpumask_pr_args(&priv->dpio_cpumask));

	return 0;
}

static void free_dpio(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_channel *ch;
	int i;

	/* deregister CDAN notifications and free channels */
	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		dpaa2_io_service_deregister(ch->dpio, &ch->nctx, dev);
		free_channel(priv, ch);
	}
}

static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv,
						    int cpu)
{
	struct device *dev = priv->net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++)
		if (priv->channel[i]->nctx.desired_cpu == cpu)
			return priv->channel[i];

	/* We should never get here. Issue a warning and return
	 * the first channel, because it's still better than nothing
	 */
	dev_warn(dev, "No affine channel found for cpu %d\n", cpu);

	return priv->channel[0];
}

static void set_fq_affinity(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpaa2_eth_fq *fq;
	int rx_cpu, txc_cpu;
	int i;

	/* For each FQ, pick one channel/CPU to deliver frames to.
	 * This may well change at runtime, either through irqbalance or
	 * through direct user intervention.
	 */
	rx_cpu = txc_cpu = cpumask_first(&priv->dpio_cpumask);

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		switch (fq->type) {
		case DPAA2_RX_FQ:
			fq->target_cpu = rx_cpu;
			rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask);
			if (rx_cpu >= nr_cpu_ids)
				rx_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		case DPAA2_TX_CONF_FQ:
			fq->target_cpu = txc_cpu;
			txc_cpu = cpumask_next(txc_cpu, &priv->dpio_cpumask);
			if (txc_cpu >= nr_cpu_ids)
				txc_cpu = cpumask_first(&priv->dpio_cpumask);
			break;
		default:
			dev_err(dev, "Unknown FQ type: %d\n", fq->type);
		}
		fq->channel = get_affine_channel(priv, fq->target_cpu);
	}

	update_xps(priv);
}

static void setup_fqs(struct dpaa2_eth_priv *priv)
{
	int i;

	/* We have one TxConf FQ per Tx flow.
	 * The number of Tx and Rx queues is the same.
	 * Tx queues come first in the fq array.
	 */
	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
		priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
		priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
		priv->fq[priv->num_fqs++].flowid = (u16)i;
	}

	/* For each FQ, decide on which core to process incoming frames */
	set_fq_affinity(priv);
}

/* Allocate and configure one buffer pool for each interface */
static int setup_dpbp(struct dpaa2_eth_priv *priv)
{
	int err;
	struct fsl_mc_device *dpbp_dev;
	struct device *dev = priv->net_dev->dev.parent;
	struct dpbp_attr dpbp_attrs;

	err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP,
				     &dpbp_dev);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "DPBP device allocation failed\n");
		return err;
	}

	priv->dpbp_dev = dpbp_dev;

	err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id,
			&dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_reset() failed\n");
		goto err_reset;
	}

	err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle);
	if (err) {
		dev_err(dev, "dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle,
				  &dpbp_attrs);
	if (err) {
		dev_err(dev, "dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}
	priv->bpid = dpbp_attrs.bpid;

	return 0;

err_get_attr:
	dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle);
err_enable:
err_reset:
	dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle);
err_open:
	fsl_mc_object_free(dpbp_dev);

	return err;
}

static void free_dpbp(struct dpaa2_eth_priv *priv)
{
	drain_pool(priv);
	dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle);
	fsl_mc_object_free(priv->dpbp_dev);
}

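/* Configure the Tx, Tx confirmation and Rx buffer layouts in hardware,
 * taking into account the WRIOP version specific alignment requirements
 */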
static int set_buffer_layout(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_buffer_layout buf_layout = {0};
	u16 rx_buf_align;
	int err;

	/* We need to check for WRIOP version 1.0.0, but depending on the MC
	 * version, this number is not always provided correctly on rev1.
	 * We need to check for both alternatives in this situation.
	 */
	if (priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(0, 0, 0) ||
	    priv->dpni_attrs.wriop_version == DPAA2_WRIOP_VERSION(1, 0, 0))
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN_REV1;
	else
		rx_buf_align = DPAA2_ETH_RX_BUF_ALIGN;

	/* We need to ensure that the buffer size seen by WRIOP is a multiple
	 * of 64 or 256 bytes depending on the WRIOP version.
	 */
	priv->rx_buf_size = ALIGN_DOWN(DPAA2_ETH_RX_BUF_SIZE, rx_buf_align);

	/* tx buffer */
	buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE;
	buf_layout.pass_timestamp = true;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX) failed\n");
		return err;
	}

	/* tx-confirm buffer */
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX_CONFIRM, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(TX_CONF) failed\n");
		return err;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token,
				      &priv->tx_data_offset);
	if (err) {
		dev_err(dev, "dpni_get_tx_data_offset() failed\n");
		return err;
	}

	if ((priv->tx_data_offset % 64) != 0)
		dev_warn(dev, "Tx data offset (%d) not a multiple of 64B\n",
			 priv->tx_data_offset);

	/* rx buffer */
	buf_layout.pass_frame_status = true;
	buf_layout.pass_parser_result = true;
	buf_layout.data_align = rx_buf_align;
	buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv);
	buf_layout.private_data_size = 0;
	buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
			     DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
			     DPNI_BUF_LAYOUT_OPT_DATA_ALIGN |
			     DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM |
			     DPNI_BUF_LAYOUT_OPT_TIMESTAMP;
	err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_RX, &buf_layout);
	if (err) {
		dev_err(dev, "dpni_set_buffer_layout(RX) failed\n");
		return err;
	}

	return 0;
}

#define DPNI_ENQUEUE_FQID_VER_MAJOR	7
#define DPNI_ENQUEUE_FQID_VER_MINOR	9

static inline int dpaa2_eth_enqueue_qd(struct dpaa2_eth_priv *priv,
				       struct dpaa2_eth_fq *fq,
				       struct dpaa2_fd *fd, u8 prio,
				       u32 num_frames __always_unused,
				       int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_qd(fq->channel->dpio,
					  priv->tx_qdid, prio,
					  fq->tx_qdbin, fd);
	if (!err && frames_enqueued)
		*frames_enqueued = 1;
	return err;
}

static inline int dpaa2_eth_enqueue_fq_multiple(struct dpaa2_eth_priv *priv,
						struct dpaa2_eth_fq *fq,
						struct dpaa2_fd *fd,
						u8 prio, u32 num_frames,
						int *frames_enqueued)
{
	int err;

	err = dpaa2_io_service_enqueue_multiple_fq(fq->channel->dpio,
						   fq->tx_fqid[prio],
						   fd, num_frames);

	if (err == 0)
		return -EBUSY;

	if (frames_enqueued)
		*frames_enqueued = err;
	return 0;
}

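/* Select the enqueue callback based on DPNI version: older firmware only
 * supports enqueueing one frame at a time, by queuing destination
 */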
static void set_enqueue_mode(struct dpaa2_eth_priv *priv)
{
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		priv->enqueue = dpaa2_eth_enqueue_qd;
	else
		priv->enqueue = dpaa2_eth_enqueue_fq_multiple;
}

static int set_pause(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_link_cfg link_cfg = {0};
	int err;

	/* Get the default link options so we don't override other flags */
	err = dpni_get_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_get_link_cfg() failed\n");
		return err;
	}

	/* By default, enable both Rx and Tx pause frames */
	link_cfg.options |= DPNI_LINK_OPT_PAUSE;
	link_cfg.options &= ~DPNI_LINK_OPT_ASYM_PAUSE;
	err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
	if (err) {
		dev_err(dev, "dpni_set_link_cfg() failed\n");
		return err;
	}

	priv->link_state.options = link_cfg.options;

	return 0;
}

static void update_tx_fqids(struct dpaa2_eth_priv *priv)
{
	struct dpni_queue_id qid = {0};
	struct dpaa2_eth_fq *fq;
	struct dpni_queue queue;
	int i, j, err;

	/* We only use Tx FQIDs for FQID-based enqueue, so check
	 * if DPNI version supports it before updating FQIDs
	 */
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_ENQUEUE_FQID_VER_MAJOR,
				   DPNI_ENQUEUE_FQID_VER_MINOR) < 0)
		return;

	for (i = 0; i < priv->num_fqs; i++) {
		fq = &priv->fq[i];
		if (fq->type != DPAA2_TX_CONF_FQ)
			continue;
		for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
			err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
					     DPNI_QUEUE_TX, j, fq->flowid,
					     &queue, &qid);
			if (err)
				goto out_err;

			fq->tx_fqid[j] = qid.fqid;
			if (fq->tx_fqid[j] == 0)
				goto out_err;
		}
	}

	priv->enqueue = dpaa2_eth_enqueue_fq_multiple;

	return;

out_err:
	netdev_info(priv->net_dev,
		    "Error reading Tx FQID, fallback to QDID-based enqueue\n");
	priv->enqueue = dpaa2_eth_enqueue_qd;
}

/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
	struct device *dev = &ls_dev->dev;
	struct dpaa2_eth_priv *priv;
	struct net_device *net_dev;
	int err;

	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

	/* get a handle for the DPNI object */
	err = dpni_open(priv->mc_io, 0, ls_dev->obj_desc.id, &priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_open() failed\n");
		return err;
	}

	/* Check if we can work with this DPNI object */
	err = dpni_get_api_version(priv->mc_io, 0, &priv->dpni_ver_major,
				   &priv->dpni_ver_minor);
	if (err) {
		dev_err(dev, "dpni_get_api_version() failed\n");
		goto close;
	}
	if (dpaa2_eth_cmp_dpni_ver(priv, DPNI_VER_MAJOR, DPNI_VER_MINOR) < 0) {
		dev_err(dev, "DPNI version %u.%u not supported, need >= %u.%u\n",
			priv->dpni_ver_major, priv->dpni_ver_minor,
			DPNI_VER_MAJOR, DPNI_VER_MINOR);
		err = -ENOTSUPP;
		goto close;
	}

	ls_dev->mc_io = priv->mc_io;
	ls_dev->mc_handle = priv->mc_token;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err) {
		dev_err(dev, "dpni_reset() failed\n");
		goto close;
	}

	err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token,
				  &priv->dpni_attrs);
	if (err) {
		dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err);
		goto close;
	}

	err = set_buffer_layout(priv);
	if (err)
		goto close;

	set_enqueue_mode(priv);

	/* Enable pause frame support */
	if (dpaa2_eth_has_pause_support(priv)) {
		err = set_pause(priv);
		if (err)
			goto close;
	}

	priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
				       dpaa2_eth_fs_count(priv), GFP_KERNEL);
	if (!priv->cls_rules) {
		err = -ENOMEM;
		goto close;
	}

	return 0;

close:
	dpni_close(priv->mc_io, 0, priv->mc_token);

	return err;
}

static void free_dpni(struct dpaa2_eth_priv *priv)
{
	int err;

	err = dpni_reset(priv->mc_io, 0, priv->mc_token);
	if (err)
		netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n",
			    err);

	dpni_close(priv->mc_io, 0, priv->mc_token);
}

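/* Configure an Rx frame queue: set its channel/DPCON destination and
 * register its XDP Rx queue info structure
 */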
static int setup_rx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int err;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(RX) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 1;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_RX, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(RX) failed\n");
		return err;
	}

	/* xdp_rxq setup */
	err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
			       fq->flowid);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg failed\n");
		return err;
	}

	err = xdp_rxq_info_reg_mem_model(&fq->channel->xdp_rxq,
					 MEM_TYPE_PAGE_ORDER0, NULL);
	if (err) {
		dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n");
		return err;
	}

	return 0;
}

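/* Configure a Tx flow: cache the per-TC Tx FQIDs and set the destination
 * of the associated Tx confirmation queue
 */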
static int setup_tx_flow(struct dpaa2_eth_priv *priv,
			 struct dpaa2_eth_fq *fq)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_queue queue;
	struct dpni_queue_id qid;
	int i, err;

	for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
		err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
				     DPNI_QUEUE_TX, i, fq->flowid,
				     &queue, &qid);
		if (err) {
			dev_err(dev, "dpni_get_queue(TX) failed\n");
			return err;
		}
		fq->tx_fqid[i] = qid.fqid;
	}

	/* All Tx queues belonging to the same flowid have the same qdbin */
	fq->tx_qdbin = qid.qdbin;

	err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     &queue, &qid);
	if (err) {
		dev_err(dev, "dpni_get_queue(TX_CONF) failed\n");
		return err;
	}

	fq->fqid = qid.fqid;

	queue.destination.id = fq->channel->dpcon_id;
	queue.destination.type = DPNI_DEST_DPCON;
	queue.destination.priority = 0;
	queue.user_context = (u64)(uintptr_t)fq;
	err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
			     DPNI_QUEUE_TX_CONFIRM, 0, fq->flowid,
			     DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
			     &queue);
	if (err) {
		dev_err(dev, "dpni_set_queue(TX_CONF) failed\n");
		return err;
	}

	return 0;
}

/* Supported header fields for Rx hash distribution key */
static const struct dpaa2_eth_dist_fields dist_fields[] = {
	{
		/* L2 header */
		.rxnfc_field = RXH_L2DA,
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_DA,
		.id = DPAA2_ETH_DIST_ETHDST,
		.size = 6,
	}, {
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_SA,
		.id = DPAA2_ETH_DIST_ETHSRC,
		.size = 6,
	}, {
		/* This is the last ethertype field parsed:
		 * depending on frame format, it can be the MAC ethertype
		 * or the VLAN etype.
		 */
		.cls_prot = NET_PROT_ETH,
		.cls_field = NH_FLD_ETH_TYPE,
		.id = DPAA2_ETH_DIST_ETHTYPE,
		.size = 2,
	}, {
		/* VLAN header */
		.rxnfc_field = RXH_VLAN,
		.cls_prot = NET_PROT_VLAN,
		.cls_field = NH_FLD_VLAN_TCI,
		.id = DPAA2_ETH_DIST_VLAN,
		.size = 2,
	}, {
		/* IP header */
		.rxnfc_field = RXH_IP_SRC,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_SRC,
		.id = DPAA2_ETH_DIST_IPSRC,
		.size = 4,
	}, {
		.rxnfc_field = RXH_IP_DST,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_DST,
		.id = DPAA2_ETH_DIST_IPDST,
		.size = 4,
	}, {
		.rxnfc_field = RXH_L3_PROTO,
		.cls_prot = NET_PROT_IP,
		.cls_field = NH_FLD_IP_PROTO,
		.id = DPAA2_ETH_DIST_IPPROTO,
		.size = 1,
	}, {
		/* Using UDP ports, this is functionally equivalent to raw
		 * byte pairs from L4 header.
		 */
		.rxnfc_field = RXH_L4_B_0_1,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_SRC,
		.id = DPAA2_ETH_DIST_L4SRC,
		.size = 2,
	}, {
		.rxnfc_field = RXH_L4_B_2_3,
		.cls_prot = NET_PROT_UDP,
		.cls_field = NH_FLD_UDP_PORT_DST,
		.id = DPAA2_ETH_DIST_L4DST,
		.size = 2,
	},
};

/* Configure the Rx hash key using the legacy API */
static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_tc_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;

	err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_tc_dist failed\n");

	return err;
}

/* Configure the Rx hash key using the new API */
static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_hash_dist failed\n");

	return err;
}

/* Configure the Rx flow classification key */
static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
	struct device *dev = priv->net_dev->dev.parent;
	struct dpni_rx_dist_cfg dist_cfg;
	int err;

	memset(&dist_cfg, 0, sizeof(dist_cfg));

	dist_cfg.key_cfg_iova = key;
	dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
	dist_cfg.enable = 1;

	err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
	if (err)
		dev_err(dev, "dpni_set_rx_fs_dist failed\n");

	return err;
}

/* Size of the Rx flow classification key */
int dpaa2_eth_cls_key_size(u64 fields)
{
	int i, size = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (!(fields & dist_fields[i].id))
			continue;
		size += dist_fields[i].size;
	}

	return size;
}

/* Offset of header field in Rx classification key */
int dpaa2_eth_cls_fld_off(int prot, int field)
{
	int i, off = 0;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		if (dist_fields[i].cls_prot == prot &&
		    dist_fields[i].cls_field == field)
			return off;
		off += dist_fields[i].size;
	}

	WARN_ONCE(1, "Unsupported header field used for Rx flow cls\n");
	return 0;
}

/* Prune unused fields from the classification rule.
 * Used when masking is not supported
 */
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields)
{
	int off = 0, new_off = 0;
	int i, size;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		size = dist_fields[i].size;
		if (dist_fields[i].id & fields) {
			memcpy(key_mem + new_off, key_mem + off, size);
			new_off += size;
		}
		off += size;
	}
}

/* Set Rx distribution (hash or flow classification) key
 * flags is a combination of RXH_ bits
 */
static int dpaa2_eth_set_dist_key(struct net_device *net_dev,
				  enum dpaa2_eth_rx_dist type, u64 flags)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	struct dpkg_profile_cfg cls_cfg;
	u32 rx_hash_fields = 0;
	dma_addr_t key_iova;
	u8 *dma_mem;
	int i;
	int err = 0;

	memset(&cls_cfg, 0, sizeof(cls_cfg));

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++) {
		struct dpkg_extract *key =
			&cls_cfg.extracts[cls_cfg.num_extracts];

		/* For both Rx hashing and classification keys
		 * we set only the selected fields.
		 */
		if (!(flags & dist_fields[i].id))
			continue;
		if (type == DPAA2_ETH_RX_DIST_HASH)
			rx_hash_fields |= dist_fields[i].rxnfc_field;

		if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) {
			dev_err(dev, "error adding key extraction rule, too many rules?\n");
			return -E2BIG;
		}

		key->type = DPKG_EXTRACT_FROM_HDR;
		key->extract.from_hdr.prot = dist_fields[i].cls_prot;
		key->extract.from_hdr.type = DPKG_FULL_FIELD;
		key->extract.from_hdr.field = dist_fields[i].cls_field;
		cls_cfg.num_extracts++;
	}

	dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
	if (!dma_mem)
		return -ENOMEM;

	err = dpni_prepare_key_cfg(&cls_cfg, dma_mem);
	if (err) {
		dev_err(dev, "dpni_prepare_key_cfg error %d\n", err);
		goto free_key;
	}

	/* Prepare for setting the rx dist */
	key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, key_iova)) {
		dev_err(dev, "DMA mapping failed\n");
		err = -ENOMEM;
		goto free_key;
	}

	if (type == DPAA2_ETH_RX_DIST_HASH) {
		if (dpaa2_eth_has_legacy_dist(priv))
			err = config_legacy_hash_key(priv, key_iova);
		else
			err = config_hash_key(priv, key_iova);
	} else {
		err = config_cls_key(priv, key_iova);
	}

	dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE,
			 DMA_TO_DEVICE);
	if (!err && type == DPAA2_ETH_RX_DIST_HASH)
		priv->rx_hash_fields = rx_hash_fields;

free_key:
	kfree(dma_mem);
	return err;
}

int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u64 key = 0;
	int i;

	if (!dpaa2_eth_hash_enabled(priv))
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(dist_fields); i++)
		if (dist_fields[i].rxnfc_field & flags)
			key |= dist_fields[i].id;

	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_HASH, key);
}

int dpaa2_eth_set_cls(struct net_device *net_dev, u64 flags)
{
	return dpaa2_eth_set_dist_key(net_dev, DPAA2_ETH_RX_DIST_CLS, flags);
}

static int dpaa2_eth_set_default_cls(struct dpaa2_eth_priv *priv)
{
	struct device *dev = priv->net_dev->dev.parent;
	int err;

	/* Check if we actually support Rx flow classification */
	if (dpaa2_eth_has_legacy_dist(priv)) {
		dev_dbg(dev, "Rx cls not supported by current MC version\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_fs_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled in DPNI options\n");
		return -EOPNOTSUPP;
	}

	if (!dpaa2_eth_hash_enabled(priv)) {
		dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n");
		return -EOPNOTSUPP;
	}

	/* If there is no support for masking in the classification table,
	 * we don't set a default key, as it will depend on the rules
	 * added by the user at runtime.
	 */
	if (!dpaa2_eth_fs_mask_enabled(priv))
		goto out;

	err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL);
	if (err)
		return err;

out:
	priv->rx_cls_enabled = 1;

	return 0;
}

/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs,
 * frame queues and channels
 */
static int bind_dpni(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	struct dpni_pools_cfg pools_params;
	struct dpni_error_cfg err_cfg;
	int err = 0;
	int i;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id;
	pools_params.pools[0].backup_pool = 0;
	pools_params.pools[0].buffer_size = priv->rx_buf_size;
	err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params);
	if (err) {
		dev_err(dev, "dpni_set_pools() failed\n");
		return err;
	}

	/* Have the interface implicitly distribute traffic based on
	 * the default hash key
	 */
	err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_DEFAULT);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure hashing\n");

	/* Configure the flow classification key; it includes all
	 * supported header fields and cannot be modified at runtime
	 */
	err = dpaa2_eth_set_default_cls(priv);
	if (err && err != -EOPNOTSUPP)
		dev_err(dev, "Failed to configure Rx classification key\n");

	/* Configure handling of error frames */
	err_cfg.errors = DPAA2_FAS_RX_ERR_MASK;
	err_cfg.set_frame_annotation = 1;
	err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD;
	err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token,
				       &err_cfg);
	if (err) {
		dev_err(dev, "dpni_set_errors_behavior failed\n");
		return err;
	}

	/* Configure Rx and Tx conf queues to generate CDANs */
	for (i = 0; i < priv->num_fqs; i++) {
		switch (priv->fq[i].type) {
		case DPAA2_RX_FQ:
			err = setup_rx_flow(priv, &priv->fq[i]);
			break;
		case DPAA2_TX_CONF_FQ:
			err = setup_tx_flow(priv, &priv->fq[i]);
			break;
		default:
			dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type);
			return -EINVAL;
		}
		if (err)
			return err;
	}

	err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token,
			    DPNI_QUEUE_TX, &priv->tx_qdid);
	if (err) {
		dev_err(dev, "dpni_get_qdid() failed\n");
		return err;
	}

	return 0;
}

/* Allocate rings for storing incoming frame descriptors */
static int alloc_rings(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	int i;

	for (i = 0; i < priv->num_channels; i++) {
		priv->channel[i]->store =
			dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev);
		if (!priv->channel[i]->store) {
			netdev_err(net_dev, "dpaa2_io_store_create() failed\n");
			goto err_ring;
		}
	}

	return 0;

err_ring:
	for (i = 0; i < priv->num_channels; i++) {
		if (!priv->channel[i]->store)
			break;
		dpaa2_io_store_destroy(priv->channel[i]->store);
	}

	return -ENOMEM;
}

static void free_rings(struct dpaa2_eth_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_channels; i++)
		dpaa2_io_store_destroy(priv->channel[i]->store);
}

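/* Select the interface MAC address: use the one set by the bootloader
 * in firmware if available, otherwise the DPNI primary address, falling
 * back to a randomly generated one
 */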
static int set_mac_addr(struct dpaa2_eth_priv *priv)
{
	struct net_device *net_dev = priv->net_dev;
	struct device *dev = net_dev->dev.parent;
	u8 mac_addr[ETH_ALEN], dpni_mac_addr[ETH_ALEN];
	int err;

	/* Get firmware address, if any */
	err = dpni_get_port_mac_addr(priv->mc_io, 0, priv->mc_token, mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_port_mac_addr() failed\n");
		return err;
	}

	/* Get DPNI attributes address, if any */
	err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
					dpni_mac_addr);
	if (err) {
		dev_err(dev, "dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	/* First check if firmware has any address configured by bootloader */
	if (!is_zero_ether_addr(mac_addr)) {
		/* If the DPMAC addr != DPNI addr, update it */
		if (!ether_addr_equal(mac_addr, dpni_mac_addr)) {
			err = dpni_set_primary_mac_addr(priv->mc_io, 0,
							priv->mc_token,
							mac_addr);
			if (err) {
				dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
				return err;
			}
		}
		memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
	} else if (is_zero_ether_addr(dpni_mac_addr)) {
		/* No MAC address configured, fill in net_dev->dev_addr
		 * with a random one
		 */
		eth_hw_addr_random(net_dev);
		dev_dbg_once(dev, "device(s) have all-zero hwaddr, replaced with random\n");

		err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token,
						net_dev->dev_addr);
		if (err) {
			dev_err(dev, "dpni_set_primary_mac_addr() failed\n");
			return err;
		}

		/* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all
		 * practical purposes, this will be our "permanent" mac address,
		 * at least until the next reboot. This move will also permit
		 * register_netdevice() to properly fill up net_dev->perm_addr.
		 */
		net_dev->addr_assign_type = NET_ADDR_PERM;
	} else {
		/* NET_ADDR_PERM is default, all we have to do is
		 * fill in the device addr.
		 */
		memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
	}

3342 3343 3344 3345 3346 3347 3348
	return 0;
}

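/* One-time netdev setup: register the ops structures, configure the MAC
 * address and MTU limits, set the queue counts and advertise features
 */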
static int netdev_init(struct net_device *net_dev)
{
	struct device *dev = net_dev->dev.parent;
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u32 options = priv->dpni_attrs.options;
	u64 supported = 0, not_supported = 0;
	u8 bcast_addr[ETH_ALEN];
	u8 num_queues;
	int err;

	net_dev->netdev_ops = &dpaa2_eth_ops;
	net_dev->ethtool_ops = &dpaa2_ethtool_ops;

	err = set_mac_addr(priv);
	if (err)
		return err;

	/* Explicitly add the broadcast address to the MAC filtering table */
	eth_broadcast_addr(bcast_addr);
	err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr);
	if (err) {
		dev_err(dev, "dpni_add_mac_addr() failed\n");
		return err;
	}

	/* Set MTU upper limit; lower limit is 68B (default value) */
	net_dev->max_mtu = DPAA2_ETH_MAX_MTU;
	err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token,
					DPAA2_ETH_MFL);
	if (err) {
		dev_err(dev, "dpni_set_max_frame_length() failed\n");
		return err;
	}

	/* Set actual number of queues in the net device */
	num_queues = dpaa2_eth_queue_count(priv);
	err = netif_set_real_num_tx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_tx_queues() failed\n");
		return err;
	}
	err = netif_set_real_num_rx_queues(net_dev, num_queues);
	if (err) {
		dev_err(dev, "netif_set_real_num_rx_queues() failed\n");
		return err;
	}

	/* Capabilities listing */
	supported |= IFF_LIVE_ADDR_CHANGE;

	if (options & DPNI_OPT_NO_MAC_FILTER)
		not_supported |= IFF_UNICAST_FLT;
	else
		supported |= IFF_UNICAST_FLT;

	net_dev->priv_flags |= supported;
	net_dev->priv_flags &= ~not_supported;

	/* Features */
	net_dev->features = NETIF_F_RXCSUM |
			    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_SG | NETIF_F_HIGHDMA |
			    NETIF_F_LLTX;
	net_dev->hw_features = net_dev->features;

	return 0;
}

static int poll_link_state(void *arg)
{
	struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg;
	int err;

	while (!kthread_should_stop()) {
		err = link_state_update(priv);
		if (unlikely(err))
			return err;

		msleep(DPAA2_ETH_LINK_STATE_REFRESH);
	}

	return 0;
}

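/* If the DPNI is connected to a non-fixed-link DPMAC endpoint, create a
 * dpaa2_mac instance for it and connect to its PHY through phylink
 */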
static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv)
{
	struct fsl_mc_device *dpni_dev, *dpmac_dev;
	struct dpaa2_mac *mac;
	int err;

	dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent);
	dpmac_dev = fsl_mc_get_endpoint(dpni_dev);
	if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type)
		return 0;

	if (dpaa2_mac_is_type_fixed(dpmac_dev, priv->mc_io))
		return 0;

	mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL);
	if (!mac)
		return -ENOMEM;

	mac->mc_dev = dpmac_dev;
	mac->mc_io = priv->mc_io;
	mac->net_dev = priv->net_dev;

	err = dpaa2_mac_connect(mac);
	if (err) {
		netdev_err(priv->net_dev, "Error connecting to the MAC endpoint\n");
		kfree(mac);
		return err;
	}
	priv->mac = mac;

	return 0;
}

static void dpaa2_eth_disconnect_mac(struct dpaa2_eth_priv *priv)
{
	if (!priv->mac)
		return;

	dpaa2_mac_disconnect(priv->mac);
	kfree(priv->mac);
	priv->mac = NULL;
}

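/* Threaded handler for the DPNI IRQ: reacts to link state changes and to
 * the endpoint at the other end of the DPNI link (dis)appearing
 */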
static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg)
{
	u32 status = ~0;
	struct device *dev = (struct device *)arg;
	struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev);
	struct net_device *net_dev = dev_get_drvdata(dev);
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	int err;

	err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle,
				  DPNI_IRQ_INDEX, &status);
	if (unlikely(err)) {
		netdev_err(net_dev, "Can't get irq status (err %d)\n", err);
		return IRQ_HANDLED;
	}

	if (status & DPNI_IRQ_EVENT_LINK_CHANGED)
		link_state_update(netdev_priv(net_dev));

	if (status & DPNI_IRQ_EVENT_ENDPOINT_CHANGED) {
		set_mac_addr(netdev_priv(net_dev));
		update_tx_fqids(priv);

		rtnl_lock();
		if (priv->mac)
			dpaa2_eth_disconnect_mac(priv);
		else
			dpaa2_eth_connect_mac(priv);
		rtnl_unlock();
	}

	return IRQ_HANDLED;
}

static int setup_irqs(struct fsl_mc_device *ls_dev)
{
	int err = 0;
	struct fsl_mc_device_irq *irq;

	err = fsl_mc_allocate_irqs(ls_dev);
	if (err) {
		dev_err(&ls_dev->dev, "MC irqs allocation failed\n");
		return err;
	}

	irq = ls_dev->irqs[0];
	err = devm_request_threaded_irq(&ls_dev->dev, irq->msi_desc->irq,
					NULL, dpni_irq0_handler_thread,
					IRQF_NO_SUSPEND | IRQF_ONESHOT,
					dev_name(&ls_dev->dev), &ls_dev->dev);
	if (err < 0) {
		dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d\n", err);
		goto free_mc_irq;
	}

	err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle,
				DPNI_IRQ_INDEX, DPNI_IRQ_EVENT_LINK_CHANGED |
				DPNI_IRQ_EVENT_ENDPOINT_CHANGED);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d\n", err);
		goto free_irq;
	}

	err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle,
				  DPNI_IRQ_INDEX, 1);
	if (err < 0) {
		dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d\n", err);
		goto free_irq;
	}

	return 0;

free_irq:
	devm_free_irq(&ls_dev->dev, irq->msi_desc->irq, &ls_dev->dev);
free_mc_irq:
	fsl_mc_free_irqs(ls_dev);

	return err;
}

static void add_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
		netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
			       NAPI_POLL_WEIGHT);
	}
}

static void del_ch_napi(struct dpaa2_eth_priv *priv)
{
	int i;
	struct dpaa2_eth_channel *ch;

	for (i = 0; i < priv->num_channels; i++) {
		ch = priv->channel[i];
		netif_napi_del(&ch->napi);
	}
}

static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
{
	struct device *dev;
	struct net_device *net_dev = NULL;
	struct dpaa2_eth_priv *priv = NULL;
	int err = 0;

	dev = &dpni_dev->dev;

	/* Net device */
	net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_NETDEV_QUEUES);
	if (!net_dev) {
		dev_err(dev, "alloc_etherdev_mq() failed\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(net_dev, dev);
	dev_set_drvdata(dev, net_dev);

	priv = netdev_priv(net_dev);
	priv->net_dev = net_dev;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	/* Obtain a MC portal */
	err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
				     &priv->mc_io);
	if (err) {
		if (err == -ENXIO)
			err = -EPROBE_DEFER;
		else
			dev_err(dev, "MC portal allocation failed\n");
		goto err_portal_alloc;
	}

	/* MC objects initialization and configuration */
	err = setup_dpni(dpni_dev);
	if (err)
		goto err_dpni_setup;

	err = setup_dpio(priv);
	if (err)
		goto err_dpio_setup;

	setup_fqs(priv);

	err = setup_dpbp(priv);
	if (err)
		goto err_dpbp_setup;

	err = bind_dpni(priv);
	if (err)
		goto err_bind;

	/* Add a NAPI context for each channel */
	add_ch_napi(priv);

	/* Percpu statistics */
	priv->percpu_stats = alloc_percpu(*priv->percpu_stats);
	if (!priv->percpu_stats) {
		dev_err(dev, "alloc_percpu(percpu_stats) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_stats;
	}
	priv->percpu_extras = alloc_percpu(*priv->percpu_extras);
	if (!priv->percpu_extras) {
		dev_err(dev, "alloc_percpu(percpu_extras) failed\n");
		err = -ENOMEM;
		goto err_alloc_percpu_extras;
	}

	err = netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	/* Configure checksum offload based on current interface flags */
	err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM));
	if (err)
		goto err_csum;

	err = set_tx_csum(priv, !!(net_dev->features &
				   (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)));
	if (err)
		goto err_csum;

	err = alloc_rings(priv);
	if (err)
		goto err_alloc_rings;

	err = setup_irqs(dpni_dev);
	if (err) {
		netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
		priv->poll_thread = kthread_run(poll_link_state, priv,
						"%s_poll_link", net_dev->name);
		if (IS_ERR(priv->poll_thread)) {
			dev_err(dev, "Error starting polling thread\n");
			goto err_poll_thread;
		}
		priv->do_link_poll = true;
	}

	err = dpaa2_eth_connect_mac(priv);
	if (err)
		goto err_connect_mac;

	err = register_netdev(net_dev);
	if (err < 0) {
		dev_err(dev, "register_netdev() failed\n");
		goto err_netdev_reg;
	}

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_add(priv);
#endif

	dev_info(dev, "Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_reg:
	dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(dpni_dev);
err_poll_thread:
	free_rings(priv);
err_alloc_rings:
err_csum:
err_netdev_init:
	free_percpu(priv->percpu_extras);
err_alloc_percpu_extras:
	free_percpu(priv->percpu_stats);
err_alloc_percpu_stats:
	del_ch_napi(priv);
err_bind:
	free_dpbp(priv);
err_dpbp_setup:
	free_dpio(priv);
err_dpio_setup:
	free_dpni(priv);
err_dpni_setup:
	fsl_mc_portal_free(priv->mc_io);
err_portal_alloc:
	dev_set_drvdata(dev, NULL);
	free_netdev(net_dev);

	return err;
}

static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
{
	struct device *dev;
	struct net_device *net_dev;
	struct dpaa2_eth_priv *priv;

	dev = &ls_dev->dev;
	net_dev = dev_get_drvdata(dev);
	priv = netdev_priv(net_dev);

#ifdef CONFIG_DEBUG_FS
	dpaa2_dbg_remove(priv);
#endif
	rtnl_lock();
	dpaa2_eth_disconnect_mac(priv);
	rtnl_unlock();

	unregister_netdev(net_dev);

	if (priv->do_link_poll)
		kthread_stop(priv->poll_thread);
	else
		fsl_mc_free_irqs(ls_dev);

	free_rings(priv);
	free_percpu(priv->percpu_stats);
	free_percpu(priv->percpu_extras);

	del_ch_napi(priv);
	free_dpbp(priv);
	free_dpio(priv);
	free_dpni(priv);

	fsl_mc_portal_free(priv->mc_io);

	free_netdev(net_dev);

	dev_dbg(net_dev->dev.parent, "Removed interface %s\n", net_dev->name);

	return 0;
}

static const struct fsl_mc_device_id dpaa2_eth_match_id_table[] = {
	{
		.vendor = FSL_MC_VENDOR_FREESCALE,
		.obj_type = "dpni",
	},
	{ .vendor = 0x0 }
};
MODULE_DEVICE_TABLE(fslmc, dpaa2_eth_match_id_table);

static struct fsl_mc_driver dpaa2_eth_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.owner = THIS_MODULE,
	},
	.probe = dpaa2_eth_probe,
	.remove = dpaa2_eth_remove,
	.match_id_table = dpaa2_eth_match_id_table
};

static int __init dpaa2_eth_driver_init(void)
{
	int err;

	dpaa2_eth_dbg_init();
	err = fsl_mc_driver_register(&dpaa2_eth_driver);
	if (err) {
		dpaa2_eth_dbg_exit();
		return err;
	}

	return 0;
}

static void __exit dpaa2_eth_driver_exit(void)
{
	dpaa2_eth_dbg_exit();
	fsl_mc_driver_unregister(&dpaa2_eth_driver);
}

module_init(dpaa2_eth_driver_init);
module_exit(dpaa2_eth_driver_exit);