// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

/* The driver transmit and receive code */

#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
#include "ice.h"
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
#include "ice_eswitch.h"

#define ICE_RX_HDR_SIZE		256

#define FDIR_DESC_RXDID 0x40
#define ICE_FDIR_CLEAN_DELAY 10

/**
 * ice_prgm_fdir_fltr - Program a Flow Director filter
 * @vsi: VSI to send dummy packet
 * @fdir_desc: flow director descriptor
 * @raw_packet: allocated buffer for flow director
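 *
 * Returns 0 on success, -ENOENT if the VSI or Tx ring is missing,
 * -EAGAIN if no Tx descriptors become free in time, or -EINVAL on a
 * DMA mapping error.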
 */
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
		   u8 *raw_packet)
{
	struct ice_tx_buf *tx_buf, *first;
	struct ice_fltr_desc *f_desc;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_ring *tx_ring;
	struct device *dev;
	dma_addr_t dma;
	u32 td_cmd;
	u16 i;

	/* VSI and Tx ring */
	if (!vsi)
		return -ENOENT;
	tx_ring = vsi->tx_rings[0];
	if (!tx_ring || !tx_ring->desc)
		return -ENOENT;
	dev = tx_ring->dev;

	/* we are using two descriptors to add/del a filter and we can wait */
	for (i = ICE_FDIR_CLEAN_DELAY; ICE_DESC_UNUSED(tx_ring) < 2; i--) {
		if (!i)
			return -EAGAIN;
		msleep_interruptible(1);
	}

	dma = dma_map_single(dev, raw_packet, ICE_FDIR_MAX_RAW_PKT_SIZE,
			     DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -EINVAL;

	/* grab the next descriptor */
	i = tx_ring->next_to_use;
	first = &tx_ring->tx_buf[i];
	f_desc = ICE_TX_FDIRDESC(tx_ring, i);
	memcpy(f_desc, fdir_desc, sizeof(*f_desc));

	i++;
	i = (i < tx_ring->count) ? i : 0;
	tx_desc = ICE_TX_DESC(tx_ring, i);
	tx_buf = &tx_ring->tx_buf[i];

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	memset(tx_buf, 0, sizeof(*tx_buf));
	dma_unmap_len_set(tx_buf, len, ICE_FDIR_MAX_RAW_PKT_SIZE);
	dma_unmap_addr_set(tx_buf, dma, dma);

	tx_desc->buf_addr = cpu_to_le64(dma);
	td_cmd = ICE_TXD_LAST_DESC_CMD | ICE_TX_DESC_CMD_DUMMY |
		 ICE_TX_DESC_CMD_RE;

	tx_buf->tx_flags = ICE_TX_FLAGS_DUMMY_PKT;
	tx_buf->raw_buf = raw_packet;

	tx_desc->cmd_type_offset_bsz =
		ice_build_ctob(td_cmd, 0, ICE_FDIR_MAX_RAW_PKT_SIZE, 0);

	/* Force memory write to complete before letting h/w know
	 * there are new descriptors to fetch.
	 */
	wmb();

	/* mark the data descriptor to be watched */
	first->next_to_watch = tx_desc;

	writel(tx_ring->next_to_use, tx_ring->tail);

	return 0;
}

/**
 * ice_unmap_and_free_tx_buf - Release a Tx buffer
 * @ring: the ring that owns the buffer
 * @tx_buf: the buffer to free
 */
static void
ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
	if (tx_buf->skb) {
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(ring->dev, tx_buf->raw_buf);
		else if (ice_ring_is_xdp(ring))
			page_frag_free(tx_buf->raw_buf);
		else
			dev_kfree_skb_any(tx_buf->skb);
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buf, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buf, dma),
			       dma_unmap_len(tx_buf, len),
			       DMA_TO_DEVICE);
	}

	tx_buf->next_to_watch = NULL;
	tx_buf->skb = NULL;
	dma_unmap_len_set(tx_buf, len, 0);
	/* tx_buf must be completely set up in the transmit path */
}

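/**
 * txring_txq - helper to convert from a ring to a netdev_queue
 * @ring: Tx ring for which the netdev queue is looked up
 */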
static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->q_index);
}

/**
 * ice_clean_tx_ring - Free any empty Tx buffers
 * @tx_ring: ring to be cleaned
 */
void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;
	u16 i;

	if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
		ice_xsk_clean_xdp_ring(tx_ring);
		goto tx_skip_free;
	}

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buf)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++)
		ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);

tx_skip_free:
	memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);

	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, size);

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	if (!tx_ring->netdev)
		return;

	/* cleanup Tx queue statistics */
	netdev_tx_reset_queue(txring_txq(tx_ring));
}

/**
 * ice_free_tx_ring - Free Tx resources per queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 */
void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
	u32 size;

	ice_clean_tx_ring(tx_ring);
	devm_kfree(tx_ring->dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;

	if (tx_ring->desc) {
		size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(tx_ring->dev, size,
				   tx_ring->desc, tx_ring->dma);
		tx_ring->desc = NULL;
	}
}

/**
 * ice_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx ring to clean
 * @napi_budget: Used to determine if we are in netpoll
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 */
static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
	unsigned int total_bytes = 0, total_pkts = 0;
	unsigned int budget = ICE_DFLT_IRQ_WORK;
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
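	/* make i a negative offset from the end of the ring so the wrap
	 * check in the cleanup loop below is a cheap !i test
	 */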
	i -= tx_ring->count;

	prefetch(&vsi->state);

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		smp_rmb();	/* prevent any other reads prior to eop_desc */

		ice_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buf->bytecount;
		total_pkts += tx_buf->gso_segs;

		if (ice_ring_is_xdp(tx_ring))
			page_frag_free(tx_buf->raw_buf);
		else
			/* free the skb */
			napi_consume_skb(tx_buf->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);

		/* clear tx_buf data */
		tx_buf->skb = NULL;
		dma_unmap_len_set(tx_buf, len, 0);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			ice_trace(clean_tx_irq_unmap, tx_ring, tx_desc, tx_buf);
			tx_buf++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buf = tx_ring->tx_buf;
				tx_desc = ICE_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buf, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buf, dma),
					       dma_unmap_len(tx_buf, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buf, len, 0);
			}
		}
		ice_trace(clean_tx_irq_unmap_eop, tx_ring, tx_desc, tx_buf);

		/* move us one more past the eop_desc for start of next pkt */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);

	if (ice_ring_is_xdp(tx_ring))
		return !!budget;

	netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
				  total_bytes);

#define TX_WAKE_THRESHOLD ((s16)(DESC_NEEDED * 2))
	if (unlikely(total_pkts && netif_carrier_ok(tx_ring->netdev) &&
		     (ICE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->q_index) &&
		    !test_bit(ICE_VSI_DOWN, vsi->state)) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->q_index);
			++tx_ring->tx_stats.restart_q;
		}
	}

	return !!budget;
}

/**
 * ice_setup_tx_ring - Allocate the Tx descriptors
 * @tx_ring: the Tx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(tx_ring->tx_buf);
	tx_ring->tx_buf =
		devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
			     GFP_KERNEL);
	if (!tx_ring->tx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
		     PAGE_SIZE);
	tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
					    GFP_KERNEL);
	if (!tx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
	tx_ring->tx_stats.prev_pkt = -1;
	return 0;

err:
	devm_kfree(dev, tx_ring->tx_buf);
	tx_ring->tx_buf = NULL;
	return -ENOMEM;
}

/**
 * ice_clean_rx_ring - Free Rx buffers
 * @rx_ring: ring to be cleaned
 */
void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!rx_ring->rx_buf)
		return;

	if (rx_ring->skb) {
		dev_kfree_skb(rx_ring->skb);
		rx_ring->skb = NULL;
	}

	if (rx_ring->xsk_pool) {
		ice_xsk_clean_rx_ring(rx_ring);
		goto rx_skip_free;
	}

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->page)
			continue;

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(dev, rx_buf->dma,
					      rx_buf->page_offset,
					      rx_ring->rx_buf_len,
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);

		rx_buf->page = NULL;
		rx_buf->page_offset = 0;
	}

rx_skip_free:
	memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);

	/* Zero out the descriptor ring */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	memset(rx_ring->desc, 0, size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * ice_free_rx_ring - Free Rx resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 */
void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
	u32 size;

	ice_clean_rx_ring(rx_ring);
	if (rx_ring->vsi->type == ICE_VSI_PF)
		if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
			xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
	rx_ring->xdp_prog = NULL;
	devm_kfree(rx_ring->dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;

	if (rx_ring->desc) {
		size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
			     PAGE_SIZE);
		dmam_free_coherent(rx_ring->dev, size,
				   rx_ring->desc, rx_ring->dma);
		rx_ring->desc = NULL;
	}
}

/**
 * ice_setup_rx_ring - Allocate the Rx descriptors
 * @rx_ring: the Rx ring to set up
 *
 * Return 0 on success, negative on error
 */
int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	u32 size;

	if (!dev)
		return -ENOMEM;

	/* warn if we are about to overwrite the pointer */
	WARN_ON(rx_ring->rx_buf);
	rx_ring->rx_buf =
		devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
			     GFP_KERNEL);
	if (!rx_ring->rx_buf)
		return -ENOMEM;

	/* round up to nearest page */
	size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
		     PAGE_SIZE);
	rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
					    GFP_KERNEL);
	if (!rx_ring->desc) {
		dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
			size);
		goto err;
	}

	rx_ring->next_to_use = 0;
	rx_ring->next_to_clean = 0;

	if (ice_is_xdp_ena_vsi(rx_ring->vsi))
		WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);

	if (rx_ring->vsi->type == ICE_VSI_PF &&
	    !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
		if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
				     rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
			goto err;
	return 0;

err:
	devm_kfree(dev, rx_ring->rx_buf);
	rx_ring->rx_buf = NULL;
	return -ENOMEM;
}

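/**
 * ice_rx_frame_truesize - Returns an actual size of Rx frame in memory
 * @rx_ring: Rx ring the frame is received on
 * @size: packet length from the Rx descriptor
 *
 * Returns the truesize of the Rx buffer, accounting for the page split
 * on small-page systems and for SKB data alignment and headroom otherwise.
 */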
static unsigned int
ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
	unsigned int truesize;

#if (PAGE_SIZE < 8192)
	truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */
#else
	truesize = rx_ring->rx_offset ?
		SKB_DATA_ALIGN(rx_ring->rx_offset + size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
		SKB_DATA_ALIGN(size);
#endif
	return truesize;
}

/**
 * ice_run_xdp - Executes an XDP program on initialized xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 * @xdp_prog: XDP program to run
 * @xdp_ring: ring to be used for XDP_TX action
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
	    struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
	int err;
	u32 act;

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	switch (act) {
	case XDP_PASS:
		return ICE_XDP_PASS;
	case XDP_TX:
		err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
		if (err == ICE_XDP_CONSUMED)
			goto out_failure;
		return err;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		return ICE_XDP_REDIR;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		return ICE_XDP_CONSUMED;
	}
}

/**
 * ice_xdp_xmit - submit packets to XDP ring for transmission
 * @dev: netdev
 * @n: number of XDP frames to be transmitted
 * @frames: XDP frames to be transmitted
 * @flags: transmit flags
 *
 * Returns the number of frames successfully sent. Failed frames
 * will be freed by the XDP core.
 * For error cases, a negative errno code is returned and no frames
 * are transmitted (caller must handle freeing frames).
 */
int
ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	     u32 flags)
{
	struct ice_netdev_priv *np = netdev_priv(dev);
	unsigned int queue_index = smp_processor_id();
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *xdp_ring;
	int nxmit = 0, i;

	if (test_bit(ICE_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi) || queue_index >= vsi->num_xdp_txq)
		return -ENXIO;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	xdp_ring = vsi->xdp_rings[queue_index];
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		int err;

		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
		if (err != ICE_XDP_TX)
			break;
		nxmit++;
	}

	if (unlikely(flags & XDP_XMIT_FLUSH))
		ice_xdp_ring_update_tail(xdp_ring);

	return nxmit;
}

/**
 * ice_alloc_mapped_page - recycle or make a new page
 * @rx_ring: ring to use
 * @bi: rx_buf struct to modify
 *
 * Returns true if the page was successfully allocated or
 * reused.
 */
static bool
ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(ice_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE, ICE_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, ice_rx_pg_order(rx_ring));
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = rx_ring->rx_offset;
	page_ref_add(page, USHRT_MAX - 1);
	bi->pagecnt_bias = USHRT_MAX;

	return true;
}

/**
 * ice_alloc_rx_bufs - Replace used receive buffers
 * @rx_ring: ring to place buffers on
 * @cleaned_count: number of buffers to replace
 *
 * Returns false if all allocations were successful, true if any fail. Returning
 * true signals to the caller that we didn't replace cleaned_count buffers and
 * there is more work to do.
 *
 * First, try to clean "cleaned_count" Rx buffers. Then refill the cleaned Rx
 * buffers. Then bump tail at most one time. Grouping like this lets us avoid
 * multiple tail writes per call.
 */
bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *bi;

	/* do nothing if no valid netdev defined */
	if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) ||
	    !cleaned_count)
		return false;

	/* get the Rx descriptor and buffer based on next_to_use */
	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	bi = &rx_ring->rx_buf[ntu];

	do {
		/* if we fail here, we have work remaining */
		if (!ice_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset,
						 rx_ring->rx_buf_len,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		ntu++;
		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buf;
			ntu = 0;
		}

		/* clear the status bits for the next_to_use descriptor */
		rx_desc->wb.status_error0 = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return !!cleaned_count;
}

/**
 * ice_rx_buf_adjust_pg_offset - Prepare Rx buffer for reuse
 * @rx_buf: Rx buffer to adjust
 * @size: Size of adjustment
 *
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
 * the offset is moved by "size" bytes
 */
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
{
#if (PAGE_SIZE < 8192)
	/* flip page offset to other buffer */
	rx_buf->page_offset ^= size;
#else
	/* move offset up to the next cache line */
	rx_buf->page_offset += size;
#endif
}

/**
 * ice_can_reuse_rx_page - Determine if page can be reused for another Rx
 * @rx_buf: buffer containing the page
 * @rx_buf_pgcnt: rx_buf page refcount pre xdp_do_redirect() call
 *
 * If page is reusable, we have a green light for calling ice_reuse_rx_page,
 * which will assign the current buffer to the buffer that next_to_alloc is
 * pointing to; otherwise, the DMA mapping needs to be destroyed and
 * page freed
 */
static bool
ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
{
	unsigned int pagecnt_bias = rx_buf->pagecnt_bias;
	struct page *page = rx_buf->page;

	/* avoid re-using remote and pfmemalloc pages */
	if (!dev_page_is_reusable(page))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((rx_buf_pgcnt - pagecnt_bias) > 1))
		return false;
#else
#define ICE_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - ICE_RXBUF_2048)
	if (rx_buf->page_offset > ICE_LAST_OFFSET)
		return false;
#endif /* PAGE_SIZE < 8192) */

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(pagecnt_bias == 1)) {
		page_ref_add(page, USHRT_MAX - 1);
		rx_buf->pagecnt_bias = USHRT_MAX;
	}

	return true;
}

/**
 * ice_add_rx_frag - Add contents of Rx buffer to sk_buff as a frag
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: packet length from rx_desc
 *
 * This function will add the data contained in rx_buf->page to the skb.
 * It will just attach the page as a frag to the skb.
 * The function will then update the page offset.
 */
static void
ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
	unsigned int truesize = SKB_DATA_ALIGN(size + rx_ring->rx_offset);
#else
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif

	if (!size)
		return;
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page,
			rx_buf->page_offset, size, truesize);

	/* page is being used so we must update the page offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
}

/**
 * ice_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: Rx descriptor ring to store buffers on
 * @old_buf: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 */
static void
ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls and unnecessary copy of skb.
	 */
	new_buf->dma = old_buf->dma;
	new_buf->page = old_buf->page;
	new_buf->page_offset = old_buf->page_offset;
	new_buf->pagecnt_bias = old_buf->pagecnt_bias;
}

/**
 * ice_get_rx_buf - Fetch Rx buffer and synchronize data for use
 * @rx_ring: Rx descriptor ring to transact packets on
 * @size: size of buffer to add to skb
 * @rx_buf_pgcnt: rx_buf page refcount
 *
 * This function will pull an Rx buffer from the ring and synchronize it
 * for use by the CPU.
 */
static struct ice_rx_buf *
ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
	       int *rx_buf_pgcnt)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
	*rx_buf_pgcnt =
#if (PAGE_SIZE < 8192)
		page_count(rx_buf->page);
#else
		0;
#endif
	prefetchw(rx_buf->page);

	if (!size)
		return rx_buf;
	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma,
				      rx_buf->page_offset, size,
				      DMA_FROM_DEVICE);

	/* We have pulled a buffer for use, so decrement pagecnt_bias */
	rx_buf->pagecnt_bias--;

	return rx_buf;
}

/**
 * ice_build_skb - Build skb around an existing buffer
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function builds an skb around an existing Rx buffer, taking care
 * to set up the skb correctly and avoid any memcpy overhead.
 */
static struct sk_buff *
ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	      struct xdp_buff *xdp)
{
	u8 metasize = xdp->data - xdp->data_meta;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(xdp->data_end -
					       xdp->data_hard_start);
#endif
	struct sk_buff *skb;

	/* Prefetch first cache line of first page. If xdp->data_meta
	 * is unused, this points exactly as xdp->data, otherwise we
	 * likely have a consumer accessing first few bytes of meta
	 * data, and then actual data.
	 */
	net_prefetch(xdp->data_meta);
	/* build an skb around the page buffer */
	skb = build_skb(xdp->data_hard_start, truesize);
	if (unlikely(!skb))
		return NULL;

	/* must record Rx queue, otherwise OS features such as
	 * symmetric queue won't work
	 */
	skb_record_rx_queue(skb, rx_ring->q_index);

	/* update pointers within the skb to store the data */
	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	__skb_put(skb, xdp->data_end - xdp->data);
	if (metasize)
		skb_metadata_set(skb, metasize);

	/* buffer is used by skb, update page_offset */
	ice_rx_buf_adjust_pg_offset(rx_buf, truesize);

	return skb;
}

/**
 * ice_construct_skb - Allocate skb and populate it
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @xdp: xdp_buff pointing to the data
 *
 * This function allocates an skb. It then populates it with the page
 * data from the current receive descriptor, taking care to set up the
 * skb correctly.
 */
static struct sk_buff *
ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
		  struct xdp_buff *xdp)
{
	unsigned int size = xdp->data_end - xdp->data;
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	net_prefetch(xdp->data);

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_record_rx_queue(skb, rx_ring->q_index);
	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > ICE_RX_HDR_SIZE)
		headlen = eth_get_headlen(skb->dev, xdp->data, ICE_RX_HDR_SIZE);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), xdp->data, ALIGN(headlen,
							 sizeof(long)));

	/* if we exhaust the linear part then add what is left as a frag */
	size -= headlen;
	if (size) {
#if (PAGE_SIZE >= 8192)
		unsigned int truesize = SKB_DATA_ALIGN(size);
#else
		unsigned int truesize = ice_rx_pg_size(rx_ring) / 2;
#endif
		skb_add_rx_frag(skb, 0, rx_buf->page,
				rx_buf->page_offset + headlen, size, truesize);
		/* buffer is used by skb, update page_offset */
		ice_rx_buf_adjust_pg_offset(rx_buf, truesize);
	} else {
		/* buffer is unused, reset bias back to rx_buf; data was copied
		 * onto skb's linear part so there's no need for adjusting
		 * page offset and we can reuse this buffer as-is
		 */
		rx_buf->pagecnt_bias++;
	}

	return skb;
}

/**
 * ice_put_rx_buf - Clean up used buffer and either recycle or free
 * @rx_ring: Rx descriptor ring to transact packets on
 * @rx_buf: Rx buffer to pull data from
 * @rx_buf_pgcnt: Rx buffer page count pre xdp_do_redirect()
 *
 * This function will update next_to_clean and then clean up the contents
 * of the rx_buf. It will either recycle the buffer or unmap it and free
 * the associated resources.
 */
static void
ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
	       int rx_buf_pgcnt)
{
	u16 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	if (!rx_buf)
		return;

	if (ice_can_reuse_rx_page(rx_buf, rx_buf_pgcnt)) {
		/* hand second half of page back to the ring */
		ice_reuse_rx_page(rx_ring, rx_buf);
	} else {
		/* we are not reusing the buffer so unmap it */
		dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma,
				     ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     ICE_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buf->page, rx_buf->pagecnt_bias);
	}

	/* clear contents of buffer_info */
	rx_buf->page = NULL;
}

/**
 * ice_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * If the buffer is an EOP buffer, this function exits returning false,
 * otherwise return true indicating that this is in fact a non-EOP buffer.
 */
static bool
ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
	/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
	if (likely(ice_test_staterr(rx_desc, ICE_RXD_EOF)))
		return false;

	rx_ring->rx_stats.non_eop_descs++;

	return true;
}

/**
 * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
 * @rx_ring: Rx descriptor ring to transact packets on
 * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
 * processing. The advantage to this is that on systems that have
 * expensive overhead for IOMMU access this provides a means of avoiding
 * it by maintaining the mapping of the page to the system.
 *
 * Returns amount of work completed
 */
int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int offset = rx_ring->rx_offset;
	struct ice_tx_ring *xdp_ring = NULL;
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb = rx_ring->skb;
	struct bpf_prog *xdp_prog = NULL;
	struct xdp_buff xdp;
	bool failure;

	/* Frame size depend on rx_ring setup when PAGE_SIZE=4K */
#if (PAGE_SIZE < 8192)
	frame_sz = ice_rx_frame_truesize(rx_ring, 0);
#endif
	xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);

	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (xdp_prog)
		xdp_ring = rx_ring->xdp_ring;

	/* start the loop to process Rx packets bounded by 'budget' */
	while (likely(total_rx_pkts < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		struct ice_rx_buf *rx_buf;
		unsigned char *hard_start;
		unsigned int size;
		u16 stat_err_bits;
		int rx_buf_pgcnt;
		u16 vlan_tag = 0;
		u16 rx_ptype;

		/* get the Rx desc from Rx ring based on 'next_to_clean' */
		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		/* status_error_len will always be zero for unused descriptors
		 * because it's cleared in cleanup, and overlaps with hdr_addr
		 * which is always zero because packet split isn't used, if the
		 * hardware wrote DD then it will be non-zero
		 */
		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * DD bit is set.
		 */
		dma_rmb();

		ice_trace(clean_rx_irq, rx_ring, rx_desc);
		if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) {
			struct ice_vsi *ctrl_vsi = rx_ring->vsi;

			if (rx_desc->wb.rxdid == FDIR_DESC_RXDID &&
			    ctrl_vsi->vf_id != ICE_INVAL_VFID)
				ice_vc_fdir_irq_handler(ctrl_vsi, rx_desc);
			ice_put_rx_buf(rx_ring, NULL, 0);
			cleaned_count++;
			continue;
		}

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
			ICE_RX_FLX_DESC_PKT_LEN_M;

		/* retrieve a buffer from the ring */
		rx_buf = ice_get_rx_buf(rx_ring, size, &rx_buf_pgcnt);

		if (!size) {
			xdp.data = NULL;
			xdp.data_end = NULL;
			xdp.data_hard_start = NULL;
			xdp.data_meta = NULL;
			goto construct_skb;
		}

		hard_start = page_address(rx_buf->page) + rx_buf->page_offset -
			     offset;
		xdp_prepare_buff(&xdp, hard_start, offset, size, true);
#if (PAGE_SIZE > 4096)
		/* At larger PAGE_SIZE, frame_sz depend on len size */
		xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif

		if (!xdp_prog)
			goto construct_skb;

		xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
		if (!xdp_res)
			goto construct_skb;
		if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
			xdp_xmit |= xdp_res;
			ice_rx_buf_adjust_pg_offset(rx_buf, xdp.frame_sz);
		} else {
			rx_buf->pagecnt_bias++;
		}
		total_rx_bytes += size;
		total_rx_pkts++;

		cleaned_count++;
		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		continue;
construct_skb:
		if (skb) {
			ice_add_rx_frag(rx_ring, rx_buf, skb, size);
		} else if (likely(xdp.data)) {
			if (ice_ring_uses_build_skb(rx_ring))
				skb = ice_build_skb(rx_ring, rx_buf, &xdp);
			else
				skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
		}
		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			if (rx_buf)
				rx_buf->pagecnt_bias++;
			break;
		}

		ice_put_rx_buf(rx_ring, rx_buf, rx_buf_pgcnt);
		cleaned_count++;

		/* skip if it is NOP desc */
		if (ice_is_non_eop(rx_ring, rx_desc))
			continue;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
		if (unlikely(ice_test_staterr(rx_desc, stat_err_bits))) {
			dev_kfree_skb_any(skb);
			continue;
		}

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		/* pad the skb if needed, to make a valid ethernet frame */
		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;

		/* populate checksum, VLAN, and protocol */
		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

		ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb);
		/* send completed skb up the stack */
		ice_receive_skb(rx_ring, skb, vlan_tag);
		skb = NULL;

		/* update budget accounting */
		total_rx_pkts++;
	}

	/* return up to cleaned_count buffers to hardware */
	failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);

	if (xdp_prog)
		ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
	rx_ring->skb = skb;

	ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);

	/* guarantee a trip back through this routine if there was a failure */
	return failure ? budget : (int)total_rx_pkts;
}

/**
 * ice_net_dim - Update net DIM algorithm
 * @q_vector: the vector associated with the interrupt
 *
 * Create a DIM sample and notify net_dim() so that it can possibly decide
 * a new ITR value based on incoming packets, bytes, and interrupts.
 *
 * This function is a no-op if the ring is not configured to dynamic ITR.
 */
static void ice_net_dim(struct ice_q_vector *q_vector)
{
	struct ice_ring_container *tx = &q_vector->tx;
	struct ice_ring_container *rx = &q_vector->rx;

	if (ITR_IS_DYNAMIC(tx)) {
		struct dim_sample dim_sample = {};
		u64 packets = 0, bytes = 0;
		struct ice_tx_ring *ring;

		ice_for_each_tx_ring(ring, q_vector->tx) {
			packets += ring->stats.pkts;
			bytes += ring->stats.bytes;
		}

		dim_update_sample(q_vector->total_events, packets, bytes,
				  &dim_sample);

		net_dim(&tx->dim, dim_sample);
	}

	if (ITR_IS_DYNAMIC(rx)) {
		struct dim_sample dim_sample = {};
		u64 packets = 0, bytes = 0;
		struct ice_rx_ring *ring;

		ice_for_each_rx_ring(ring, q_vector->rx) {
			packets += ring->stats.pkts;
			bytes += ring->stats.bytes;
		}

		dim_update_sample(q_vector->total_events, packets, bytes,
				  &dim_sample);

		net_dim(&rx->dim, dim_sample);
	}
}

/**
 * ice_buildreg_itr - build value for writing to the GLINT_DYN_CTL register
 * @itr_idx: interrupt throttling index
 * @itr: interrupt throttling value in usecs
 */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{
	/* The ITR value is reported in microseconds, and the register value is
	 * recorded in 2 microsecond units. For this reason we only need to
	 * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
	 * granularity as a shift instead of division. The mask makes sure the
	 * ITR value is never odd so we don't accidentally write into the field
	 * prior to the ITR field.
	 */
	itr &= ICE_ITR_MASK;

	return GLINT_DYN_CTL_INTENA_M | GLINT_DYN_CTL_CLEARPBA_M |
		(itr_idx << GLINT_DYN_CTL_ITR_INDX_S) |
		(itr << (GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S));
}

/**
 * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
 * @q_vector: the vector associated with the interrupt to enable
 *
 * Update the net_dim() algorithm and re-enable the interrupt associated with
 * this vector.
 *
 * If the VSI is down, the interrupt will not be re-enabled.
 */
static void ice_update_ena_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;
	bool wb_en = q_vector->wb_on_itr;
	u32 itr_val;

	if (test_bit(ICE_DOWN, vsi->state))
		return;

	/* When exiting WB_ON_ITR, let ITR resume its normal
	 * interrupts-enabled path.
	 */
	if (wb_en)
		q_vector->wb_on_itr = false;

	/* This will do nothing if dynamic updates are not enabled. */
	ice_net_dim(q_vector);

	/* net_dim() updates ITR out-of-band using a work item */
	itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
	/* trigger an immediate software interrupt when exiting
	 * busy poll, to make sure to catch any pending cleanups
	 * that might have been missed due to interrupt state
	 * transition.
	 */
	if (wb_en) {
		itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
			   GLINT_DYN_CTL_SW_ITR_INDX_M |
			   GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
	}
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
}

/**
 * ice_set_wb_on_itr - set WB_ON_ITR for this q_vector
 * @q_vector: q_vector to set WB_ON_ITR on
 *
 * We need to tell hardware to write-back completed descriptors even when
 * interrupts are disabled. Descriptors will be written back on cache line
 * boundaries without WB_ON_ITR enabled, but if we don't enable WB_ON_ITR
 * descriptors may not be written back if they don't fill a cache line until
 * the next interrupt.
 *
 * This sets the write-back frequency to whatever was set previously for the
 * ITR indices. Also, set the INTENA_MSK bit to make sure hardware knows we
 * aren't meddling with the INTENA_M bit.
 */
static void ice_set_wb_on_itr(struct ice_q_vector *q_vector)
{
	struct ice_vsi *vsi = q_vector->vsi;

	/* already in wb_on_itr mode no need to change it */
	if (q_vector->wb_on_itr)
		return;

	/* use previously set ITR values for all of the ITR indices by
	 * specifying ICE_ITR_NONE, which will vary in adaptive (AIM) mode and
	 * be static in non-adaptive mode (user configured)
	 */
	wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx),
	     ((ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S) &
	      GLINT_DYN_CTL_ITR_INDX_M) | GLINT_DYN_CTL_INTENA_MSK_M |
	     GLINT_DYN_CTL_WB_ON_ITR_M);

	q_vector->wb_on_itr = true;
}

/**
 * ice_napi_poll - NAPI polling Rx/Tx cleanup routine
 * @napi: napi struct with our devices info in it
 * @budget: amount of work driver is allowed to do this pass, in packets
 *
 * This function will clean all queues associated with a q_vector.
 *
 * Returns the amount of work done
 */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
				container_of(napi, struct ice_q_vector, napi);
	struct ice_tx_ring *tx_ring;
	struct ice_rx_ring *rx_ring;
	bool clean_complete = true;
	int budget_per_ring;
	int work_done = 0;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	ice_for_each_tx_ring(tx_ring, q_vector->tx) {
		bool wd = tx_ring->xsk_pool ?
			  ice_clean_tx_irq_zc(tx_ring, budget) :
			  ice_clean_tx_irq(tx_ring, budget);

		if (!wd)
			clean_complete = false;
	}

	/* Handle case where we are called by netpoll with a budget of 0 */
	if (unlikely(budget <= 0))
		return budget;

	/* normally we have 1 Rx ring per q_vector */
	if (unlikely(q_vector->num_ring_rx > 1))
		/* We attempt to distribute budget to each Rx queue fairly, but
		 * don't allow the budget to go below 1 because that would exit
		 * polling early.
		 */
		budget_per_ring = max_t(int, budget / q_vector->num_ring_rx, 1);
	else
		/* Max of 1 Rx ring in this q_vector so give it the budget */
		budget_per_ring = budget;

	ice_for_each_rx_ring(rx_ring, q_vector->rx) {
		int cleaned;

		/* A dedicated path for zero-copy allows making a single
		 * comparison in the irq context instead of many inside the
		 * ice_clean_rx_irq function and makes the codebase cleaner.
		 */
		cleaned = rx_ring->xsk_pool ?
			  ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
			  ice_clean_rx_irq(rx_ring, budget_per_ring);
		work_done += cleaned;
		/* if we clean as many as budgeted, we must not be done */
		if (cleaned >= budget_per_ring)
			clean_complete = false;
	}

	/* If work not completed, return budget and polling will return */
	if (!clean_complete) {
		/* Set the writeback on ITR so partial completions of
		 * cache-lines will still continue even if we're polling.
		 */
		ice_set_wb_on_itr(q_vector);
		return budget;
	}

	/* Exit the polling mode, but don't re-enable interrupts if stack might
	 * poll us due to busy-polling
	 */
	if (likely(napi_complete_done(napi, work_done)))
		ice_update_ena_itr(q_vector);
	else
		ice_set_wb_on_itr(q_vector);

	return min_t(int, work_done, budget - 1);
}

/**
 * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns -EBUSY if a stop is needed, else 0
 */
static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
	/* Memory barrier before checking head and tail */
	smp_mb();

	/* Check again in a case another CPU has just made room available. */
	if (likely(ICE_DESC_UNUSED(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_subqueue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->q_index);
	++tx_ring->tx_stats.restart_q;
	return 0;
}

/**
1504
 * ice_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size:    the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 */
static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
		return 0;

	return __ice_maybe_stop_tx(tx_ring, size);
}

/**
 * ice_tx_map - Build the Tx descriptor
 * @tx_ring: ring to send buffer on
 * @first: first buffer info buffer to use
 * @off: pointer to struct that holds offload parameters
 *
 * This function loops over the skb data pointed to by *first
 * and gets a physical address for each memory location and programs
 * it and the length into the transmit descriptor.
 */
static void
ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
	   struct ice_tx_offload_params *off)
{
	u64 td_offset, td_tag, td_cmd;
	u16 i = tx_ring->next_to_use;
	unsigned int data_len, size;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	struct sk_buff *skb;
	skb_frag_t *frag;
	dma_addr_t dma;

	td_tag = off->td_l2tag1;
	td_cmd = off->td_cmd;
	td_offset = off->td_offset;
	skb = first->skb;

	data_len = skb->data_len;
	size = skb_headlen(skb);

	tx_desc = ICE_TX_DESC(tx_ring, i);

	if (first->tx_flags & ICE_TX_FLAGS_HW_VLAN) {
		td_cmd |= (u64)ICE_TX_DESC_CMD_IL2TAG1;
		td_tag = (first->tx_flags & ICE_TX_FLAGS_VLAN_M) >>
			  ICE_TX_FLAGS_VLAN_S;
	}

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buf = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		unsigned int max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;

		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buf, len, size);
		dma_unmap_addr_set(tx_buf, dma, dma);

		/* align size to end of page */
		max_data += -dma & (ICE_MAX_READ_REQ_SIZE - 1);
		tx_desc->buf_addr = cpu_to_le64(dma);

		/* account for data chunks larger than the hardware
		 * can handle
		 */
		while (unlikely(size > ICE_MAX_DATA_PER_TXD)) {
			tx_desc->cmd_type_offset_bsz =
				ice_build_ctob(td_cmd, td_offset, max_data,
					       td_tag);

			tx_desc++;
			i++;

			if (i == tx_ring->count) {
				tx_desc = ICE_TX_DESC(tx_ring, 0);
				i = 0;
			}

			dma += max_data;
			size -= max_data;

			max_data = ICE_MAX_DATA_PER_TXD_ALIGNED;
			tx_desc->buf_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, td_offset,
							      size, td_tag);

		tx_desc++;
		i++;

		if (i == tx_ring->count) {
			tx_desc = ICE_TX_DESC(tx_ring, 0);
			i = 0;
		}

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size,
				       DMA_TO_DEVICE);

		tx_buf = &tx_ring->tx_buf[i];
	}

	/* record bytecount for BQL */
	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* record SW timestamp if HW timestamp is not available */
	skb_tx_timestamp(first->skb);

	i++;
	if (i == tx_ring->count)
		i = 0;

	/* write last descriptor with RS and EOP bits */
	td_cmd |= (u64)ICE_TXD_LAST_DESC_CMD;
	tx_desc->cmd_type_offset_bsz =
			ice_build_ctob(td_cmd, td_offset, size, td_tag);

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 *
	 * We also use this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	tx_ring->next_to_use = i;

	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

	/* notify HW of packet */
	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
		writel(i, tx_ring->tail);

	return;

dma_error:
	/* clear DMA mappings for failed tx_buf map */
	for (;;) {
		tx_buf = &tx_ring->tx_buf[i];
		ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
		if (tx_buf == first)
			break;
		if (i == 0)
			i = tx_ring->count;
		i--;
	}

	tx_ring->next_to_use = i;
}

/**
 * ice_tx_csum - Enable Tx checksum offloads
 * @first: pointer to the first descriptor
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 0 or error (negative) if checksum offload can't happen, 1 otherwise.
 */
static
int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	u32 l4_len = 0, l3_len = 0, l2_len = 0;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	__be16 frag_off, protocol;
	unsigned char *exthdr;
	u32 offset, cmd = 0;
	u8 l4_proto = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute outer L2 header size */
	l2_len = ip.hdr - skb->data;
	offset = (l2_len / 2) << ICE_TX_DESC_LEN_MACLEN_S;

	protocol = vlan_get_protocol(skb);

	if (protocol == htons(ETH_P_IP))
		first->tx_flags |= ICE_TX_FLAGS_IPV4;
	else if (protocol == htons(ETH_P_IPV6))
		first->tx_flags |= ICE_TX_FLAGS_IPV6;

	if (skb->encapsulation) {
		bool gso_ena = false;
		u32 tunnel = 0;

		/* define outer network header type */
		if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
			tunnel |= (first->tx_flags & ICE_TX_FLAGS_TSO) ?
				  ICE_TX_CTX_EIPT_IPV4 :
				  ICE_TX_CTX_EIPT_IPV4_NO_CSUM;
			l4_proto = ip.v4->protocol;
		} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
			int ret;

			tunnel |= ICE_TX_CTX_EIPT_IPV6;
			exthdr = ip.hdr + sizeof(*ip.v6);
			l4_proto = ip.v6->nexthdr;
			ret = ipv6_skip_exthdr(skb, exthdr - skb->data,
					       &l4_proto, &frag_off);
			if (ret < 0)
				return -1;
		}

		/* define outer transport */
		switch (l4_proto) {
		case IPPROTO_UDP:
			tunnel |= ICE_TXD_CTX_UDP_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_GRE:
			tunnel |= ICE_TXD_CTX_GRE_TUNNELING;
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			break;
		case IPPROTO_IPIP:
		case IPPROTO_IPV6:
			first->tx_flags |= ICE_TX_FLAGS_TUNNEL;
			l4.hdr = skb_inner_network_header(skb);
			break;
		default:
			if (first->tx_flags & ICE_TX_FLAGS_TSO)
				return -1;

			skb_checksum_help(skb);
			return 0;
		}

		/* compute outer L3 header size */
		tunnel |= ((l4.hdr - ip.hdr) / 4) <<
			  ICE_TXD_CTX_QW0_EIPLEN_S;

		/* switch IP header pointer from outer to inner header */
		ip.hdr = skb_inner_network_header(skb);

		/* compute tunnel header size */
		tunnel |= ((ip.hdr - l4.hdr) / 2) <<
			   ICE_TXD_CTX_QW0_NATLEN_S;

		gso_ena = skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL;
		/* indicate if we need to offload outer UDP header */
		if ((first->tx_flags & ICE_TX_FLAGS_TSO) && !gso_ena &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
			tunnel |= ICE_TXD_CTX_QW0_L4T_CS_M;

		/* record tunnel offload values */
		off->cd_tunnel_params |= tunnel;

		/* set DTYP=1 to indicate that it's a Tx context descriptor
		 * in IPsec tunnel mode with Tx offloads in Quad word 1
		 */
		off->cd_qw1 |= (u64)ICE_TX_DESC_DTYPE_CTX;

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);
		l4_proto = 0;

		/* reset type as we transition from outer to inner headers */
		first->tx_flags &= ~(ICE_TX_FLAGS_IPV4 | ICE_TX_FLAGS_IPV6);
		if (ip.v4->version == 4)
			first->tx_flags |= ICE_TX_FLAGS_IPV4;
		if (ip.v6->version == 6)
			first->tx_flags |= ICE_TX_FLAGS_IPV6;
	}
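
	/* Illustrative example (not taken from a real packet): for a
	 * VXLAN-style UDP tunnel with a 20-byte outer IPv4 header, an 8-byte
	 * outer UDP header, an 8-byte VXLAN header and a 14-byte inner
	 * Ethernet header, the math above yields EIPLEN = 20 / 4 = 5
	 * (4-byte words) and NATLEN = (8 + 8 + 14) / 2 = 15 (2-byte words).
	 */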

	/* Enable IP checksum offloads */
	if (first->tx_flags & ICE_TX_FLAGS_IPV4) {
		l4_proto = ip.v4->protocol;
		/* the stack has already computed the IP header checksum; we
		 * only need the hardware to recompute it in the case of TSO.
		 */
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4_CSUM;
		else
			cmd |= ICE_TX_DESC_CMD_IIPT_IPV4;

	} else if (first->tx_flags & ICE_TX_FLAGS_IPV6) {
		cmd |= ICE_TX_DESC_CMD_IIPT_IPV6;
		exthdr = ip.hdr + sizeof(*ip.v6);
		l4_proto = ip.v6->nexthdr;
		if (l4.hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data, &l4_proto,
					 &frag_off);
	} else {
		return -1;
	}

	/* compute inner L3 header size */
	l3_len = l4.hdr - ip.hdr;
	offset |= (l3_len / 4) << ICE_TX_DESC_LEN_IPLEN_S;

	/* Enable L4 checksum offloads */
	switch (l4_proto) {
	case IPPROTO_TCP:
		/* enable checksum offloads */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_TCP;
		l4_len = l4.tcp->doff;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_UDP:
		/* enable UDP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_UDP;
		l4_len = (sizeof(struct udphdr) >> 2);
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;
	case IPPROTO_SCTP:
		/* enable SCTP checksum offload */
		cmd |= ICE_TX_DESC_CMD_L4T_EOFT_SCTP;
		l4_len = sizeof(struct sctphdr) >> 2;
		offset |= l4_len << ICE_TX_DESC_LEN_L4_LEN_S;
		break;

	default:
		if (first->tx_flags & ICE_TX_FLAGS_TSO)
			return -1;
		skb_checksum_help(skb);
		return 0;
	}

	off->td_cmd |= cmd;
	off->td_offset |= offset;
	return 1;
}
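
/* Worked example (illustrative only): for an untagged IPv4/TCP frame with no
 * IP or TCP options, ice_tx_csum() produces MACLEN = 14 / 2 = 7 two-byte
 * words, IPLEN = 20 / 4 = 5 four-byte words and L4LEN = tcp->doff = 5
 * four-byte words, all packed into td_offset via the ICE_TX_DESC_LEN_*_S
 * shifts used above.
 */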

/**
 * ice_tx_prepare_vlan_flags - prepare generic Tx VLAN tagging flags for HW
 * @tx_ring: ring to send buffer on
 * @first: pointer to struct ice_tx_buf
 *
 * Checks the skb and sets up the generic transmit flags related to VLAN
 * tagging for the HW, such as VLAN and DCB.
 */
static void
ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
	struct sk_buff *skb = first->skb;

	/* nothing left to do, software offloaded VLAN */
	if (!skb_vlan_tag_present(skb) && eth_type_vlan(skb->protocol))
		return;

	/* currently, we always assume 802.1Q for VLAN insertion as VLAN
	 * insertion for 802.1AD is not supported
	 */
	if (skb_vlan_tag_present(skb)) {
		first->tx_flags |= skb_vlan_tag_get(skb) << ICE_TX_FLAGS_VLAN_S;
		first->tx_flags |= ICE_TX_FLAGS_HW_VLAN;
	}

	ice_tx_prepare_vlan_flags_dcb(tx_ring, first);
}
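
/* Illustrative example: a tag with PCP 3 and VLAN ID 100 (DEI clear) has
 * TCI (3 << 13) | 100 = 0x6064, so ice_tx_prepare_vlan_flags() records
 * 0x6064 << ICE_TX_FLAGS_VLAN_S plus ICE_TX_FLAGS_HW_VLAN in
 * first->tx_flags.
 */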

/**
 * ice_tso - computes mss and TSO length to prepare for TSO
 * @first: pointer to struct ice_tx_buf
 * @off: pointer to struct that holds offload parameters
 *
 * Returns 1 if TSO was set up, 0 if TSO is not needed, and a negative error
 * code on failure.
 */
static
int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		unsigned char *hdr;
	} l4;
	u64 cd_mss, cd_tso_len;
	u32 paylen;
	u8 l4_start;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	/* cppcheck-suppress unreadVariable */
	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		ip.v4->tot_len = 0;
		ip.v4->check = 0;
	} else {
		ip.v6->payload_len = 0;
	}

	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_IPXIP4 |
					 SKB_GSO_IPXIP6 |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
		    (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
			l4.udp->len = 0;

			/* determine offset of outer transport header */
			l4_start = (u8)(l4.hdr - skb->data);

			/* remove payload length from outer checksum */
			paylen = skb->len - l4_start;
			csum_replace_by_diff(&l4.udp->check,
					     (__force __wsum)htonl(paylen));
		}

		/* reset pointers to inner headers */

		/* cppcheck-suppress unreadVariable */
		ip.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* initialize inner IP header fields */
		if (ip.v4->version == 4) {
			ip.v4->tot_len = 0;
			ip.v4->check = 0;
		} else {
			ip.v6->payload_len = 0;
		}
	}

	/* determine offset of transport header */
	l4_start = (u8)(l4.hdr - skb->data);

	/* remove payload length from checksum */
	paylen = skb->len - l4_start;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		csum_replace_by_diff(&l4.udp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of UDP segmentation header */
		off->header_len = (u8)sizeof(struct udphdr) + l4_start;
	} else {
		csum_replace_by_diff(&l4.tcp->check,
				     (__force __wsum)htonl(paylen));
		/* compute length of TCP segmentation header */
		off->header_len = (u8)((l4.tcp->doff * 4) + l4_start);
	}

	/* update gso_segs and bytecount */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * off->header_len;

	cd_tso_len = skb->len - off->header_len;
	cd_mss = skb_shinfo(skb)->gso_size;

	/* record cdesc_qw1 with TSO parameters */
	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSO << ICE_TXD_CTX_QW1_CMD_S) |
			     (cd_tso_len << ICE_TXD_CTX_QW1_TSO_LEN_S) |
			     (cd_mss << ICE_TXD_CTX_QW1_MSS_S));
	first->tx_flags |= ICE_TX_FLAGS_TSO;
	return 1;
}
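
/* Worked example (numbers are illustrative): an IPv4/TCP GSO skb carrying
 * five 1448-byte segments behind a 54-byte header (14 MAC + 20 IP + 20 TCP)
 * gives header_len = 20 + 34 = 54, cd_tso_len = skb->len - 54 = 7240 and
 * cd_mss = 1448, while first->bytecount grows by (5 - 1) * 54 so BQL
 * accounts for the replicated headers of every segment.
 */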

/**
 * ice_txd_use_count  - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + ICE_DESCS_FOR_SKB_DATA_PTR;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
 */
static unsigned int ice_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + ICE_DESCS_FOR_SKB_DATA_PTR;
}
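
/* Sanity check of the approximation above (illustrative): for a 32K buffer,
 * (32768 * 85) >> 20 = 2, so ice_txd_use_count() returns
 * 2 + ICE_DESCS_FOR_SKB_DATA_PTR, matching DIV_ROUND_UP(32768, 12288) == 3.
 */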

/**
 * ice_xmit_desc_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb.
 */
static unsigned int ice_xmit_desc_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	unsigned int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += ice_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
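
/* Example (illustrative): an skb with a 256-byte linear area and three 4K
 * page fragments needs one descriptor per piece, so ice_xmit_desc_count()
 * returns 4.
 */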

/**
 * __ice_chk_linearize - Check if there are more than 8 buffers per packet
 * @skb: send buffer
 *
 * Note: This HW can't DMA more than 8 buffers to build a packet on the wire
 * and so we need to figure out the cases where we need to linearize the skb.
 *
 * For TSO we need to count the TSO header and segment payload separately.
 * As such we need to check cases where we have 7 fragments or more as we
 * can potentially require 9 DMA transactions, 1 for the TSO header, 1 for
 * the segment payload in the first descriptor, and another 7 for the
 * fragments.
 */
static bool __ice_chk_linearize(struct sk_buff *skb)
{
	const skb_frag_t *frag, *stale;
	int nr_frags, sum;

	/* no need to check if number of frags is less than 7 */
	nr_frags = skb_shinfo(skb)->nr_frags;
	if (nr_frags < (ICE_MAX_BUF_TXD - 1))
		return false;

	/* We need to walk through the list and validate that each group
	 * of 6 fragments totals at least gso_size.
	 */
	nr_frags -= ICE_MAX_BUF_TXD - 2;
	frag = &skb_shinfo(skb)->frags[0];

	/* Initialize sum to one minus gso_size (i.e. -(gso_size - 1)). We
	 * use this as the worst case scenario in which the frag ahead
	 * of us only provides one byte which is why we are limited to 6
	 * descriptors for a single transmit as the header and previous
	 * fragment are already consuming 2 descriptors.
	 */
	sum = 1 - skb_shinfo(skb)->gso_size;

	/* Add size of frags 0 through 4 to create our initial sum */
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);
	sum += skb_frag_size(frag++);

	/* Walk through fragments adding latest fragment, testing it, and
	 * then removing stale fragments from the sum.
	 */
	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
		int stale_size = skb_frag_size(stale);

		sum += skb_frag_size(frag++);

		/* The stale fragment may present us with a smaller
		 * descriptor than the actual fragment size. To account
		 * for that we need to remove all the data on the front and
		 * figure out what the remainder would be in the last
		 * descriptor associated with the fragment.
		 */
		if (stale_size > ICE_MAX_DATA_PER_TXD) {
			int align_pad = -(skb_frag_off(stale)) &
					(ICE_MAX_READ_REQ_SIZE - 1);

			sum -= align_pad;
			stale_size -= align_pad;

			do {
				sum -= ICE_MAX_DATA_PER_TXD_ALIGNED;
				stale_size -= ICE_MAX_DATA_PER_TXD_ALIGNED;
			} while (stale_size > ICE_MAX_DATA_PER_TXD);
		}

		/* if sum is negative we failed to make sufficient progress */
		if (sum < 0)
			return true;

		if (!nr_frags--)
			break;

		sum -= stale_size;
	}

	return false;
}
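
/* Example of the sliding-window check above (illustrative): with
 * gso_size = 2000 and seven 300-byte fragments, sum starts at -1999 and the
 * first six fragments only raise it to -199, so the function returns true
 * and the caller linearizes the skb.
 */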

/**
 * ice_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb:      send buffer
 * @count:    number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 */
static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < ICE_MAX_BUF_TXD))
		return false;

	if (skb_is_gso(skb))
		return __ice_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != ICE_MAX_BUF_TXD;
}

/**
 * ice_tstamp - set up context descriptor for hardware timestamp
 * @tx_ring: pointer to the Tx ring to send buffer on
 * @skb: pointer to the SKB we're sending
 * @first: Tx buffer
 * @off: Tx offload parameters
 */
static void
ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
	   struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
	s8 idx;

	/* only timestamp the outbound packet if the user has requested it */
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
		return;

	if (!tx_ring->ptp_tx)
		return;

	/* Tx timestamps cannot be sampled when doing TSO */
	if (first->tx_flags & ICE_TX_FLAGS_TSO)
		return;

	/* Grab an open timestamp slot */
	idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
	if (idx < 0)
		return;

	off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
			     (ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
			     ((u64)idx << ICE_TXD_CTX_QW1_TSO_LEN_S));
	first->tx_flags |= ICE_TX_FLAGS_TSYN;
}

/**
 * ice_xmit_frame_ring - Sends buffer on Tx ring
 * @skb: send buffer
 * @tx_ring: ring to send buffer on
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
static netdev_tx_t
ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
	struct ice_tx_offload_params offload = { 0 };
	struct ice_vsi *vsi = tx_ring->vsi;
	struct ice_tx_buf *first;
	struct ethhdr *eth;
	unsigned int count;
	int tso, csum;

	ice_trace(xmit_frame_ring, tx_ring, skb);

	count = ice_xmit_desc_count(skb);
	if (ice_chk_linearize(skb, count)) {
		if (__skb_linearize(skb))
			goto out_drop;
		count = ice_txd_use_count(skb->len);
		tx_ring->tx_stats.tx_linearize++;
	}

	/* need: 1 descriptor per page * PAGE_SIZE/ICE_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_head_len/ICE_MAX_DATA_PER_TXD,
	 *       + 4 desc gap to avoid the cache line where head is,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	if (ice_maybe_stop_tx(tx_ring, count + ICE_DESCS_PER_CACHE_LINE +
			      ICE_DESCS_FOR_CTX_DESC)) {
		tx_ring->tx_stats.tx_busy++;
		return NETDEV_TX_BUSY;
	}
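
	/* e.g. (illustrative) an skb with a small head and three page
	 * fragments has count == 4, so at least 4 + 4 (cache-line gap) +
	 * 1 (context descriptor) = 9 descriptors must be free before we
	 * commit to sending it.
	 */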

	offload.tx_ring = tx_ring;

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buf[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN);
	first->gso_segs = 1;
	first->tx_flags = 0;

	/* prepare the VLAN tagging flags for Tx */
	ice_tx_prepare_vlan_flags(tx_ring, first);

	/* set up TSO offload */
	tso = ice_tso(first, &offload);
	if (tso < 0)
		goto out_drop;

	/* always set up Tx checksum offload */
	csum = ice_tx_csum(first, &offload);
	if (csum < 0)
		goto out_drop;

	/* allow CONTROL frames egress from main VSI if FW LLDP disabled */
	eth = (struct ethhdr *)skb_mac_header(skb);
	if (unlikely((skb->priority == TC_PRIO_CONTROL ||
		      eth->h_proto == htons(ETH_P_LLDP)) &&
		     vsi->type == ICE_VSI_PF &&
		     vsi->port_info->qos_cfg.is_sw_lldp))
		offload.cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
					ICE_TX_CTX_DESC_SWTCH_UPLINK <<
					ICE_TXD_CTX_QW1_CMD_S);

	ice_tstamp(tx_ring, skb, first, &offload);
	if (ice_is_switchdev_running(vsi->back))
		ice_eswitch_set_target_vsi(skb, &offload);

	if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
		struct ice_tx_ctx_desc *cdesc;
		u16 i = tx_ring->next_to_use;

		/* grab the next descriptor */
		cdesc = ICE_TX_CTX_DESC(tx_ring, i);
		i++;
		tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

		/* setup context descriptor */
		cdesc->tunneling_params = cpu_to_le32(offload.cd_tunnel_params);
		cdesc->l2tag2 = cpu_to_le16(offload.cd_l2tag2);
		cdesc->rsvd = cpu_to_le16(0);
		cdesc->qw1 = cpu_to_le64(offload.cd_qw1);
	}

	ice_tx_map(tx_ring, first, &offload);
	return NETDEV_TX_OK;

out_drop:
	ice_trace(xmit_frame_ring_drop, tx_ring, skb);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/**
 * ice_start_xmit - Selects the correct VSI and Tx queue to send buffer
 * @skb: send buffer
 * @netdev: network interface device structure
 *
 * Returns NETDEV_TX_OK if sent, else an error code
 */
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_tx_ring *tx_ring;

	tx_ring = vsi->tx_rings[skb->queue_mapping];

	/* hardware can't handle really short frames, hardware padding works
	 * beyond this point
	 */
	if (skb_put_padto(skb, ICE_MIN_TX_LEN))
		return NETDEV_TX_OK;

	return ice_xmit_frame_ring(skb, tx_ring);
}

/**
 * ice_get_dscp_up - return the UP/TC value for a SKB
 * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
 * @skb: SKB to query for info to determine UP/TC
 *
 * This function is to only be called when the PF is in L3 DSCP PFC mode
 */
static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
{
	u8 dscp = 0;

	if (skb->protocol == htons(ETH_P_IP))
		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
	else if (skb->protocol == htons(ETH_P_IPV6))
		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;

	return dcbcfg->dscp_map[dscp];
}
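
/* Example (illustrative): an IPv4 packet marked EF carries dsfield 0xb8, so
 * ipv4_get_dsfield() >> 2 yields DSCP 46 and the priority is taken from
 * dcbcfg->dscp_map[46].
 */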

u16
ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
		 struct net_device *sb_dev)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_dcbx_cfg *dcbcfg;

	dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
	if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
		skb->priority = ice_get_dscp_up(dcbcfg, skb);

	return netdev_pick_tx(netdev, skb, sb_dev);
}

/**
 * ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
 * @tx_ring: tx_ring to clean
 */
void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
	struct ice_vsi *vsi = tx_ring->vsi;
	s16 i = tx_ring->next_to_clean;
	int budget = ICE_DFLT_IRQ_WORK;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;

	tx_buf = &tx_ring->tx_buf[i];
	tx_desc = ICE_TX_DESC(tx_ring, i);
	i -= tx_ring->count;
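
	/* Each programmed Flow Director filter consumes two descriptors: the
	 * filter descriptor itself and the data descriptor for the dummy
	 * packet, so each pass through the loop below retires one filter.
	 */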

	do {
		struct ice_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no pending work */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		/* unmap the data header */
		if (dma_unmap_len(tx_buf, len))
			dma_unmap_single(tx_ring->dev,
					 dma_unmap_addr(tx_buf, dma),
					 dma_unmap_len(tx_buf, len),
					 DMA_TO_DEVICE);
		if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
			devm_kfree(tx_ring->dev, tx_buf->raw_buf);

		/* clear next_to_watch to prevent false hangs */
		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buf_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move past eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_buf;
			tx_desc = ICE_TX_DESC(tx_ring, 0);
		}

		budget--;
	} while (likely(budget));

	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	/* re-enable interrupt if needed */
	ice_irq_dynamic_ena(&vsi->back->hw, vsi, vsi->q_vectors[0]);
}