// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

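/**
 * i40e_alloc_rx_bi_zc - Allocate the xdp_buff pointer array for zero-copy Rx
 * @rx_ring: Rx ring
 *
 * Returns 0 on success, -ENOMEM on allocation failure
 **/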
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

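/**
 * i40e_clear_rx_bi_zc - Zero the xdp_buff pointer array used for zero-copy Rx
 * @rx_ring: Rx ring
 **/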
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

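/**
 * i40e_rx_bi - Return the address of the xdp_buff pointer for a ring entry
 * @rx_ring: Rx ring
 * @idx: index of the ring entry
 **/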
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate the buffer pool to/from
 *
 * This function enables or disables a buffer pool for a certain ring.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be !NULL, because this path is
	 * only enabled when an XDP program is set.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

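/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the pool
 * @rx_ring: Rx ring
 * @count: number of buffers to allocate
 *
 * Places buffers from the AF_XDP buffer pool on the Rx ring and bumps the
 * tail once done.
 *
 * Returns true on success, false if the buffer pool ran out of buffers
 * before all @count descriptors could be filled
 **/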
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because
		 * SBP is *not* set in PRT_SBPVSI (default not set).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		*bi = NULL;
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

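/**
 * i40e_xmit_pkt - Fill a single Tx descriptor from an AF_XDP descriptor
 * @xdp_ring: XDP Tx ring
 * @desc: AF_XDP descriptor to transmit
 * @total_bytes: running byte count for this batch, updated here
 **/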
static void i40e_xmit_pkt(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
			  unsigned int *total_bytes)
{
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;

	dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);
	xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc->len);

	tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
	tx_desc->buffer_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC | I40E_TX_DESC_CMD_EOP,
						  0, desc->len, 0);

	*total_bytes += desc->len;
}

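/**
 * i40e_xmit_pkt_batch - Fill PKTS_PER_BATCH Tx descriptors from AF_XDP descriptors
 * @xdp_ring: XDP Tx ring
 * @desc: array of at least PKTS_PER_BATCH AF_XDP descriptors to transmit
 * @total_bytes: running byte count for this batch, updated here
 **/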
static void i40e_xmit_pkt_batch(struct i40e_ring *xdp_ring, struct xdp_desc *desc,
				unsigned int *total_bytes)
{
	u16 ntu = xdp_ring->next_to_use;
	struct i40e_tx_desc *tx_desc;
	dma_addr_t dma;
	u32 i;

	loop_unrolled_for(i = 0; i < PKTS_PER_BATCH; i++) {
		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc[i].addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, desc[i].len);

		tx_desc = I40E_TX_DESC(xdp_ring, ntu++);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(I40E_TX_DESC_CMD_ICRC |
							  I40E_TX_DESC_CMD_EOP,
							  0, desc[i].len, 0);

		*total_bytes += desc[i].len;
	}

	xdp_ring->next_to_use = ntu;
}

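/**
 * i40e_fill_tx_hw_ring - Fill the Tx HW ring from a batch of AF_XDP descriptors
 * @xdp_ring: XDP Tx ring
 * @descs: AF_XDP descriptors to transmit
 * @nb_pkts: number of descriptors in @descs
 * @total_bytes: running byte count for this batch, updated here
 **/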
static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,
				 unsigned int *total_bytes)
{
	u32 batched, leftover, i;

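	/* Split nb_pkts into full batches of PKTS_PER_BATCH (a power of two,
	 * which the mask arithmetic below relies on) and a remainder that is
	 * sent one packet at a time.
	 */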
	batched = nb_pkts & ~(PKTS_PER_BATCH - 1);
	leftover = nb_pkts & (PKTS_PER_BATCH - 1);
	for (i = 0; i < batched; i += PKTS_PER_BATCH)
		i40e_xmit_pkt_batch(xdp_ring, &descs[i], total_bytes);
	for (i = batched; i < batched + leftover; i++)
		i40e_xmit_pkt(xdp_ring, &descs[i], total_bytes);
}

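/**
 * i40e_set_rs_bit - Set the RS bit on the last descriptor placed on the ring
 * @xdp_ring: XDP Tx ring
 *
 * The RS bit requests a descriptor write-back, so that completed
 * transmissions can be detected and cleaned.
 **/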
static void i40e_set_rs_bit(struct i40e_ring *xdp_ring)
{
	u16 ntu = xdp_ring->next_to_use ? xdp_ring->next_to_use - 1 : xdp_ring->count - 1;
	struct i40e_tx_desc *tx_desc;

	tx_desc = I40E_TX_DESC(xdp_ring, ntu);
	tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS << I40E_TXD_QW1_CMD_SHIFT);
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	struct xdp_desc *descs = xdp_ring->xsk_descs;
	u32 nb_pkts, nb_processed = 0;
	unsigned int total_bytes = 0;

	nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, descs, budget);
	if (!nb_pkts)
		return false;

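	/* If the batch would run past the end of the ring, fill descriptors up
	 * to the end of the ring first, then wrap around and fill the rest.
	 */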
	if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {
		nb_processed = xdp_ring->count - xdp_ring->next_to_use;
		i40e_fill_tx_hw_ring(xdp_ring, descs, nb_processed, &total_bytes);
		xdp_ring->next_to_use = 0;
	}

	i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,
			     &total_bytes);

	/* Request an interrupt for the last frame and bump tail ptr. */
	i40e_set_rs_bit(xdp_ring);
	i40e_xdp_ring_update_tail(xdp_ring);

	i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);

	return true;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

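	/* The head index reported by the hardware may be numerically smaller
	 * than next_to_clean once the ring has wrapped; normalize it so the
	 * subtraction below yields the number of completed frames.
	 */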
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

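/**
 * i40e_xsk_clean_rx_ring - Free any zero-copy buffers left on the Rx ring
 * @rx_ring: Rx ring
 **/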
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);

		if (!rx_bi)
			continue;

		xsk_buff_free(rx_bi);
		rx_bi = NULL;
	}
}

/**
 * i40e_xsk_clean_tx_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: vsi
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}