// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "i40e.h"
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"

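/**
 * i40e_alloc_rx_bi_zc - Allocate the zero-copy software ring
 * @rx_ring: Rx ring
 *
 * Returns 0 on success, -ENOMEM on allocation failure.
 **/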
int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
{
	unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;

	rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
	return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
}

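/**
 * i40e_clear_rx_bi_zc - Clear the zero-copy software ring
 * @rx_ring: Rx ring
 **/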
void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
{
	memset(rx_ring->rx_bi_zc, 0,
	       sizeof(*rx_ring->rx_bi_zc) * rx_ring->count);
}

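/**
 * i40e_rx_bi - Return the address of the xdp_buff pointer at a given index
 * @rx_ring: Rx ring
 * @idx: Index into the zero-copy software ring
 **/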
static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
{
	return &rx_ring->rx_bi_zc[idx];
}

/**
 * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
 * certain ring/qid
 * @vsi: Current VSI
 * @pool: buffer pool
 * @qid: Rx ring to associate buffer pool with
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
				struct xsk_buff_pool *pool,
				u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	bool if_running;
	int err;

	if (vsi->type != I40E_VSI_MAIN)
		return -EINVAL;

	if (qid >= vsi->num_queue_pairs)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &vsi->back->pdev->dev, I40E_RX_DMA_ATTR);
	if (err)
		return err;

	set_bit(qid, vsi->af_xdp_zc_qps);

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

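	/* If the interface is up with XDP enabled, bounce the queue pair
	 * so that the Rx ring is rebuilt in zero-copy mode against the
	 * newly mapped buffer pool.
	 */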
	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;

		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;

		/* Kick start the NAPI context so that receiving will start */
		err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_disable - Disassociate an AF_XDP buffer pool from a
 * certain ring/qid
 * @vsi: Current VSI
 * @qid: Rx ring to disassociate buffer pool from
 *
 * Returns 0 on success, <0 on failure
 **/
static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
{
	struct net_device *netdev = vsi->netdev;
	struct xsk_buff_pool *pool;
	bool if_running;
	int err;

	pool = xsk_get_pool_from_qid(netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);

	if (if_running) {
		err = i40e_queue_pair_disable(vsi, qid);
		if (err)
			return err;
	}

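	/* Mark the queue as non-zero-copy again and release the pool's
	 * DMA mappings; the queue pair is restarted below if it was
	 * running.
	 */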
	clear_bit(qid, vsi->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);

	if (if_running) {
		err = i40e_queue_pair_enable(vsi, qid);
		if (err)
			return err;
	}

	return 0;
}

/**
 * i40e_xsk_pool_setup - Enable/disassociate an AF_XDP buffer pool to/from
 * a ring/qid
 * @vsi: Current VSI
 * @pool: Buffer pool to enable/associate to a ring, or NULL to disable
 * @qid: Rx ring to (dis)associate the buffer pool with/from
 *
 * This function enables or disables a buffer pool on a certain ring/qid.
 *
 * Returns 0 on success, <0 on failure
 **/
int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool,
			u16 qid)
{
	return pool ? i40e_xsk_pool_enable(vsi, pool, qid) :
		i40e_xsk_pool_disable(vsi, qid);
}

/**
 * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR}
 **/
static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = I40E_XDP_PASS;
	struct i40e_ring *xdp_ring;
	struct bpf_prog *xdp_prog;
	u32 act;

	rcu_read_lock();
	/* NB! xdp_prog will always be non-NULL, since this path can only
	 * be taken when an XDP program is attached.
	 */
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];
		result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = I40E_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}

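/**
 * i40e_alloc_rx_buffers_zc - Allocate a number of Rx buffers from the pool
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 *
 * Allocates buffers from the AF_XDP buffer pool and places them on the Rx
 * descriptor ring.
 *
 * Returns true on success, false if the allocation ran dry.
 **/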
bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
	u16 ntu = rx_ring->next_to_use;
	union i40e_rx_desc *rx_desc;
	struct xdp_buff **bi, *xdp;
	dma_addr_t dma;
	bool ok = true;

	rx_desc = I40E_RX_DESC(rx_ring, ntu);
	bi = i40e_rx_bi(rx_ring, ntu);
	do {
		xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!xdp) {
			ok = false;
			goto no_buffers;
		}
		*bi = xdp;
		dma = xsk_buff_xdp_get_dma(xdp);
		rx_desc->read.pkt_addr = cpu_to_le64(dma);
		rx_desc->read.hdr_addr = 0;

		rx_desc++;
		bi++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = I40E_RX_DESC(rx_ring, 0);
			bi = i40e_rx_bi(rx_ring, 0);
			ntu = 0;
		}

		count--;
	} while (count);

no_buffers:
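	/* Only update next_to_use and bump the hardware tail register if
	 * at least one descriptor was actually posted.
	 */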
	if (rx_ring->next_to_use != ntu)
		i40e_release_rx_desc(rx_ring, ntu);

	return ok;
}

/**
 * i40e_construct_skb_zc - Create skbuff from zero-copy Rx buffer
 * @rx_ring: Rx ring
 * @xdp: xdp_buff
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb, or NULL on failure.
 **/
static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring,
					     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       xdp->data_end - xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

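	/* The packet data has been copied out, so the zero-copy buffer
	 * can be returned to the pool immediately.
	 */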
	xsk_buff_free(xdp);
	return skb;
}

/**
 * i40e_inc_ntc - Advance the next_to_clean index
 * @rx_ring: Rx ring
 **/
static void i40e_inc_ntc(struct i40e_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
}

/**
 * i40e_clean_rx_irq_zc - Consumes Rx packets from the hardware ring
 * @rx_ring: Rx ring
 * @budget: NAPI budget
 *
 * Returns amount of work completed
 **/
int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	struct sk_buff *skb;
	bool failure = false;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union i40e_rx_desc *rx_desc;
		struct xdp_buff **bi;
		unsigned int size;
		u64 qword;

		rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
		qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

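		/* A programming status descriptor (e.g. flow director
		 * filter status) carries no packet data; free the
		 * associated buffer and move to the next descriptor.
		 */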
		if (i40e_rx_is_programming_status(qword)) {
			i40e_clean_programming_status(rx_ring,
						      rx_desc->raw.qword[0],
						      qword);
			bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
			xsk_buff_free(*bi);
			*bi = NULL;
			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
		       I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
		if (!size)
			break;

		bi = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
		(*bi)->data_end = (*bi)->data + size;
		xsk_buff_dma_sync_for_cpu(*bi, rx_ring->xsk_pool);

		xdp_res = i40e_run_xdp_zc(rx_ring, *bi);
		if (xdp_res) {
			if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(*bi);

			*bi = NULL;
			total_rx_bytes += size;
			total_rx_packets++;

			cleaned_count++;
			i40e_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */

		/* NB! We are not checking for errors using
		 * i40e_test_staterr with
		 * BIT(I40E_RXD_QW1_ERROR_SHIFT). This is because SBP is
		 * *not* set in PRT_SBPVSI (it is not set by default).
		 */
		skb = i40e_construct_skb_zc(rx_ring, *bi);
		*bi = NULL;
		if (!skb) {
			rx_ring->rx_stats.alloc_buff_failed++;
			break;
		}

		cleaned_count++;
		i40e_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		i40e_process_skb_fields(rx_ring, rx_desc, skb);
		napi_gro_receive(&rx_ring->q_vector->napi, skb);
	}

	if (cleaned_count >= I40E_RX_BUFFER_WRITE)
		failure = !i40e_alloc_rx_buffers_zc(rx_ring, cleaned_count);

	i40e_finalize_xdp_rx(rx_ring, xdp_xmit);
	i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets);

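	/* Set the Rx need_wakeup flag when the ring ran dry or the refill
	 * failed, so user space knows it has to kick the kernel (e.g. via
	 * poll()); clear it otherwise.
	 */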
	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}

/**
 * i40e_xmit_zc - Performs zero-copy Tx AF_XDP
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if the work is finished.
 **/
static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
{
	unsigned int sent_frames = 0, total_bytes = 0;
	struct i40e_tx_desc *tx_desc = NULL;
	struct i40e_tx_buffer *tx_bi;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (budget-- > 0) {
		if (!xsk_tx_peek_desc(xdp_ring->xsk_pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma,
						 desc.len);

		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;

		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buffer_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz =
			build_ctob(I40E_TX_DESC_CMD_ICRC
				   | I40E_TX_DESC_CMD_EOP,
				   0, desc.len, 0);

		sent_frames++;
		total_bytes += tx_bi->bytecount;

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		/* Request an interrupt for the last frame and bump tail ptr. */
		tx_desc->cmd_type_offset_bsz |= (I40E_TX_DESC_CMD_RS <<
						 I40E_TXD_QW1_CMD_SHIFT);
		i40e_xdp_ring_update_tail(xdp_ring);

		xsk_tx_release(xdp_ring->xsk_pool);
		i40e_update_tx_stats(xdp_ring, sent_frames, total_bytes);
	}

	return !!budget;
}

/**
 * i40e_clean_xdp_tx_buffer - Frees and unmaps an XDP Tx entry
 * @tx_ring: XDP Tx ring
 * @tx_bi: Tx buffer info to clean
 **/
static void i40e_clean_xdp_tx_buffer(struct i40e_ring *tx_ring,
				     struct i40e_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	tx_ring->xdp_tx_active--;
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}

/**
 * i40e_clean_xdp_tx_irq - Completes AF_XDP entries, and cleans XDP entries
 * @vsi: Current VSI
 * @tx_ring: XDP Tx ring
 *
 * Returns true if cleanup/transmission is done.
 **/
bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring)
{
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	u32 i, completed_frames, xsk_frames = 0;
	u32 head_idx = i40e_get_head(tx_ring);
	struct i40e_tx_buffer *tx_bi;
	unsigned int ntc;

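	/* The head write-back location tells us how far the hardware has
	 * progressed; descriptors between next_to_clean and head_idx have
	 * been transmitted and can be completed.
	 */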
	if (head_idx < tx_ring->next_to_clean)
		head_idx += tx_ring->count;
	completed_frames = head_idx - tx_ring->next_to_clean;

	if (completed_frames == 0)
		goto out_xmit;

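	/* If no XDP_TX frames are outstanding on this ring, all completed
	 * descriptors belong to XDP sockets and no per-buffer cleanup is
	 * needed.
	 */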
	if (likely(!tx_ring->xdp_tx_active)) {
		xsk_frames = completed_frames;
		goto skip;
	}

	ntc = tx_ring->next_to_clean;

	for (i = 0; i < completed_frames; i++) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf) {
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
			tx_bi->xdpf = NULL;
		} else {
			xsk_frames++;
		}

		if (++ntc >= tx_ring->count)
			ntc = 0;
	}

skip:
	tx_ring->next_to_clean += completed_frames;
	if (unlikely(tx_ring->next_to_clean >= tx_ring->count))
		tx_ring->next_to_clean -= tx_ring->count;

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);

	i40e_arm_wb(tx_ring, vsi, completed_frames);

out_xmit:
	if (xsk_uses_need_wakeup(tx_ring->xsk_pool))
		xsk_set_tx_need_wakeup(tx_ring->xsk_pool);

	return i40e_xmit_zc(tx_ring, I40E_DESC_UNUSED(tx_ring));
}

/**
 * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup
 * @dev: the netdevice
 * @queue_id: queue id to wake up
 * @flags: ignored in our case since we have Rx and Tx in the same NAPI.
 *
 * Returns <0 for errors, 0 otherwise.
 **/
int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *ring;

	if (test_bit(__I40E_CONFIG_BUSY, pf->state))
		return -EAGAIN;

	if (test_bit(__I40E_VSI_DOWN, vsi->state))
		return -ENETDOWN;

	if (!i40e_enabled_xdp_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_queue_pairs)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_pool)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi))
		i40e_force_wb(vsi, ring->q_vector);

	return 0;
}

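/**
 * i40e_xsk_clean_rx_ring - Clean/free the Rx ring buffers on shutdown
 * @rx_ring: Rx ring
 **/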
void i40e_xsk_clean_rx_ring(struct i40e_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct xdp_buff *rx_bi = *i40e_rx_bi(rx_ring, i);

		if (!rx_bi)
			continue;

		xsk_buff_free(rx_bi);
		*i40e_rx_bi(rx_ring, i) = NULL;
	}
}

/**
 * i40e_xsk_clean_xdp_ring - Clean the XDP Tx ring on shutdown
 * @tx_ring: XDP Tx ring
 **/
void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *bp = tx_ring->xsk_pool;
	struct i40e_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

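	/* Entries with an xdpf attached came from XDP_TX and must be
	 * unmapped and freed; the rest are AF_XDP frames that only need
	 * to be reported back to the pool as completed.
	 */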
	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_bi[ntc];

		if (tx_bi->xdpf)
			i40e_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc >= tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(bp, xsk_frames);
}

/**
 * i40e_xsk_any_rx_ring_enabled - Checks if Rx rings have an AF_XDP
 * buffer pool attached
 * @vsi: Current VSI
 *
 * Returns true if any of the Rx rings has an AF_XDP buffer pool attached
 **/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
	struct net_device *netdev = vsi->netdev;
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		if (xsk_get_pool_from_qid(netdev, i))
			return true;
	}

	return false;
}