// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allows a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);

bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

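/* Fill ring helpers exported to drivers: check for, peek at, and release
 * addresses that user space has posted on the fill ring.
 */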
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_cons_has_entries(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_cons_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_release_addr(struct xdp_umem *umem)
{
	xskq_cons_release(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_release_addr);

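/* The need_wakeup flags tell user space when it must kick the kernel via
 * poll()/sendto() to get Rx or Tx processing going: XDP_RING_NEED_WAKEUP is
 * set in the ring flags while a wakeup is required and cleared again once
 * it no longer is.
 */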
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

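/* Copy-mode Rx: take an address off the fill ring, copy the packet (plus
 * any metadata) into the umem and post a descriptor on the Rx ring.
 */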
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (!err) {
		xskq_cons_release(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

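/* Zero-copy Rx: the packet is already in the umem, so only an Rx descriptor
 * pointing at it needs to be posted.
 */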
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_prod_reserve_desc(xs->rx, xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

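/* Common Rx entry: verify that the packet arrived on the device and queue
 * this socket is bound to, then take the zero-copy or copy path.
 */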
static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

static void xsk_flush(struct xdp_sock *xs)
{
	xskq_prod_submit(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

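/* Rx entry for the generic (SKB-mode) XDP path, which is not serialized by
 * a single NAPI context; rx_lock protects the fill and Rx rings instead.
 */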
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_cons_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_prod_reserve_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_cons_release(xs->umem->fq);
	xskq_prod_submit(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

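/* XDP_REDIRECT into an XSKMAP: receive the packet and put the socket on the
 * per-cpu flush list so its Rx ring gets submitted in __xsk_map_flush().
 */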
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	int err;

	err = xsk_rcv(xs, xdp);
	if (err)
		return err;

	if (!xs->flush_node.prev)
		list_add(&xs->flush_node, flush_list);

	return 0;
}

void __xsk_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list);
	struct xdp_sock *xs, *tmp;

	list_for_each_entry_safe(xs, tmp, flush_list, flush_node) {
		xsk_flush(xs);
		__list_del_clearprev(&xs->flush_node);
	}
}

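/* Driver API: publish @nb_entries completed Tx descriptors on the
 * completion ring.
 */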
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_prod_submit_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

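/* Driver API: dequeue one Tx descriptor from any socket sharing this umem,
 * after reserving room for its completion entry.
 */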
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_cons_peek_desc(xs->tx, desc, umem))
			continue;

		if (xskq_prod_reserve_addr(umem->cq, desc->addr))
			goto out;

		xskq_cons_release(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

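/* Zero-copy Tx is driven by the driver, so all that is needed here is a
 * wakeup.
 */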
static int xsk_zc_xmit(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
					       XDP_WAKEUP_TX);
}

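/* skb destructor for copy-mode Tx: the frame is done, so publish its
 * address on the completion ring.
 */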
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	xskq_prod_submit_addr(xs->umem->cq, addr);
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

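/* Copy-mode Tx: turn up to TX_BATCH_SIZE descriptors from the Tx ring into
 * skbs and transmit them directly on the bound queue.
 */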
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_cons_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_prod_reserve(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_cons_release(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

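/* Common sendmsg() backend: pick the zero-copy or the copy Tx path. */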
static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

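/* poll() also drives the need_wakeup protocol: if a wakeup has been
 * requested, kick the driver (or run the copy-mode Tx path) before
 * checking ring state.
 */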
static __poll_t xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	__poll_t mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	dev = xs->dev;
	umem = xs->umem;

	if (umem->need_wakeup) {
		if (dev->netdev_ops->ndo_xsk_wakeup)
			dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
							umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_prod_is_empty(xs->rx))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (xs->tx && !xskq_cons_is_full(xs->tx))
		mask |= EPOLLOUT | EPOLLWRNORM;

	return mask;
}

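/* Allocate a ring and publish it only after it is fully initialized. */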
static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * If zero-copy mode, use the DMA address to do the page contiguity check
 * For all other modes we use addr (kernel virtual address)
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

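/* Bind the socket to a device and queue id. The umem is either set up
 * against the device here, or borrowed from another socket when
 * XDP_SHARED_UMEM is set.
 */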
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We have already our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

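/* Map one of the four rings into user space; the ring is selected by the
 * mmap offset.
 */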
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

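/* On NETDEV_UNREGISTER, unbind every socket bound to the disappearing
 * device and report ENETDOWN on it.
 */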
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err, cpu;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu));
	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);