// SPDX-License-Identifier: GPL-2.0
/* XDP sockets
 *
 * AF_XDP sockets allow a channel between XDP programs and userspace
 * applications.
 * Copyright(c) 2018 Intel Corporation.
 *
 * Author(s): Björn Töpel <bjorn.topel@intel.com>
 *	      Magnus Karlsson <magnus.karlsson@intel.com>
 */

#define pr_fmt(fmt) "AF_XDP: %s: " fmt, __func__

#include <linux/if_xdp.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/socket.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

#define TX_BATCH_SIZE 16

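/* The socket can be used as an XSKMAP redirect target only once it has an
 * Rx ring, a umem and a fill ring.
 */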
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
	return READ_ONCE(xs->rx) && READ_ONCE(xs->umem) &&
		READ_ONCE(xs->umem->fq);
}

bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
{
	return xskq_has_addrs(umem->fq, cnt);
}
EXPORT_SYMBOL(xsk_umem_has_addrs);

u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{
	return xskq_peek_addr(umem->fq, addr, umem);
}
EXPORT_SYMBOL(xsk_umem_peek_addr);

void xsk_umem_discard_addr(struct xdp_umem *umem)
{
	xskq_discard_addr(umem->fq);
}
EXPORT_SYMBOL(xsk_umem_discard_addr);

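/* The need_wakeup helpers below let zero-copy drivers tell userspace, via the
 * ring flags, whether it has to wake up the kernel to get Rx fill or Tx
 * processing done.
 */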
void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
{
	if (umem->need_wakeup & XDP_WAKEUP_RX)
		return;

	umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;
	umem->need_wakeup |= XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_set_rx_need_wakeup);

void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (umem->need_wakeup & XDP_WAKEUP_TX)
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup |= XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_set_tx_need_wakeup);

void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
{
	if (!(umem->need_wakeup & XDP_WAKEUP_RX))
		return;

	umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	umem->need_wakeup &= ~XDP_WAKEUP_RX;
}
EXPORT_SYMBOL(xsk_clear_rx_need_wakeup);

void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	if (!(umem->need_wakeup & XDP_WAKEUP_TX))
		return;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP;
	}
	rcu_read_unlock();

	umem->need_wakeup &= ~XDP_WAKEUP_TX;
}
EXPORT_SYMBOL(xsk_clear_tx_need_wakeup);

bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
{
	return umem->flags & XDP_UMEM_USES_NEED_WAKEUP;
}
EXPORT_SYMBOL(xsk_umem_uses_need_wakeup);

/* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for
 * each page. This is only required in copy mode.
 */
static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
			     u32 len, u32 metalen)
{
	void *to_buf = xdp_umem_get_data(umem, addr);

	addr = xsk_umem_add_offset_to_addr(addr);
	if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) {
		void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
		u64 page_start = addr & ~(PAGE_SIZE - 1);
		u64 first_len = PAGE_SIZE - (addr - page_start);

		memcpy(to_buf, from_buf, first_len + metalen);
		memcpy(next_pg_addr, from_buf + first_len, len - first_len);

		return;
	}

	memcpy(to_buf, from_buf, len + metalen);
}

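/* Copy-mode Rx: take an address from the fill ring, copy the frame (and any
 * metadata) into the umem and post a descriptor on the Rx ring.
 */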
static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	u64 offset = xs->umem->headroom;
	u64 addr, memcpy_addr;
	void *from_buf;
	u32 metalen;
	int err;

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		xs->rx_dropped++;
		return -ENOSPC;
	}

	if (unlikely(xdp_data_meta_unsupported(xdp))) {
		from_buf = xdp->data;
		metalen = 0;
	} else {
		from_buf = xdp->data_meta;
		metalen = xdp->data - xdp->data_meta;
	}

	memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	__xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen);

	offset += metalen;
	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (!err) {
		xskq_discard_addr(xs->umem->fq);
		xdp_return_buff(xdp);
		return 0;
	}

	xs->rx_dropped++;
	return err;
}

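/* Zero-copy Rx: the frame already sits in the umem, so only a descriptor
 * for the buffer handle has to be posted on the Rx ring.
 */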
static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
{
	int err = xskq_produce_batch_desc(xs->rx, (u64)xdp->handle, len);

	if (err)
		xs->rx_dropped++;

	return err;
}

static bool xsk_is_bound(struct xdp_sock *xs)
{
	if (READ_ONCE(xs->state) == XSK_BOUND) {
		/* Matches smp_wmb() in bind(). */
		smp_rmb();
		return true;
	}
	return false;
}

int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 len;

	if (!xsk_is_bound(xs))
		return -EINVAL;

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
		return -EINVAL;

	len = xdp->data_end - xdp->data;

	return (xdp->rxq->mem.type == MEM_TYPE_ZERO_COPY) ?
		__xsk_rcv_zc(xs, xdp, len) : __xsk_rcv(xs, xdp, len);
}

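/* Make any pending Rx descriptors visible to userspace and wake the socket. */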
void xsk_flush(struct xdp_sock *xs)
{
	xskq_produce_flush_desc(xs->rx);
	xs->sk.sk_data_ready(&xs->sk);
}

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	u32 metalen = xdp->data - xdp->data_meta;
	u32 len = xdp->data_end - xdp->data;
	u64 offset = xs->umem->headroom;
	void *buffer;
	u64 addr;
	int err;

	spin_lock_bh(&xs->rx_lock);

	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) {
		err = -EINVAL;
		goto out_unlock;
	}

	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||
	    len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) {
		err = -ENOSPC;
		goto out_drop;
	}

	addr = xsk_umem_adjust_offset(xs->umem, addr, offset);
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, xdp->data_meta, len + metalen);

	addr = xsk_umem_adjust_offset(xs->umem, addr, metalen);
	err = xskq_produce_batch_desc(xs->rx, addr, len);
	if (err)
		goto out_drop;

	xskq_discard_addr(xs->umem->fq);
	xskq_produce_flush_desc(xs->rx);

	spin_unlock_bh(&xs->rx_lock);

	xs->sk.sk_data_ready(&xs->sk);
	return 0;

out_drop:
	xs->rx_dropped++;
out_unlock:
	spin_unlock_bh(&xs->rx_lock);
	return err;
}

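/* Called by drivers to publish nb_entries completed Tx buffers on the
 * completion ring.
 */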
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
{
	xskq_produce_flush_addr_n(umem->cq, nb_entries);
}
EXPORT_SYMBOL(xsk_umem_complete_tx);

void xsk_umem_consume_tx_done(struct xdp_umem *umem)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		xs->sk.sk_write_space(&xs->sk);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(xsk_umem_consume_tx_done);

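/* Called by zero-copy drivers to fetch the next Tx descriptor from any socket
 * sharing this umem, reserving a completion ring entry for it.
 */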
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
{
	struct xdp_sock *xs;

	rcu_read_lock();
	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
		if (!xskq_peek_desc(xs->tx, desc, umem))
			continue;

		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
			goto out;

		xskq_discard_desc(xs->tx);
		rcu_read_unlock();
		return true;
	}

out:
	rcu_read_unlock();
	return false;
}
EXPORT_SYMBOL(xsk_umem_consume_tx);

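/* Zero-copy Tx simply asks the driver to process the Tx ring. */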
static int xsk_zc_xmit(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
					       XDP_WAKEUP_TX);
}

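/* skb destructor for copy-mode Tx: when the skb is freed, post the frame
 * address on the completion ring so userspace can reuse the buffer.
 */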
static void xsk_destruct_skb(struct sk_buff *skb)
{
	u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
	struct xdp_sock *xs = xdp_sk(skb->sk);
	unsigned long flags;

	spin_lock_irqsave(&xs->tx_completion_lock, flags);
	WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
	spin_unlock_irqrestore(&xs->tx_completion_lock, flags);

	sock_wfree(skb);
}

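/* Copy-mode Tx: drain up to TX_BATCH_SIZE descriptors from the Tx ring, copy
 * each frame into a freshly allocated skb and send it with dev_direct_xmit().
 * Completions are posted from xsk_destruct_skb().
 */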
static int xsk_generic_xmit(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);
	u32 max_batch = TX_BATCH_SIZE;
	bool sent_frame = false;
	struct xdp_desc desc;
	struct sk_buff *skb;
	int err = 0;

	mutex_lock(&xs->mutex);

	if (xs->queue_id >= xs->dev->real_num_tx_queues)
		goto out;

	while (xskq_peek_desc(xs->tx, &desc, xs->umem)) {
		char *buffer;
		u64 addr;
		u32 len;

		if (max_batch-- == 0) {
			err = -EAGAIN;
			goto out;
		}

		len = desc.len;
		skb = sock_alloc_send_skb(sk, len, 1, &err);
		if (unlikely(!skb)) {
			err = -EAGAIN;
			goto out;
		}

		skb_put(skb, len);
		addr = desc.addr;
		buffer = xdp_umem_get_data(xs->umem, addr);
		err = skb_store_bits(skb, 0, buffer, len);
		if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
			kfree_skb(skb);
			goto out;
		}

		skb->dev = xs->dev;
		skb->priority = sk->sk_priority;
		skb->mark = sk->sk_mark;
		skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr;
		skb->destructor = xsk_destruct_skb;

		err = dev_direct_xmit(skb, xs->queue_id);
		xskq_discard_desc(xs->tx);
		/* Ignore NET_XMIT_CN as packet might have been sent */
		if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
			/* SKB completed but not sent */
			err = -EBUSY;
			goto out;
		}

		sent_frame = true;
	}

out:
	if (sent_frame)
		sk->sk_write_space(sk);

	mutex_unlock(&xs->mutex);
	return err;
}

static int __xsk_sendmsg(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!(xs->dev->flags & IFF_UP)))
		return -ENETDOWN;
	if (unlikely(!xs->tx))
		return -ENOBUFS;

	return xs->zc ? xsk_zc_xmit(xs) : xsk_generic_xmit(sk);
}

static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	bool need_wait = !(m->msg_flags & MSG_DONTWAIT);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);

	if (unlikely(!xsk_is_bound(xs)))
		return -ENXIO;
	if (unlikely(need_wait))
		return -EOPNOTSUPP;

	return __xsk_sendmsg(sk);
}

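/* poll() first kicks the driver (or drives copy-mode Tx) if need_wakeup is
 * set, then reports POLLIN when the Rx ring has entries and POLLOUT when the
 * Tx ring has room.
 */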
static unsigned int xsk_poll(struct file *file, struct socket *sock,
			     struct poll_table_struct *wait)
{
	unsigned int mask = datagram_poll(file, sock, wait);
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	struct xdp_umem *umem;

	if (unlikely(!xsk_is_bound(xs)))
		return mask;

	dev = xs->dev;
	umem = xs->umem;

	if (umem->need_wakeup) {
		if (dev->netdev_ops->ndo_xsk_wakeup)
			dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id,
							umem->need_wakeup);
		else
			/* Poll needs to drive Tx also in copy mode */
			__xsk_sendmsg(sk);
	}

	if (xs->rx && !xskq_empty_desc(xs->rx))
		mask |= POLLIN | POLLRDNORM;
	if (xs->tx && !xskq_full_desc(xs->tx))
		mask |= POLLOUT | POLLWRNORM;

	return mask;
}

static int xsk_init_queue(u32 entries, struct xsk_queue **queue,
			  bool umem_queue)
{
	struct xsk_queue *q;

	if (entries == 0 || *queue || !is_power_of_2(entries))
		return -EINVAL;

	q = xskq_create(entries, umem_queue);
	if (!q)
		return -ENOMEM;

	/* Make sure queue is ready before it can be seen by others */
	smp_wmb();
	WRITE_ONCE(*queue, q);
	return 0;
}

static void xsk_unbind_dev(struct xdp_sock *xs)
{
	struct net_device *dev = xs->dev;

	if (xs->state != XSK_BOUND)
		return;
	WRITE_ONCE(xs->state, XSK_UNBOUND);

	/* Wait for driver to stop using the xdp socket. */
	xdp_del_sk_umem(xs->umem, xs);
	xs->dev = NULL;
	synchronize_net();
	dev_put(dev);
}

static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs,
					      struct xdp_sock ***map_entry)
{
	struct xsk_map *map = NULL;
	struct xsk_map_node *node;

	*map_entry = NULL;

	spin_lock_bh(&xs->map_list_lock);
	node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node,
					node);
	if (node) {
		WARN_ON(xsk_map_inc(node->map));
		map = node->map;
		*map_entry = node->map_entry;
	}
	spin_unlock_bh(&xs->map_list_lock);
	return map;
}

static void xsk_delete_from_maps(struct xdp_sock *xs)
{
	/* This function removes the current XDP socket from all the
	 * maps it resides in. We need to take extra care here, due to
	 * the two locks involved. Each map has a lock synchronizing
	 * updates to the entries, and each socket has a lock that
	 * synchronizes access to the list of maps (map_list). For
	 * deadlock avoidance the locks need to be taken in the order
	 * "map lock"->"socket map list lock". We start off by
	 * accessing the socket map list, and take a reference to the
	 * map to guarantee existence between the
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete()
	 * calls. Then we ask the map to remove the socket, which
	 * tries to remove the socket from the map. Note that there
	 * might be updates to the map between
	 * xsk_get_map_list_entry() and xsk_map_try_sock_delete().
	 */
	struct xdp_sock **map_entry = NULL;
	struct xsk_map *map;

	while ((map = xsk_get_map_list_entry(xs, &map_entry))) {
		xsk_map_try_sock_delete(map, xs, map_entry);
		xsk_map_put(map);
	}
}

static int xsk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net *net;

	if (!sk)
		return 0;

	net = sock_net(sk);

	mutex_lock(&net->xdp.lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	local_bh_enable();

	xsk_delete_from_maps(xs);
	mutex_lock(&xs->mutex);
	xsk_unbind_dev(xs);
	mutex_unlock(&xs->mutex);

	xskq_destroy(xs->rx);
	xskq_destroy(xs->tx);

	sock_orphan(sk);
	sock->sk = NULL;

	sk_refcnt_debug_release(sk);
	sock_put(sk);

	return 0;
}

static struct socket *xsk_lookup_xsk_from_fd(int fd)
{
	struct socket *sock;
	int err;

	sock = sockfd_lookup(fd, &err);
	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	if (sock->sk->sk_family != PF_XDP) {
		sockfd_put(sock);
		return ERR_PTR(-ENOPROTOOPT);
	}

	return sock;
}

/* Check if umem pages are contiguous.
 * In zero-copy mode, use the DMA address to do the page contiguity check.
 * For all other modes we use addr (kernel virtual address).
 * Store the result in the low bits of addr.
 */
static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags)
{
	struct xdp_umem_page *pgs = umem->pages;
	int i, is_contig;

	for (i = 0; i < umem->npgs - 1; i++) {
		is_contig = (flags & XDP_ZEROCOPY) ?
			(pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) :
			(pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr);
		pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT;
	}
}

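/* Bind the socket to a device and queue id. With XDP_SHARED_UMEM the umem of
 * an already bound socket is reused; otherwise this socket's own umem is
 * registered with the device.
 */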
static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr;
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	struct net_device *dev;
	u32 flags, qid;
	int err = 0;

	if (addr_len < sizeof(struct sockaddr_xdp))
		return -EINVAL;
	if (sxdp->sxdp_family != AF_XDP)
		return -EINVAL;

	flags = sxdp->sxdp_flags;
	if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY |
		      XDP_USE_NEED_WAKEUP))
		return -EINVAL;

	rtnl_lock();
	mutex_lock(&xs->mutex);
	if (xs->state != XSK_READY) {
		err = -EBUSY;
		goto out_release;
	}

	dev = dev_get_by_index(sock_net(sk), sxdp->sxdp_ifindex);
	if (!dev) {
		err = -ENODEV;
		goto out_release;
	}

	if (!xs->rx && !xs->tx) {
		err = -EINVAL;
		goto out_unlock;
	}

	qid = sxdp->sxdp_queue_id;

	if (flags & XDP_SHARED_UMEM) {
		struct xdp_sock *umem_xs;
		struct socket *sock;

		if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) ||
		    (flags & XDP_USE_NEED_WAKEUP)) {
			/* Cannot specify flags for shared sockets. */
			err = -EINVAL;
			goto out_unlock;
		}

		if (xs->umem) {
			/* We already have our own. */
			err = -EINVAL;
			goto out_unlock;
		}

		sock = xsk_lookup_xsk_from_fd(sxdp->sxdp_shared_umem_fd);
		if (IS_ERR(sock)) {
			err = PTR_ERR(sock);
			goto out_unlock;
		}

		umem_xs = xdp_sk(sock->sk);
		if (!xsk_is_bound(umem_xs)) {
			err = -EBADF;
			sockfd_put(sock);
			goto out_unlock;
		}
		if (umem_xs->dev != dev || umem_xs->queue_id != qid) {
			err = -EINVAL;
			sockfd_put(sock);
			goto out_unlock;
		}

		xdp_get_umem(umem_xs->umem);
		WRITE_ONCE(xs->umem, umem_xs->umem);
		sockfd_put(sock);
	} else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) {
		err = -EINVAL;
		goto out_unlock;
	} else {
		/* This xsk has its own umem. */
		xskq_set_umem(xs->umem->fq, xs->umem->size,
			      xs->umem->chunk_mask);
		xskq_set_umem(xs->umem->cq, xs->umem->size,
			      xs->umem->chunk_mask);

		err = xdp_umem_assign_dev(xs->umem, dev, qid, flags);
		if (err)
			goto out_unlock;

		xsk_check_page_contiguity(xs->umem, flags);
	}

	xs->dev = dev;
	xs->zc = xs->umem->zc;
	xs->queue_id = qid;
	xskq_set_umem(xs->rx, xs->umem->size, xs->umem->chunk_mask);
	xskq_set_umem(xs->tx, xs->umem->size, xs->umem->chunk_mask);
	xdp_add_sk_umem(xs->umem, xs);

out_unlock:
	if (err) {
		dev_put(dev);
	} else {
		/* Matches smp_rmb() in bind() for shared umem
		 * sockets, and xsk_is_bound().
		 */
		smp_wmb();
		WRITE_ONCE(xs->state, XSK_BOUND);
	}
out_release:
	mutex_unlock(&xs->mutex);
	rtnl_unlock();
	return err;
}

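/* Layout of struct xdp_umem_reg before the flags field was added, kept so
 * that setsockopt calls from older applications are still accepted.
 */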
struct xdp_umem_reg_v1 {
	__u64 addr; /* Start of packet data area */
	__u64 len; /* Length of packet data area */
	__u32 chunk_size;
	__u32 headroom;
};

static int xsk_setsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int err;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	switch (optname) {
	case XDP_RX_RING:
	case XDP_TX_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (optlen < sizeof(entries))
			return -EINVAL;
		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		q = (optname == XDP_TX_RING) ? &xs->tx : &xs->rx;
		err = xsk_init_queue(entries, q, false);
		if (!err && optname == XDP_TX_RING)
			/* Tx needs to be explicitly woken up the first time */
			xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP;
		mutex_unlock(&xs->mutex);
		return err;
	}
	case XDP_UMEM_REG:
	{
		size_t mr_size = sizeof(struct xdp_umem_reg);
		struct xdp_umem_reg mr = {};
		struct xdp_umem *umem;

		if (optlen < sizeof(struct xdp_umem_reg_v1))
			return -EINVAL;
		else if (optlen < sizeof(mr))
			mr_size = sizeof(struct xdp_umem_reg_v1);

		if (copy_from_user(&mr, optval, mr_size))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY || xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}

		umem = xdp_umem_create(&mr);
		if (IS_ERR(umem)) {
			mutex_unlock(&xs->mutex);
			return PTR_ERR(umem);
		}

		/* Make sure umem is ready before it can be seen by others */
		smp_wmb();
		WRITE_ONCE(xs->umem, umem);
		mutex_unlock(&xs->mutex);
		return 0;
	}
	case XDP_UMEM_FILL_RING:
	case XDP_UMEM_COMPLETION_RING:
	{
		struct xsk_queue **q;
		int entries;

		if (copy_from_user(&entries, optval, sizeof(entries)))
			return -EFAULT;

		mutex_lock(&xs->mutex);
		if (xs->state != XSK_READY) {
			mutex_unlock(&xs->mutex);
			return -EBUSY;
		}
		if (!xs->umem) {
			mutex_unlock(&xs->mutex);
			return -EINVAL;
		}

		q = (optname == XDP_UMEM_FILL_RING) ? &xs->umem->fq :
			&xs->umem->cq;
		err = xsk_init_queue(entries, q, true);
		mutex_unlock(&xs->mutex);
		return err;
	}
	default:
		break;
	}

	return -ENOPROTOOPT;
}

static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_rxtx_ring, desc);
}

static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring)
{
	ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer);
	ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer);
	ring->desc = offsetof(struct xdp_umem_ring, desc);
}

static int xsk_getsockopt(struct socket *sock, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct xdp_sock *xs = xdp_sk(sk);
	int len;

	if (level != SOL_XDP)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case XDP_STATISTICS:
	{
		struct xdp_statistics stats;

		if (len < sizeof(stats))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		stats.rx_dropped = xs->rx_dropped;
		stats.rx_invalid_descs = xskq_nb_invalid_descs(xs->rx);
		stats.tx_invalid_descs = xskq_nb_invalid_descs(xs->tx);
		mutex_unlock(&xs->mutex);

		if (copy_to_user(optval, &stats, sizeof(stats)))
			return -EFAULT;
		if (put_user(sizeof(stats), optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_MMAP_OFFSETS:
	{
		struct xdp_mmap_offsets off;
		struct xdp_mmap_offsets_v1 off_v1;
		bool flags_supported = true;
		void *to_copy;

		if (len < sizeof(off_v1))
			return -EINVAL;
		else if (len < sizeof(off))
			flags_supported = false;

		if (flags_supported) {
			/* xdp_ring_offset is identical to xdp_ring_offset_v1
			 * except for the flags field added to the end.
			 */
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.rx);
			xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *)
					       &off.tx);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.fr);
			xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *)
					       &off.cr);
			off.rx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.tx.flags = offsetof(struct xdp_rxtx_ring,
						ptrs.flags);
			off.fr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);
			off.cr.flags = offsetof(struct xdp_umem_ring,
						ptrs.flags);

			len = sizeof(off);
			to_copy = &off;
		} else {
			xsk_enter_rxtx_offsets(&off_v1.rx);
			xsk_enter_rxtx_offsets(&off_v1.tx);
			xsk_enter_umem_offsets(&off_v1.fr);
			xsk_enter_umem_offsets(&off_v1.cr);

			len = sizeof(off_v1);
			to_copy = &off_v1;
		}

		if (copy_to_user(optval, to_copy, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	case XDP_OPTIONS:
	{
		struct xdp_options opts = {};

		if (len < sizeof(opts))
			return -EINVAL;

		mutex_lock(&xs->mutex);
		if (xs->zc)
			opts.flags |= XDP_OPTIONS_ZEROCOPY;
		mutex_unlock(&xs->mutex);

		len = sizeof(opts);
		if (copy_to_user(optval, &opts, len))
			return -EFAULT;
		if (put_user(len, optlen))
			return -EFAULT;

		return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}

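/* mmap() maps one of the four rings (Rx, Tx, fill or completion) into
 * userspace, selected by the page offset in vm_pgoff.
 */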
static int xsk_mmap(struct file *file, struct socket *sock,
		    struct vm_area_struct *vma)
{
	loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long size = vma->vm_end - vma->vm_start;
	struct xdp_sock *xs = xdp_sk(sock->sk);
	struct xsk_queue *q = NULL;
	struct xdp_umem *umem;
	unsigned long pfn;
	struct page *qpg;

	if (READ_ONCE(xs->state) != XSK_READY)
		return -EBUSY;

	if (offset == XDP_PGOFF_RX_RING) {
		q = READ_ONCE(xs->rx);
	} else if (offset == XDP_PGOFF_TX_RING) {
		q = READ_ONCE(xs->tx);
	} else {
		umem = READ_ONCE(xs->umem);
		if (!umem)
			return -EINVAL;

		/* Matches the smp_wmb() in XDP_UMEM_REG */
		smp_rmb();
		if (offset == XDP_UMEM_PGOFF_FILL_RING)
			q = READ_ONCE(umem->fq);
		else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
			q = READ_ONCE(umem->cq);
	}

	if (!q)
		return -EINVAL;

	/* Matches the smp_wmb() in xsk_init_queue */
	smp_rmb();
	qpg = virt_to_head_page(q->ring);
	if (size > page_size(qpg))
		return -EINVAL;

	pfn = virt_to_phys(q->ring) >> PAGE_SHIFT;
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       size, vma->vm_page_prot);
}

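/* On NETDEV_UNREGISTER, unbind every socket bound to the vanishing device and
 * report ENETDOWN to its owner.
 */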
static int xsk_notifier(struct notifier_block *this,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);
	struct sock *sk;

	switch (msg) {
	case NETDEV_UNREGISTER:
		mutex_lock(&net->xdp.lock);
		sk_for_each(sk, &net->xdp.list) {
			struct xdp_sock *xs = xdp_sk(sk);

			mutex_lock(&xs->mutex);
			if (xs->dev == dev) {
				sk->sk_err = ENETDOWN;
				if (!sock_flag(sk, SOCK_DEAD))
					sk->sk_error_report(sk);

				xsk_unbind_dev(xs);

				/* Clear device references in umem. */
				xdp_umem_clear_dev(xs->umem);
			}
			mutex_unlock(&xs->mutex);
		}
		mutex_unlock(&net->xdp.lock);
		break;
	}
	return NOTIFY_DONE;
}

static struct proto xsk_proto = {
	.name =		"XDP",
	.owner =	THIS_MODULE,
	.obj_size =	sizeof(struct xdp_sock),
};

static const struct proto_ops xsk_proto_ops = {
	.family		= PF_XDP,
	.owner		= THIS_MODULE,
	.release	= xsk_release,
	.bind		= xsk_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= xsk_poll,
	.ioctl		= sock_no_ioctl,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= xsk_setsockopt,
	.getsockopt	= xsk_getsockopt,
	.sendmsg	= xsk_sendmsg,
	.recvmsg	= sock_no_recvmsg,
	.mmap		= xsk_mmap,
	.sendpage	= sock_no_sendpage,
};

static void xsk_destruct(struct sock *sk)
{
	struct xdp_sock *xs = xdp_sk(sk);

	if (!sock_flag(sk, SOCK_DEAD))
		return;

	xdp_put_umem(xs->umem);

	sk_refcnt_debug_dec(sk);
}

static int xsk_create(struct net *net, struct socket *sock, int protocol,
		      int kern)
{
	struct sock *sk;
	struct xdp_sock *xs;

	if (!ns_capable(net->user_ns, CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (protocol)
		return -EPROTONOSUPPORT;

	sock->state = SS_UNCONNECTED;

	sk = sk_alloc(net, PF_XDP, GFP_KERNEL, &xsk_proto, kern);
	if (!sk)
		return -ENOBUFS;

	sock->ops = &xsk_proto_ops;

	sock_init_data(sock, sk);

	sk->sk_family = PF_XDP;

	sk->sk_destruct = xsk_destruct;
	sk_refcnt_debug_inc(sk);

	sock_set_flag(sk, SOCK_RCU_FREE);

	xs = xdp_sk(sk);
	xs->state = XSK_READY;
	mutex_init(&xs->mutex);
	spin_lock_init(&xs->rx_lock);
	spin_lock_init(&xs->tx_completion_lock);

	INIT_LIST_HEAD(&xs->map_list);
	spin_lock_init(&xs->map_list_lock);

	mutex_lock(&net->xdp.lock);
	sk_add_node_rcu(sk, &net->xdp.list);
	mutex_unlock(&net->xdp.lock);

	local_bh_disable();
	sock_prot_inuse_add(net, &xsk_proto, 1);
	local_bh_enable();

	return 0;
}

static const struct net_proto_family xsk_family_ops = {
	.family = PF_XDP,
	.create = xsk_create,
	.owner	= THIS_MODULE,
};

static struct notifier_block xsk_netdev_notifier = {
	.notifier_call	= xsk_notifier,
};

static int __net_init xsk_net_init(struct net *net)
{
	mutex_init(&net->xdp.lock);
	INIT_HLIST_HEAD(&net->xdp.list);
	return 0;
}

static void __net_exit xsk_net_exit(struct net *net)
{
	WARN_ON_ONCE(!hlist_empty(&net->xdp.list));
}

static struct pernet_operations xsk_net_ops = {
	.init = xsk_net_init,
	.exit = xsk_net_exit,
};

static int __init xsk_init(void)
{
	int err;

	err = proto_register(&xsk_proto, 0 /* no slab */);
	if (err)
		goto out;

	err = sock_register(&xsk_family_ops);
	if (err)
		goto out_proto;

	err = register_pernet_subsys(&xsk_net_ops);
	if (err)
		goto out_sk;

	err = register_netdevice_notifier(&xsk_netdev_notifier);
	if (err)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&xsk_net_ops);
out_sk:
	sock_unregister(PF_XDP);
out_proto:
	proto_unregister(&xsk_proto);
out:
	return err;
}

fs_initcall(xsk_init);