// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

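/* Fallback definitions so the library also builds against kernel headers
 * that predate AF_XDP (added in Linux 4.18).
 */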
#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

struct xsk_umem {
	struct xsk_ring_prod *fill_save;
	struct xsk_ring_cons *comp_save;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
	struct list_head ctx_list;
};

struct xsk_ctx {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	__u32 queue_id;
	struct xsk_umem *umem;
	int refcount;
	int ifindex;
	struct list_head list;
	int prog_fd;
	int xsks_map_fd;
	char ifname[IFNAMSIZ];
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_ctx *ctx;
	struct xsk_socket_config config;
	int fd;
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* Up until and including Linux 5.3 */
struct xdp_ring_offset_v1 {
	__u64 producer;
	__u64 consumer;
	__u64 desc;
};

/* Up until and including Linux 5.3 */
struct xdp_mmap_offsets_v1 {
	struct xdp_ring_offset_v1 rx;
	struct xdp_ring_offset_v1 tx;
	struct xdp_ring_offset_v1 fr;
	struct xdp_ring_offset_v1 cr;
};

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
	cfg->flags = usr_cfg->flags;
}

static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return 0;
	}

	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;

	return 0;
}

static void xsk_mmap_offsets_v1(struct xdp_mmap_offsets *off)
{
	struct xdp_mmap_offsets_v1 off_v1;

	/* getsockopt on a kernel <= 5.3 has no flags fields.
	 * Copy over the offsets to the correct places in the >=5.4 format
	 * and put the flags where they would have been on that kernel.
	 */
	memcpy(&off_v1, off, sizeof(off_v1));

	off->rx.producer = off_v1.rx.producer;
	off->rx.consumer = off_v1.rx.consumer;
	off->rx.desc = off_v1.rx.desc;
	off->rx.flags = off_v1.rx.consumer + sizeof(__u32);

	off->tx.producer = off_v1.tx.producer;
	off->tx.consumer = off_v1.tx.consumer;
	off->tx.desc = off_v1.tx.desc;
	off->tx.flags = off_v1.tx.consumer + sizeof(__u32);

	off->fr.producer = off_v1.fr.producer;
	off->fr.consumer = off_v1.fr.consumer;
	off->fr.desc = off_v1.fr.desc;
	off->fr.flags = off_v1.fr.consumer + sizeof(__u32);

	off->cr.producer = off_v1.cr.producer;
	off->cr.consumer = off_v1.cr.consumer;
	off->cr.desc = off_v1.cr.desc;
	off->cr.flags = off_v1.cr.consumer + sizeof(__u32);
}

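/* getsockopt() shrinks optlen to the number of bytes the kernel actually
 * wrote, which is how the pre-5.4 (v1) ring layout is detected below.
 */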
static int xsk_get_mmap_offsets(int fd, struct xdp_mmap_offsets *off)
{
	socklen_t optlen;
	int err;

	optlen = sizeof(*off);
	err = getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, off, &optlen);
	if (err)
		return err;

	if (optlen == sizeof(*off))
		return 0;

	if (optlen == sizeof(struct xdp_mmap_offsets_v1)) {
		xsk_mmap_offsets_v1(off);
		return 0;
	}

	return -EINVAL;
}

static int xsk_create_umem_rings(struct xsk_umem *umem, int fd,
				 struct xsk_ring_prod *fill,
				 struct xsk_ring_cons *comp)
{
	struct xdp_mmap_offsets off;
	void *map;
	int err;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err)
		return -errno;

	err = setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err)
		return -errno;

	err = xsk_get_mmap_offsets(fd, &off);
	if (err)
		return -errno;

	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED)
		return -errno;

	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->flags = map + off.fr.flags;
	fill->ring = map + off.fr.desc;
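	/* cached_cons starts out "size" ahead of the real consumer index;
	 * see the matching comment for the tx ring in
	 * xsk_socket__create_shared() and xsk_prod_nb_free() in xsk.h.
	 */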
	fill->cached_cons = umem->config.fill_size;

	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->flags = map + off.cr.flags;
	comp->ring = map + off.cr.desc;

	return 0;

out_mmap:
	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
	return err;
}

int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	INIT_LIST_HEAD(&umem->ctx_list);
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
	mr.flags = umem->config.flags;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	err = xsk_create_umem_rings(umem, umem->fd, fill, comp);
	if (err)
		goto out_socket;

	umem->fill_save = fill;
	umem->comp_save = comp;
	*umem_ptr = umem;
	return 0;

out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
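
/* Caller-side sketch (not part of this file; NUM_FRAMES is a placeholder):
 *
 *	void *bufs;
 *	struct xsk_ring_prod fq;
 *	struct xsk_ring_cons cq;
 *	struct xsk_umem *umem;
 *	__u32 idx, i;
 *
 *	// The umem area must be page-aligned.
 *	if (posix_memalign(&bufs, getpagesize(),
 *			   NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
 *		exit(1);
 *	if (xsk_umem__create(&umem, bufs,
 *			     NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
 *			     &fq, &cq, NULL))
 *		exit(1);
 *
 *	// Hand frames to the kernel via the fill ring so rx can happen.
 *	if (xsk_ring_prod__reserve(&fq, XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *				   &idx) != XSK_RING_PROD__DEFAULT_NUM_DESCS)
 *		exit(1);
 *	for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++)
 *		*xsk_ring_prod__fill_addr(&fq, idx + i) =
 *			i * XSK_UMEM__DEFAULT_FRAME_SIZE;
 *	xsk_ring_prod__submit(&fq, XSK_RING_PROD__DEFAULT_NUM_DESCS);
 */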

struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
					&config);
}
COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2)
DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4)
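
/* COMPAT_VERSION() and DEFAULT_VERSION() come from libbpf_internal.h and
 * expand to GNU symbol-versioning directives, roughly:
 *
 *	asm(".symver xsk_umem__create_v0_0_2,xsk_umem__create@LIBBPF_0.0.2");
 *	asm(".symver xsk_umem__create_v0_0_4,xsk_umem__create@@LIBBPF_0.0.4");
 *
 * so binaries linked against the old flag-less ABI keep working while new
 * links get the current version by default.
 */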

static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	static const int log_buf_size = 16 * 1024;
	struct xsk_ctx *ctx = xsk->ctx;
	char log_buf[log_buf_size];
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int ret, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     ret = bpf_redirect_map(&xsks_map, index, XDP_PASS);
	 *     if (ret > 0)
	 *         return ret;
	 *
	 *     // Fallback for pre-5.3 kernels, not supporting default
	 *     // action in the flags parameter.
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r2 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r2 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_3, 2),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* if w0 != 0 goto pc+13 */
		BPF_JMP32_IMM(BPF_JSGT, BPF_REG_0, 0, 13),
		/* r2 = r10 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		/* r2 += -4 */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* call bpf_map_lookup_elem */
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		/* r1 = r0 */
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		/* r0 = XDP_PASS */
		BPF_MOV64_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto pc+5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		/* r1 = xskmap[] */
		BPF_LD_MAP_FD(BPF_REG_1, ctx->xsks_map_fd),
		/* r3 = 0 */
		BPF_MOV64_IMM(BPF_REG_3, 0),
		/* call bpf_redirect_map */
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
	if (prog_fd < 0) {
		pr_warn("BPF log buffer:\n%s", log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ctx->ifindex, prog_fd,
				  xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	ctx->prog_fd = prog_fd;
	return 0;
}

static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct xsk_ctx *ctx = xsk->ctx;
	struct ifreq ifr = {};
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, ctx->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err) {
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	} else {
		/* Take the max of rx, tx, combined. Drivers return
		 * the number of channels in different ways.
		 */
		ret = max(channels.max_rx, channels.max_tx);
		ret = max(ret, (int)channels.max_combined);
	}

out:
	close(fd);
	return ret;
}

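/* Size the xsks_map to the device's maximum queue count so that any valid
 * rx_queue_index can be used as a key.
 */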
static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	ctx->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	bpf_map_delete_elem(ctx->xsks_map_fd, &ctx->queue_id);
	close(ctx->xsks_map_fd);
}

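/* Walk the map ids referenced by the already-attached XDP program and
 * match on the map name to find its xsks_map.
 */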
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct xsk_ctx *ctx = xsk->ctx;
	struct bpf_map_info map_info;
	int fd, err;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(ctx->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	ctx->xsks_map_fd = -1;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strcmp(map_info.name, "xsks_map")) {
			ctx->xsks_map_fd = fd;
			continue;
		}

		close(fd);
	}

	err = 0;
	if (ctx->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}

static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;

	return bpf_map_update_elem(ctx->xsks_map_fd, &ctx->queue_id,
				   &xsk->fd, 0);
}

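/* Load and attach our default program only if the interface has no XDP
 * program yet; otherwise reuse whatever is attached and locate its
 * xsks_map.
 */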
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	struct xsk_ctx *ctx = xsk->ctx;
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(ctx->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err) {
			xsk_delete_bpf_maps(xsk);
			return err;
		}
	} else {
		ctx->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		if (ctx->prog_fd < 0)
			return -errno;
		err = xsk_lookup_bpf_maps(xsk);
		if (err) {
			close(ctx->prog_fd);
			return err;
		}
	}

	if (xsk->rx)
		err = xsk_set_bpf_maps(xsk);
	if (err) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
		return err;
	}

	return 0;
}

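/* A context is shared per (umem, ifindex, queue_id) triple so that several
 * sockets created via xsk_socket__create_shared() can reuse one set of fill
 * and completion rings.
 */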
static struct xsk_ctx *xsk_get_ctx(struct xsk_umem *umem, int ifindex,
				   __u32 queue_id)
{
	struct xsk_ctx *ctx;

	if (list_empty(&umem->ctx_list))
		return NULL;

	list_for_each_entry(ctx, &umem->ctx_list, list) {
		if (ctx->ifindex == ifindex && ctx->queue_id == queue_id) {
			ctx->refcount++;
			return ctx;
		}
	}

	return NULL;
}

static void xsk_put_ctx(struct xsk_ctx *ctx)
{
	struct xsk_umem *umem = ctx->umem;
	struct xdp_mmap_offsets off;
	int err;

	if (--ctx->refcount == 0) {
		err = xsk_get_mmap_offsets(umem->fd, &off);
		if (!err) {
			munmap(ctx->fill->ring - off.fr.desc,
			       off.fr.desc + umem->config.fill_size *
			       sizeof(__u64));
			munmap(ctx->comp->ring - off.cr.desc,
			       off.cr.desc + umem->config.comp_size *
			       sizeof(__u64));
		}

		list_del(&ctx->list);
		free(ctx);
	}
}

static struct xsk_ctx *xsk_create_ctx(struct xsk_socket *xsk,
				      struct xsk_umem *umem, int ifindex,
				      const char *ifname, __u32 queue_id,
				      struct xsk_ring_prod *fill,
				      struct xsk_ring_cons *comp)
{
	struct xsk_ctx *ctx;
	int err;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx)
		return NULL;

	if (!umem->fill_save) {
		err = xsk_create_umem_rings(umem, xsk->fd, fill, comp);
		if (err) {
			free(ctx);
			return NULL;
		}
	} else if (umem->fill_save != fill || umem->comp_save != comp) {
		/* Copy over rings to new structs. */
		memcpy(fill, umem->fill_save, sizeof(*fill));
		memcpy(comp, umem->comp_save, sizeof(*comp));
	}

	ctx->ifindex = ifindex;
	ctx->refcount = 1;
	ctx->umem = umem;
	ctx->queue_id = queue_id;
	memcpy(ctx->ifname, ifname, IFNAMSIZ - 1);
	ctx->ifname[IFNAMSIZ - 1] = '\0';

	umem->fill_save = NULL;
	umem->comp_save = NULL;
	ctx->fill = fill;
	ctx->comp = comp;
	list_add(&ctx->list, &umem->ctx_list);
	return ctx;
}

int xsk_socket__create_shared(struct xsk_socket **xsk_ptr,
			      const char *ifname,
			      __u32 queue_id, struct xsk_umem *umem,
			      struct xsk_ring_cons *rx,
			      struct xsk_ring_prod *tx,
			      struct xsk_ring_prod *fill,
			      struct xsk_ring_cons *comp,
			      const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	struct xsk_ctx *ctx;
	int err, ifindex;

	if (!umem || !xsk_ptr || !(rx || tx))
		return -EFAULT;

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_xsk_alloc;

	xsk->outstanding_tx = 0;
	ifindex = if_nametoindex(ifname);
	if (!ifindex) {
		err = -errno;
		goto out_xsk_alloc;
	}

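	/* The first socket on a umem reuses the umem's own fd; any further
	 * socket needs a freshly created AF_XDP socket.
	 */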
	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	ctx = xsk_get_ctx(umem, ifindex, queue_id);
	if (!ctx) {
		if (!fill || !comp) {
			err = -EFAULT;
			goto out_socket;
		}

		ctx = xsk_create_ctx(xsk, umem, ifindex, ifname, queue_id,
				     fill, comp);
		if (!ctx) {
			err = -ENOMEM;
			goto out_socket;
		}
	}
	xsk->ctx = ctx;

	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_put_ctx;
		}
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (err) {
		err = -errno;
		goto out_put_ctx;
	}

	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_put_ctx;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
		rx->cached_prod = *rx->producer;
		rx->cached_cons = *rx->consumer;
	}
	xsk->rx = rx;

	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		tx->cached_prod = *tx->producer;
		/* cached_cons is r->size bigger than the real consumer pointer
		 * See xsk_prod_nb_free
		 */
		tx->cached_cons = *tx->consumer + xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = ctx->ifindex;
	sxdp.sxdp_queue_id = ctx->queue_id;
	if (umem->refcount > 1) {
		sxdp.sxdp_flags |= XDP_SHARED_UMEM;
		sxdp.sxdp_shared_umem_fd = umem->fd;
	} else {
		sxdp.sxdp_flags = xsk->config.bind_flags;
	}

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	ctx->prog_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_put_ctx:
	xsk_put_ctx(ctx);
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}

int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	return xsk_socket__create_shared(xsk_ptr, ifname, queue_id, umem,
					 rx, tx, umem->fill_save,
					 umem->comp_save, usr_config);
}
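
/* Caller-side sketch (not part of this file; "eth0" and queue 0 are
 * placeholders):
 *
 *	struct xsk_ring_cons rx;
 *	struct xsk_ring_prod tx;
 *	struct xsk_socket *xsk;
 *
 *	if (xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL))
 *		exit(1);
 */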

int xsk_umem__delete(struct xsk_umem *umem)
{
	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	close(umem->fd);
	free(umem);

	return 0;
}

void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xsk_umem *umem;
	struct xsk_ctx *ctx;
	struct xdp_mmap_offsets off;
	int err;

	if (!xsk)
		return;

	ctx = xsk->ctx;
	umem = ctx->umem;

	if (ctx->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(ctx->prog_fd);
	}

	err = xsk_get_mmap_offsets(xsk->fd, &off);
	if (!err) {
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	xsk_put_ctx(ctx);

	umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != umem->fd)
		close(xsk->fd);
	free(xsk);
}