// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "xsk.h"

#ifndef SOL_XDP
 #define SOL_XDP 283
#endif

#ifndef AF_XDP
 #define AF_XDP 44
#endif

#ifndef PF_XDP
 #define PF_XDP AF_XDP
#endif

/* Internal state for a registered UMEM: the caller-owned packet buffer
 * area plus the fill and completion rings mapped onto it.
 */
struct xsk_umem {
	struct xsk_ring_prod *fill;	/* fill ring: user hands buffers to kernel */
	struct xsk_ring_cons *comp;	/* completion ring: kernel returns tx buffers */
	char *umem_area;		/* caller-owned buffer memory (not freed here) */
	struct xsk_umem_config config;	/* effective config (defaults or user copy) */
	int fd;				/* AF_XDP socket the UMEM is registered on */
	int refcount;			/* number of xsk_sockets using this UMEM */
};

/* Internal state for one AF_XDP socket bound to (ifindex, queue_id). */
struct xsk_socket {
	struct xsk_ring_cons *rx;	/* rx descriptor ring, NULL if rx unused */
	struct xsk_ring_prod *tx;	/* tx descriptor ring, NULL if tx unused */
	__u64 outstanding_tx;		/* tx descriptors submitted but not completed */
	struct xsk_umem *umem;		/* UMEM backing this socket */
	struct xsk_socket_config config;
	int fd;				/* socket fd (may alias umem->fd) */
	int ifindex;
	int prog_fd;			/* attached XDP program, -1 when none loaded */
	int xsks_map_fd;		/* fd of the "xsks_map" BPF map */
	__u32 queue_id;			/* NIC queue this socket is bound to */
	char ifname[IFNAMSIZ];
};

/* Netlink query context.
 * NOTE(review): no user visible in this file — presumably used by netlink
 * helper code elsewhere; confirm before removing.
 */
struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* Return the fd backing @umem, or -EINVAL when @umem is NULL. */
int xsk_umem__fd(const struct xsk_umem *umem)
{
	if (!umem)
		return -EINVAL;

	return umem->fd;
}

/* Return the fd of @xsk, or -EINVAL when @xsk is NULL. */
int xsk_socket__fd(const struct xsk_socket *xsk)
{
	if (!xsk)
		return -EINVAL;

	return xsk->fd;
}

/* True when @buffer starts on a page boundary. */
static bool xsk_page_aligned(void *buffer)
{
	return ((unsigned long)buffer & (getpagesize() - 1)) == 0;
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
K
Kevin Laatz 已提交
101
		cfg->flags = XSK_UMEM__DEFAULT_FLAGS;
102 103 104 105 106 107 108
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
K
Kevin Laatz 已提交
109
	cfg->flags = usr_cfg->flags;
110 111
}

112 113
static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				     const struct xsk_socket_config *usr_cfg)
114 115 116 117 118 119 120
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
121
		return 0;
122 123
	}

124 125 126
	if (usr_cfg->libbpf_flags & ~XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)
		return -EINVAL;

127 128 129 130 131
	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;
132 133

	return 0;
134 135
}

K
Kevin Laatz 已提交
136 137 138 139
int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

166
	memset(&mr, 0, sizeof(mr));
167 168 169 170
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;
K
Kevin Laatz 已提交
171
	mr.flags = umem->config.flags;
172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

200 201 202
	map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
		   XDP_UMEM_PGOFF_FILL_RING);
203 204 205 206 207 208 209 210 211 212
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_socket;
	}

	umem->fill = fill;
	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
213
	fill->flags = map + off.fr.flags;
214 215 216
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

217 218 219
	map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64),
		   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd,
		   XDP_UMEM_PGOFF_COMPLETION_RING);
220 221 222 223 224 225 226 227 228 229
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	umem->comp = comp;
	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
230
	comp->flags = map + off.cr.flags;
231 232 233 234 235 236
	comp->ring = map + off.cr.desc;

	*umem_ptr = umem;
	return 0;

out_mmap:
B
Björn Töpel 已提交
237
	munmap(map, off.fr.desc + umem->config.fill_size * sizeof(__u64));
238 239 240 241 242 243 244
out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}

/* UMEM config layout as it existed in libbpf 0.0.2, before the `flags`
 * field was appended.  Used to copy only the fields an old caller
 * actually provided.
 */
struct xsk_umem_config_v1 {
	__u32 fill_size;
	__u32 comp_size;
	__u32 frame_size;
	__u32 frame_headroom;
};

/* Compatibility entry point for callers linked against libbpf 0.0.2,
 * whose xsk_umem_config lacked the `flags` field.  Copies the old
 * layout into a current config (flags = 0) and forwards to the 0.0.4
 * implementation.
 *
 * Fix vs. original: a NULL @usr_config means "use defaults" (the 0.0.4
 * entry point accepts it), but the old code unconditionally memcpy'd
 * from it and crashed.  Forward NULL straight through instead.
 */
int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
			    __u64 size, struct xsk_ring_prod *fill,
			    struct xsk_ring_cons *comp,
			    const struct xsk_umem_config *usr_config)
{
	struct xsk_umem_config config;

	if (!usr_config)
		return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size,
					       fill, comp, NULL);

	/* Copy only the v0.0.2-visible fields; clear flags so stale
	 * stack bytes are never used.
	 */
	memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1));
	config.flags = 0;

	return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp,
					&config);
}
asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2");
asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4");

268 269
static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
270 271
	static const int log_buf_size = 16 * 1024;
	char log_buf[log_buf_size];
272 273 274 275 276
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
277
	 *     int index = ctx->rx_queue_index;
278 279 280
	 *
	 *     // A set entry here means that the correspnding queue_id
	 *     // has an active AF_XDP socket bound to it.
281
	 *     if (bpf_map_lookup_elem(&xsks_map, &index))
282 283 284 285 286 287 288 289 290 291 292 293
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *     return XDP_PASS;
	 * }
	 */
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
294
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* if r1 == 0 goto +5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
311 312
				   "LGPL-2.1 or BSD-2-Clause", 0, log_buf,
				   log_buf_size);
313
	if (prog_fd < 0) {
314
		pr_warn("BPF log buffer:\n%s", log_buf);
315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	xsk->prog_fd = prog_fd;
	return 0;
}

static int xsk_get_max_queues(struct xsk_socket *xsk)
{
330 331
	struct ethtool_channels channels = { .cmd = ETHTOOL_GCHANNELS };
	struct ifreq ifr = {};
332 333 334 335 336 337 338
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	ifr.ifr_data = (void *)&channels;
339
	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
340
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
341 342 343 344 345 346
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

347
	if (err || channels.max_combined == 0)
348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	else
		ret = channels.max_combined;

out:
	close(fd);
	return ret;
}

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

369
	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
370 371 372 373 374 375 376 377 378 379 380
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;

	xsk->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
381
	bpf_map_delete_elem(xsk->xsks_map_fd, &xsk->queue_id);
382 383 384
	close(xsk->xsks_map_fd);
}

/* Find the "xsks_map" among the maps used by the already-attached XDP
 * program (xsk->prog_fd) and store its fd in xsk->xsks_map_fd.
 *
 * Returns 0 on success, -ENOENT when the program has no map named
 * "xsks_map", or a negative error from the BPF info queries.
 */
static int xsk_lookup_bpf_maps(struct xsk_socket *xsk)
{
	__u32 i, *map_ids, num_maps, prog_len = sizeof(struct bpf_prog_info);
	__u32 map_len = sizeof(struct bpf_map_info);
	struct bpf_prog_info prog_info = {};
	struct bpf_map_info map_info;
	int fd, err;

	/* First query: learn how many map ids the program references. */
	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	/* Second query: re-issue with a buffer to receive the actual ids. */
	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	xsk->xsks_map_fd = -1;

	/* Scan every map the program uses, keeping the one named
	 * "xsks_map" and closing all others.
	 */
	for (i = 0; i < prog_info.nr_map_ids; i++) {
		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0)
			continue;

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err) {
			close(fd);
			continue;
		}

		if (!strcmp(map_info.name, "xsks_map")) {
			xsk->xsks_map_fd = fd;
			continue;
		}

		close(fd);
	}

	err = 0;
	if (xsk->xsks_map_fd == -1)
		err = -ENOENT;

out_map_ids:
	free(map_ids);
	return err;
}

B
Björn Töpel 已提交
441 442
static int xsk_set_bpf_maps(struct xsk_socket *xsk)
{
443 444
	return bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
				   &xsk->fd, 0);
B
Björn Töpel 已提交
445 446
}

/* Ensure an XDP program with an xsks_map is attached to the interface,
 * then register this socket in the map.
 *
 * If no program is attached yet, create the map and load/attach our
 * built-in program; otherwise reuse the attached program and look up
 * its xsks_map.  Returns 0 or a negative error, undoing any map/prog
 * work on failure.
 */
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		/* Nothing attached: build our own map + program. */
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err) {
			xsk_delete_bpf_maps(xsk);
			return err;
		}
	} else {
		/* Reuse whatever program is already attached. */
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
		/* NOTE(review): a negative prog_fd is not checked here
		 * before use — confirm whether failure is possible.
		 */
		err = xsk_lookup_bpf_maps(xsk);
		if (err) {
			close(xsk->prog_fd);
			return err;
		}
	}

	err = xsk_set_bpf_maps(xsk);
	if (err) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
		return err;
	}

	return 0;
}

/* Create an AF_XDP socket bound to (@ifname, @queue_id) on @umem.
 *
 * Sizes and mmaps the rx/tx rings, binds the socket, and (unless
 * inhibited via libbpf_flags) attaches the XDP program and registers
 * the socket in the xsks_map.  Returns 0 and sets *xsk_ptr on success,
 * or a negative errno, releasing everything acquired so far.
 *
 * Both @rx and @tx are required (checked below), so this libbpf
 * version always creates both rings.
 */
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	void *rx_map = NULL, *tx_map = NULL;
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	socklen_t optlen;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)
		return -EFAULT;

	/* Only one socket per UMEM is supported by this version. */
	if (umem->refcount) {
		pr_warn("Error: shared umems not supported by libbpf.\n");
		return -EBUSY;
	}

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	/* The first socket reuses the UMEM's fd; later ones would open
	 * their own.  NOTE(review): given the refcount check above,
	 * refcount is always 0 here, so the first branch looks
	 * unreachable — kept for when UMEM sharing is supported.
	 */
	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;
	xsk->umem = umem;
	xsk->ifindex = if_nametoindex(ifname);
	if (!xsk->ifindex) {
		err = -errno;
		goto out_socket;
	}
	memcpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
	if (err)
		goto out_socket;

	/* Tell the kernel the ring sizes before querying mmap offsets. */
	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	/* Map the rx ring and wire up the user-space ring view. */
	if (rx) {
		rx_map = mmap(NULL, off.rx.desc +
			      xsk->config.rx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_RX_RING);
		if (rx_map == MAP_FAILED) {
			err = -errno;
			goto out_socket;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = rx_map + off.rx.producer;
		rx->consumer = rx_map + off.rx.consumer;
		rx->flags = rx_map + off.rx.flags;
		rx->ring = rx_map + off.rx.desc;
	}
	xsk->rx = rx;

	/* Same for the tx ring. */
	if (tx) {
		tx_map = mmap(NULL, off.tx.desc +
			      xsk->config.tx_size * sizeof(struct xdp_desc),
			      PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
			      xsk->fd, XDP_PGOFF_TX_RING);
		if (tx_map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = tx_map + off.tx.producer;
		tx->consumer = tx_map + off.tx.consumer;
		tx->flags = tx_map + off.tx.flags;
		tx->ring = tx_map + off.tx.desc;
		/* Pre-seed cached_cons with the ring size; presumably
		 * the producer free-slot accounting in xsk.h treats
		 * this as "consumer + ring size" — confirm.
		 */
		tx->cached_cons = xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	/* -1 marks "no program loaded" for xsk_socket__delete(). */
	xsk->prog_fd = -1;

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	if (tx)
		munmap(tx_map, off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx_map, off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_socket:
	/* Undo the refcount bump; only close fds we opened ourselves. */
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}

/* Destroy a UMEM created by xsk_umem__create: unmap both rings, close
 * the socket and free the handle.
 *
 * Returns 0 (also for NULL @umem), or -EBUSY while any xsk_socket
 * still references it.  The caller's umem_area is NOT freed here.
 */
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		/* ring points off.fr.desc bytes into the mapping, so
		 * step back to the mmap base before unmapping.
		 */
		munmap(umem->fill->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

/* Destroy a socket created by xsk_socket__create: remove it from the
 * xsks_map, unmap the rx/tx rings, and close the fd unless it is the
 * UMEM's fd (which xsk_umem__delete owns).  NULL is a no-op.
 */
void xsk_socket__delete(struct xsk_socket *xsk)
{
	size_t desc_sz = sizeof(struct xdp_desc);
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!xsk)
		return;

	/* prog_fd == -1 means program setup was inhibited or failed. */
	if (xsk->prog_fd != -1) {
		xsk_delete_bpf_maps(xsk);
		close(xsk->prog_fd);
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		/* ring points desc-offset bytes into each mapping; step
		 * back to the mmap base before unmapping.
		 */
		if (xsk->rx) {
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc + xsk->config.rx_size * desc_sz);
		}
		if (xsk->tx) {
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc + xsk->config.tx_size * desc_sz);
		}
	}

	xsk->umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != xsk->umem->fd)
		close(xsk->fd);
	free(xsk);
}