// SPDX-License-Identifier: GPL-2.0-only
/* net/core/xdp.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/rhashtable.h>
#include <net/page_pool.h>

#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>

#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
#define REG_STATE_UNREGISTERED	0x2
#define REG_STATE_UNUSED	0x3

static DEFINE_IDA(mem_id_pool);
static DEFINE_MUTEX(mem_id_lock);
#define MEM_ID_MAX 0xFFFE
#define MEM_ID_MIN 1
static int mem_id_next = MEM_ID_MIN;

static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;

static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
	const u32 key = *k;

	BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id)
		     != sizeof(u32));

	/* Use cyclic increasing ID as direct hash key */
	return key;
}

static int xdp_mem_id_cmp(struct rhashtable_compare_arg *arg,
			  const void *ptr)
{
	const struct xdp_mem_allocator *xa = ptr;
	u32 mem_id = *(u32 *)arg->key;

	return xa->mem.id != mem_id;
}

static const struct rhashtable_params mem_id_rht_params = {
	.nelem_hint = 64,
	.head_offset = offsetof(struct xdp_mem_allocator, node),
	.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
	.key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id),
	.max_size = MEM_ID_MAX,
	.min_size = 8,
	.automatic_shrinking = true,
	.hashfn    = xdp_mem_id_hashfn,
	.obj_cmpfn = xdp_mem_id_cmp,
};

static void __xdp_mem_allocator_rcu_free(struct rcu_head *rcu)
{
	struct xdp_mem_allocator *xa;

	xa = container_of(rcu, struct xdp_mem_allocator, rcu);

	/* Allow this ID to be reused */
	ida_simple_remove(&mem_id_pool, xa->mem.id);

	kfree(xa);
}

static void mem_xa_remove(struct xdp_mem_allocator *xa)
{
	trace_mem_disconnect(xa);

	mutex_lock(&mem_id_lock);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}

static void mem_allocator_disconnect(void *allocator)
{
	struct xdp_mem_allocator *xa;
	struct rhashtable_iter iter;

	rhashtable_walk_enter(mem_id_ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) {
			if (xa->allocator == allocator)
				mem_xa_remove(xa);
		}

		rhashtable_walk_stop(&iter);

	} while (xa == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}

static void mem_id_disconnect(int id)
{
	struct xdp_mem_allocator *xa;

	mutex_lock(&mem_id_lock);

	xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params);
	if (!xa) {
		mutex_unlock(&mem_id_lock);
		WARN(1, "Request remove non-existing id(%d), driver bug?", id);
		return;
	}

	trace_mem_disconnect(xa);

	if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
		call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free);

	mutex_unlock(&mem_id_lock);
}

void xdp_rxq_info_unreg_mem_model(struct xdp_rxq_info *xdp_rxq)
{
	struct xdp_mem_allocator *xa;
	int id = xdp_rxq->mem.id;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return;
	}

	if (id == 0)
		return;

	if (xdp_rxq->mem.type == MEM_TYPE_ZERO_COPY)
		return mem_id_disconnect(id);

	if (xdp_rxq->mem.type == MEM_TYPE_PAGE_POOL) {
		rcu_read_lock();
		xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params);
		page_pool_destroy(xa->page_pool);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg_mem_model);

void xdp_rxq_info_unreg(struct xdp_rxq_info *xdp_rxq)
{
	/* Simplify driver cleanup code paths, allow unreg "unused" */
	if (xdp_rxq->reg_state == REG_STATE_UNUSED)
		return;

	WARN(!(xdp_rxq->reg_state == REG_STATE_REGISTERED), "Driver BUG");

	xdp_rxq_info_unreg_mem_model(xdp_rxq);

	xdp_rxq->reg_state = REG_STATE_UNREGISTERED;
	xdp_rxq->dev = NULL;

	/* Reset mem info to defaults */
	xdp_rxq->mem.id = 0;
	xdp_rxq->mem.type = 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unreg);

static void xdp_rxq_info_init(struct xdp_rxq_info *xdp_rxq)
{
	memset(xdp_rxq, 0, sizeof(*xdp_rxq));
}

/* Returns 0 on success, negative on failure */
int xdp_rxq_info_reg(struct xdp_rxq_info *xdp_rxq,
		     struct net_device *dev, u32 queue_index)
{
	if (xdp_rxq->reg_state == REG_STATE_UNUSED) {
		WARN(1, "Driver promised not to register this");
		return -EINVAL;
	}

	if (xdp_rxq->reg_state == REG_STATE_REGISTERED) {
		WARN(1, "Missing unregister, handled but fix driver");
		xdp_rxq_info_unreg(xdp_rxq);
	}

	if (!dev) {
		WARN(1, "Missing net_device from driver");
		return -ENODEV;
	}

	/* State either UNREGISTERED or NEW */
	xdp_rxq_info_init(xdp_rxq);
	xdp_rxq->dev = dev;
	xdp_rxq->queue_index = queue_index;

	xdp_rxq->reg_state = REG_STATE_REGISTERED;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg);
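
/* Editor's example (not part of the original file): a driver typically
 * registers one xdp_rxq_info per RX queue when the ring is set up, and
 * unregisters it on teardown.  The my_rx_ring structure and function
 * names are hypothetical, for illustration only.
 *
 *	static int my_rx_ring_setup(struct my_rx_ring *ring)
 *	{
 *		int err;
 *
 *		err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
 *				       ring->queue_index);
 *		if (err)
 *			return err;
 *		// ...allocate buffers, register a mem model, etc...
 *		return 0;
 *	}
 *
 *	static void my_rx_ring_teardown(struct my_rx_ring *ring)
 *	{
 *		xdp_rxq_info_unreg(&ring->xdp_rxq);
 *	}
 */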

void xdp_rxq_info_unused(struct xdp_rxq_info *xdp_rxq)
{
	xdp_rxq->reg_state = REG_STATE_UNUSED;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_unused);

bool xdp_rxq_info_is_reg(struct xdp_rxq_info *xdp_rxq)
{
	return (xdp_rxq->reg_state == REG_STATE_REGISTERED);
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_is_reg);

static int __mem_id_init_hash_table(void)
{
	struct rhashtable *rht;
	int ret;

	if (unlikely(mem_id_init))
		return 0;

	rht = kzalloc(sizeof(*rht), GFP_KERNEL);
	if (!rht)
		return -ENOMEM;

	ret = rhashtable_init(rht, &mem_id_rht_params);
	if (ret < 0) {
		kfree(rht);
		return ret;
	}
	mem_id_ht = rht;
	smp_mb(); /* mutex lock should provide enough pairing */
	mem_id_init = true;

	return 0;
}

/* Allocate a cyclic ID that maps to allocator pointer.
 * See: https://www.kernel.org/doc/html/latest/core-api/idr.html
 *
 * Caller must lock mem_id_lock.
 */
static int __mem_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&mem_id_pool, mem_id_next, MEM_ID_MAX, gfp);
	if (id < 0) {
		if (id == -ENOSPC) {
			/* Cyclic allocator, reset next id */
			if (retries--) {
				mem_id_next = MEM_ID_MIN;
				goto again;
			}
		}
		return id; /* errno */
	}
	mem_id_next = id + 1;

	return id;
}

static bool __is_supported_mem_type(enum xdp_mem_type type)
{
	if (type == MEM_TYPE_PAGE_POOL)
		return is_page_pool_compiled_in();

	if (type >= MEM_TYPE_MAX)
		return false;

	return true;
}

int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
			       enum xdp_mem_type type, void *allocator)
{
	struct xdp_mem_allocator *xdp_alloc;
	gfp_t gfp = GFP_KERNEL;
	int id, errno, ret;
	void *ptr;

	if (xdp_rxq->reg_state != REG_STATE_REGISTERED) {
		WARN(1, "Missing register, driver bug");
		return -EFAULT;
	}

	if (!__is_supported_mem_type(type))
		return -EOPNOTSUPP;

	xdp_rxq->mem.type = type;

	if (!allocator) {
		if (type == MEM_TYPE_PAGE_POOL || type == MEM_TYPE_ZERO_COPY)
			return -EINVAL; /* Setup time check page_pool req */
		return 0;
	}

	/* Delay init of rhashtable to save memory if feature isn't used */
	if (!mem_id_init) {
		mutex_lock(&mem_id_lock);
		ret = __mem_id_init_hash_table();
		mutex_unlock(&mem_id_lock);
		if (ret < 0) {
			WARN_ON(1);
			return ret;
		}
	}

	xdp_alloc = kzalloc(sizeof(*xdp_alloc), gfp);
	if (!xdp_alloc)
		return -ENOMEM;

	mutex_lock(&mem_id_lock);
	id = __mem_id_cyclic_get(gfp);
	if (id < 0) {
		errno = id;
		goto err;
	}
	xdp_rxq->mem.id = id;
	xdp_alloc->mem  = xdp_rxq->mem;
	xdp_alloc->allocator = allocator;

	/* Insert allocator into ID lookup table */
	ptr = rhashtable_insert_slow(mem_id_ht, &id, &xdp_alloc->node);
	if (IS_ERR(ptr)) {
		ida_simple_remove(&mem_id_pool, xdp_rxq->mem.id);
		xdp_rxq->mem.id = 0;
		errno = PTR_ERR(ptr);
		goto err;
	}

	if (type == MEM_TYPE_PAGE_POOL)
		page_pool_use_xdp_mem(allocator, mem_allocator_disconnect);

	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
	kfree(xdp_alloc);
	return errno;
}
EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
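
/* Editor's example (not from the original file): after xdp_rxq_info_reg()
 * succeeds, a driver recycling RX pages through page_pool registers the
 * pool as this queue's memory model.  page_pool_create() and struct
 * page_pool_params come from <net/page_pool.h>; ring_size, numa_node and
 * the ring layout are hypothetical.
 *
 *	struct page_pool_params pp_params = {
 *		.order = 0,
 *		.pool_size = ring_size,
 *		.nid = numa_node,
 *		.dev = ring->dev,
 *	};
 *	struct page_pool *pool;
 *	int err;
 *
 *	pool = page_pool_create(&pp_params);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
 *					 MEM_TYPE_PAGE_POOL, pool);
 *	if (err)
 *		page_pool_destroy(pool);
 */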

/* XDP RX runs under NAPI protection, and in different delivery error
 * scenarios (e.g. queue full), it is possible to return the xdp_frame
 * while still leveraging this protection.  The @napi_direct boolean
 * is used for those call sites.  This allows for faster recycling
 * of xdp_frames/pages in those cases.
 */
static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
			 unsigned long handle)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	switch (mem->type) {
	case MEM_TYPE_PAGE_POOL:
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		page = virt_to_head_page(data);
		napi_direct &= !xdp_return_frame_no_direct();
		page_pool_put_page(xa->page_pool, page, napi_direct);
		rcu_read_unlock();
		break;
	case MEM_TYPE_PAGE_SHARED:
		page_frag_free(data);
		break;
	case MEM_TYPE_PAGE_ORDER0:
		page = virt_to_page(data); /* Assumes order0 page */
		put_page(page);
		break;
	case MEM_TYPE_ZERO_COPY:
		/* NB! Only valid from an xdp_buff! */
		rcu_read_lock();
		/* mem->id is valid, checked in xdp_rxq_info_reg_mem_model() */
		xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
		xa->zc_alloc->free(xa->zc_alloc, handle);
		rcu_read_unlock();
		/* fall through */
	default:
		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
		break;
	}
}

void xdp_return_frame(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, false, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame);

void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
{
	__xdp_return(xdpf->data, &xdpf->mem, true, 0);
}
EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
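
/* Editor's note with a hypothetical example: the variant to call depends
 * on context.  Only code running inside its own NAPI poll loop (e.g. TX
 * completion of an XDP_TX frame) may ask for direct recycling; all other
 * contexts must use plain xdp_return_frame().
 *
 *	// inside the driver's NAPI poll, completing an xdp_frame:
 *	xdp_return_frame_rx_napi(xdpf);
 *
 *	// from a workqueue, timer or ring-teardown path:
 *	xdp_return_frame(xdpf);
 */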

void xdp_return_buff(struct xdp_buff *xdp)
{
	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp->handle);
}
EXPORT_SYMBOL_GPL(xdp_return_buff);

/* Only called for MEM_TYPE_PAGE_POOL, see xdp.h */
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
{
	struct xdp_mem_allocator *xa;
	struct page *page;

	rcu_read_lock();
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page = virt_to_head_page(data);
	if (xa)
		page_pool_release_page(xa->page_pool, page);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(__xdp_release_frame);

int xdp_attachment_query(struct xdp_attachment_info *info,
			 struct netdev_bpf *bpf)
{
	bpf->prog_id = info->prog ? info->prog->aux->id : 0;
	bpf->prog_flags = info->prog ? info->flags : 0;
	return 0;
}
EXPORT_SYMBOL_GPL(xdp_attachment_query);

bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
			     struct netdev_bpf *bpf)
{
	if (info->prog && (bpf->flags ^ info->flags) & XDP_FLAGS_MODES) {
		NL_SET_ERR_MSG(bpf->extack,
			       "program loaded with different flags");
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xdp_attachment_flags_ok);

void xdp_attachment_setup(struct xdp_attachment_info *info,
			  struct netdev_bpf *bpf)
{
	if (info->prog)
		bpf_prog_put(info->prog);
	info->prog = bpf->prog;
	info->flags = bpf->flags;
}
EXPORT_SYMBOL_GPL(xdp_attachment_setup);
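
/* Editor's example: the xdp_attachment_*() helpers are designed to be
 * used together from a driver's ndo_bpf() XDP_SETUP_PROG handler.  A
 * hypothetical handler (names invented for illustration):
 *
 *	static int my_xdp_setup(struct net_device *dev, struct netdev_bpf *bpf)
 *	{
 *		struct my_priv *priv = netdev_priv(dev);
 *
 *		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
 *			return -EBUSY;
 *
 *		// ...reconfigure hardware for bpf->prog here...
 *
 *		xdp_attachment_setup(&priv->xdp, bpf);
 *		return 0;
 *	}
 */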

struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
{
	unsigned int metasize, totsize;
	void *addr, *data_to_copy;
	struct xdp_frame *xdpf;
	struct page *page;

	/* Clone into a MEM_TYPE_PAGE_ORDER0 xdp_frame. */
	metasize = xdp_data_meta_unsupported(xdp) ? 0 :
		   xdp->data - xdp->data_meta;
	totsize = xdp->data_end - xdp->data + metasize;

	if (sizeof(*xdpf) + totsize > PAGE_SIZE)
		return NULL;

	page = dev_alloc_page();
	if (!page)
		return NULL;

	addr = page_to_virt(page);
	xdpf = addr;
	memset(xdpf, 0, sizeof(*xdpf));

	addr += sizeof(*xdpf);
	data_to_copy = metasize ? xdp->data_meta : xdp->data;
	memcpy(addr, data_to_copy, totsize);

	xdpf->data = addr + metasize;
	xdpf->len = totsize - metasize;
	xdpf->headroom = 0;
	xdpf->metasize = metasize;
	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;

	xdp_return_buff(xdp);
	return xdpf;
}
EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);
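
/* Editor's note with a hypothetical example: a zero-copy driver that must
 * hand a buffer off outside the ZC path converts it first.  On success the
 * original xdp_buff has already been recycled via xdp_return_buff(); on
 * failure (NULL) the caller still owns the buff and must release it.
 *
 *	struct xdp_frame *xdpf;
 *
 *	xdpf = xdp_convert_zc_to_xdp_frame(xdp);
 *	if (unlikely(!xdpf)) {
 *		xdp_return_buff(xdp);	// not freed on failure, drop it here
 *		return -ENOMEM;
 *	}
 */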