// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>
#include <linux/vmalloc.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

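/* Track the sockets transmitting from this umem. Rx-only sockets are
 * not added to the list; xdp_del_sk_umem() below removes a socket
 * again when it is torn down.
 */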
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

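/* Undo xdp_reg_umem_at_qid() for both the rx and tx queue structs. */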
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

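/* Bind the umem to one queue pair of a device. Copy mode only needs
 * the queue registration; for zero-copy the driver is also asked to
 * set up the umem via ndo_bpf(XDP_SETUP_XSK_UMEM). If the driver
 * cannot do zero-copy and zero-copy was not explicitly requested, we
 * fall back to copy mode.
 */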
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xdp_get_umem_from_qid(dev, queue_id))
		return -EBUSY;

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		return err;

	umem->dev = dev;
	umem->queue_id = queue_id;

	if (flags & XDP_USE_NEED_WAKEUP) {
		umem->flags |= XDP_UMEM_USES_NEED_WAKEUP;
		/* Tx needs to be explicitly woken up the first time.
		 * This also covers drivers that do not implement the
		 * feature; with those, user space always has to call
		 * sendto().
		 */
		xsk_set_tx_need_wakeup(umem);
	}

	dev_hold(dev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;

	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
	return err;
}

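/* Undo xdp_umem_assign_dev(): tell the driver to tear down a
 * zero-copy umem, clear the queue pointers and drop the device
 * reference. Must be called with the rtnl lock held.
 */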
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!umem->dev)
		return;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);

		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

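/* Kernel mappings for the umem pages. Lowmem pages are reached
 * directly through page_address(); highmem pages are mapped one page
 * at a time with vmap() and therefore need a vunmap() on teardown.
 */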
static void xdp_umem_unmap_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++)
		if (PageHighMem(umem->pgs[i]))
			vunmap(umem->pages[i].addr);
}

static int xdp_umem_map_pages(struct xdp_umem *umem)
{
	unsigned int i;
	void *addr;

	for (i = 0; i < umem->npgs; i++) {
		if (PageHighMem(umem->pgs[i]))
			addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL);
		else
			addr = page_address(umem->pgs[i]);

		if (!addr) {
			xdp_umem_unmap_pages(umem);
			return -ENOMEM;
		}

		umem->pages[i].addr = addr;
	}

	return 0;
}

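/* Release the pages pinned by xdp_umem_pin_pages(), marking them
 * dirty as the data path may have written to them.
 */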
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

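/* Final teardown once the last reference is gone: unbind from the
 * device, free the id, destroy the fill and completion queues, and
 * unmap, unpin and unaccount the user pages.
 */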
static void xdp_umem_release(struct xdp_umem *umem)
{
	rtnl_lock();
	xdp_umem_clear_dev(umem);
	rtnl_unlock();

	ida_simple_remove(&umem_ida, umem->id);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unmap_pages(umem);
	xdp_umem_unpin_pages(umem);

	kvfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

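/* Drop a reference to the umem. The last put defers the actual
 * release to a workqueue, as xdp_umem_release() takes the rtnl lock
 * and may sleep.
 */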
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

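/* Pin the user pages long-term (FOLL_LONGTERM) so they stay in place
 * while the device and the data path access them. A partial pin is
 * undone and reported as -ENOMEM.
 */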
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	npgs = pin_user_pages(umem->address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

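/* Charge the pinned pages to the user's RLIMIT_MEMLOCK unless the
 * caller has CAP_IPC_LOCK. The cmpxchg loop makes the limit check and
 * the charge atomic against concurrent registrations.
 */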
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

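/* Validate the registration request from user space and set up the
 * umem: chunk size and alignment checks, memlock accounting, page
 * pinning and kernel mappings.
 */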
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG |
			XDP_UMEM_USES_NEED_WAKEUP))
		return -EINVAL;

	if (!unaligned_chunks && !is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. This is for
		 * simplicity and might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	if (!unaligned_chunks) {
		chunks_per_page = PAGE_SIZE / chunk_size;
		if (chunks < chunks_per_page || chunks % chunks_per_page)
			return -EINVAL;
	}

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->chunk_mask = unaligned_chunks ? XSK_UNALIGNED_BUF_ADDR_MASK
					    : ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	umem->flags = mr->flags;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kvcalloc(umem->npgs, sizeof(*umem->pages),
			       GFP_KERNEL_ACCOUNT);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	err = xdp_umem_map_pages(umem);
	if (!err)
		return 0;

	kvfree(umem->pages);

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

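/* A umem is only usable once both its fill and completion rings
 * exist.
 */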
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}