// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

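/* Add an XDP socket to the umem's list of bound sockets. The list is
 * traversed under RCU by the umem's RX/TX paths, so insertion only
 * needs the xsk_list_lock.
 */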
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

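/* Remove an XDP socket from the umem's socket list. For zero-copy
 * umems, wait for an RCU grace period so that drivers still walking
 * the list are done with this socket before it is destroyed.
 */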
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	if (xs->dev) {
		spin_lock_irqsave(&umem->xsk_list_lock, flags);
		list_del_rcu(&xs->list);
		spin_unlock_irqrestore(&umem->xsk_list_lock, flags);

		if (umem->zc)
			synchronize_net();
	}
}

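/* Ask the driver whether a umem is already attached to this queue.
 * Returns a negative errno on failure, 0 if the queue is free, and a
 * positive value if a umem is already installed. Caller holds rtnl.
 */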
int xdp_umem_query(struct net_device *dev, u16 queue_id)
{
	struct netdev_bpf bpf;

	ASSERT_RTNL();

	memset(&bpf, 0, sizeof(bpf));
	bpf.command = XDP_QUERY_XSK_UMEM;
	bpf.xsk.queue_id = queue_id;

	if (!dev->netdev_ops->ndo_bpf)
		return 0;
	return dev->netdev_ops->ndo_bpf(dev, &bpf) ?: !!bpf.xsk.umem;
}

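/* Try to bind the umem to a device and queue for zero-copy operation.
 * XDP_COPY skips driver setup entirely, and a driver that lacks the
 * required ndo's makes us fall back to copy mode, unless XDP_ZEROCOPY
 * was explicitly requested, in which case we fail.
 */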
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u32 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err;

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (force_copy)
		return 0;

	if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_async_xmit)
		return force_zc ? -EOPNOTSUPP : 0; /* fail or fallback */

	rtnl_lock();
	err = xdp_umem_query(dev, queue_id);
	if (err) {
		err = err < 0 ? -EOPNOTSUPP : -EBUSY;
		goto err_rtnl_unlock;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_rtnl_unlock;
	rtnl_unlock();

	dev_hold(dev);
	umem->dev = dev;
	umem->queue_id = queue_id;
	umem->zc = true;
	return 0;

err_rtnl_unlock:
	rtnl_unlock();
	return force_zc ? err : 0; /* fail or fallback */
}

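/* Detach the umem from its device by installing a NULL umem on the
 * queue it was bound to, then drop the device reference.
 */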
static void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	if (umem->dev) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		rtnl_lock();
		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);
		rtnl_unlock();

		if (err)
			WARN(1, "failed to disable umem!\n");

		dev_put(umem->dev);
		umem->dev = NULL;
	}
}

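/* Release the pages pinned by xdp_umem_pin_pages(). The pages may have
 * been written to, so mark them dirty before putting them.
 */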
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

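/* Return the pinned pages to the user's RLIMIT_MEMLOCK accounting. */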
static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

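/* Final teardown: detach from the driver, destroy the fill and
 * completion queues, unpin the pages, and drop the memory accounting.
 */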
static void xdp_umem_release(struct xdp_umem *umem)
{
	struct task_struct *task;
	struct mm_struct *mm;

	xdp_umem_clear_dev(umem);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unpin_pages(umem);

	task = get_pid_task(umem->pid, PIDTYPE_PID);
	put_pid(umem->pid);
	if (!task)
		goto out;
	mm = get_task_mm(task);
	put_task_struct(task);
	if (!mm)
		goto out;

	mmput(mm);
	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
out:
	kfree(umem);
}

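/* Work item wrapper so that the release runs from process context. */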
static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

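/* Drop a reference; the last put defers the release to a workqueue,
 * since the release path takes rtnl_lock and may sleep, while the
 * caller may be in a context where that is not allowed.
 */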
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

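/* Pin the user memory area so that it can be accessed while the pages
 * are guaranteed not to move. On a partial pin, the already-pinned
 * pages are released and -ENOMEM is returned.
 */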
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_write(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags, &umem->pgs[0], NULL);
	up_write(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

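/* Charge the umem's pages against RLIMIT_MEMLOCK via the user's
 * locked_vm counter, unless the caller has CAP_IPC_LOCK. The cmpxchg
 * loop makes the limit check and the addition atomic with respect to
 * concurrent registrations by the same user.
 */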
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

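/* Validate the registration request and set up the umem: the chunk
 * size must be a power of two between XDP_UMEM_MIN_CHUNK_SIZE and
 * PAGE_SIZE, the area page aligned and filled by a whole number of
 * chunks, and the headroom small enough to leave room for packet data
 * in each chunk. Then account and pin the pages.
 */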
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned, for
		 * simplicity. This restriction might be lifted later.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->pid = get_task_pid(current, PIDTYPE_PID);
	umem->address = (unsigned long)addr;
	umem->chunk_mask = ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		goto out;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_account;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_account:
	xdp_umem_unaccount_pages(umem);
out:
	put_pid(umem->pid);
	return err;
}

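/* Allocate a umem and register the user memory described by @mr. */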
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = xdp_umem_reg(umem, mr);
	if (err) {
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

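/* A umem is ready for traffic only when both a fill ring and a
 * completion ring have been created for it.
 */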
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}