// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

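/* Run @prog on @ctx @repeat times (at least once) under RCU with migration
 * disabled. The program's return value is reported through @retval and the
 * average runtime in nanoseconds through @time; @xdp selects
 * bpf_prog_run_xdp() over the generic BPF_PROG_RUN(). Per-cgroup storage is
 * allocated for the duration of the run, and the loop drops its locks to
 * honour pending signals and rescheduling requests.
 */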
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	int ret = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	rcu_read_lock();
	migrate_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		bpf_cgroup_storage_set(storage);

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			migrate_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			migrate_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	migrate_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}

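/* Copy the test output back to userspace: the (possibly clamped) data buffer,
 * its full size, the program's return value and the measured duration.
 * Returns -ENOSPC when the user-provided output buffer was too small for the
 * full data, -EFAULT on copy failures and 0 otherwise.
 */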
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp the copy if the user has provided a size hint, but copy the
	 * full buffer if not, to retain the old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety of
 * architecture-dependent calling conventions. Tests with 7+ arguments can be
 * supported in the future.
 */
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

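/* Allocate a zeroed buffer with @headroom and @tailroom around @size bytes of
 * packet data copied in from the user's data_in pointer. The total allocation
 * is limited to PAGE_SIZE and the packet must be at least ETH_HLEN bytes.
 */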
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

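/* Test run for FENTRY/FEXIT programs: invoke the bpf_fentry_test*() functions
 * above with fixed arguments so an attached program gets triggered, and verify
 * their return values. Any mismatch (or an unsupported attach type) is
 * reported as -EFAULT.
 */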
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	int err = -EFAULT;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111)
			goto out;
		break;
	default:
		goto out;
	}

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

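/* Allocate a zeroed context object of @max_size bytes and, if ctx_in was
 * supplied, copy the user's context in. A larger user context is accepted
 * only when the bytes beyond @max_size are zero. Returns NULL when neither
 * ctx_in nor ctx_out was provided.
 */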
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(data_in, max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to), i.e. the range is fully zeroed.
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}

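/* BPF_PROG_TEST_RUN handler for skb-based program types: build a linear skb
 * around the user-supplied packet data, apply the optional __sk_buff context,
 * run the program and copy the resulting packet, context, return value and
 * duration back to userspace.
 */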
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

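	/* Back the skb with a minimal socket in the caller's netns so
	 * programs that look at skb->sk (e.g. socket storage helpers)
	 * see a valid socket rather than NULL.
	 */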
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb, current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
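	/* The Ethernet header was pulled by eth_type_trans(); for non-L2
	 * program types push a zeroed header back (growing the headroom if
	 * the program consumed it) so the data returned to userspace always
	 * starts at the mac header.
	 */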
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

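	/* Reserve XDP_PACKET_HEADROOM + NET_IP_ALIGN in front of the frame,
	 * as a real driver would, so the program has room to adjust the head.
	 */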
	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

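	/* Bind the buffer to RX queue 0 of the current netns' loopback device
	 * so xdp.rxq points at a valid xdp_rxq_info during the run.
	 */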
	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}

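/* The only input field of the user-supplied bpf_flow_keys context is @flags;
 * reject the context if any other byte is non-zero.
 */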
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

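/* BPF_PROG_TEST_RUN handler for flow dissector programs: run
 * bpf_flow_dissect() on the user-supplied packet @repeat times and report
 * the resulting flow keys, return value and average duration.
 */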
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

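	/* Like bpf_test_run(), time the repeated runs under RCU, here with
	 * preemption disabled, dropping the locks to honour signals and
	 * rescheduling requests.
	 */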
	rcu_read_lock();
	preempt_disable();
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);

		if (signal_pending(current)) {
			preempt_enable();
			rcu_read_unlock();

			ret = -EINTR;
			goto out;
		}

		if (need_resched()) {
			time_spent += ktime_get_ns() - time_start;
			preempt_enable();
			rcu_read_unlock();

			cond_resched();

			rcu_read_lock();
			preempt_disable();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	preempt_enable();
	rcu_read_unlock();

	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}