/* Copyright (c) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/bpf.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/sched/signal.h>
#include <net/sock.h>
#include <net/tcp.h>

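/* Run the program once against @ctx with the given cgroup storage made
 * current, under RCU and with preemption disabled, and return its verdict.
 */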
static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
		struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	u32 ret;

	preempt_disable();
	rcu_read_lock();
	bpf_cgroup_storage_set(storage);
	ret = BPF_PROG_RUN(prog, ctx);
	rcu_read_unlock();
	preempt_enable();

	return ret;
}

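/* Run @prog against @ctx @repeat times (at least once), reporting the last
 * return value in @ret and the average runtime per iteration, in nanoseconds
 * and capped at U32_MAX, in @time. Per-type cgroup storage is allocated for
 * the duration of the run and freed again afterwards.
 */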
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret,
			u32 *time)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
	enum bpf_cgroup_storage_type stype;
	u64 time_start, time_spent = 0;
	u32 i;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;
	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		*ret = bpf_test_run_one(prog, ctx, storage);
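		/* On long runs, yield the CPU from time to time; stop the
		 * clock around cond_resched() so the wait is not counted,
		 * and bail out early if a signal is pending.
		 */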
		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	*time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return 0;
}

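/* Copy the (possibly modified) test data, the program's return value and the
 * measured duration back to user space. The data copy is clamped to the
 * user-supplied data_size_out; -ENOSPC is returned if it had to be truncated.
 */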
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

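/* Allocate a kernel buffer with the requested head- and tailroom and copy the
 * user-supplied test data into it. The input must be at least an Ethernet
 * header long, and headroom + data + tailroom must fit within a single page.
 */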
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}
	return data;
}

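/* BPF_PROG_TEST_RUN for skb-based program types: wrap the user-supplied data
 * in a freshly built sk_buff, run the program over it repeatedly, and copy
 * the resulting packet, return value and timing back to user space.
 */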
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		/* fall through */
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

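	/* Attach a bare, zero-initialized socket so that code which looks at
	 * skb->sk has something valid to dereference.
	 */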
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb,
				       current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration);
	if (ret) {
		kfree_skb(skb);
		kfree(sk);
		return ret;
	}
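	/* eth_type_trans() pulled the MAC header off above and, for non-L2
	 * program types, it was never pushed back. Restore a zeroed header
	 * (growing headroom if necessary) before handing the data back.
	 */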
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				kfree_skb(skb);
				kfree(sk);
				return -ENOMEM;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	kfree_skb(skb);
	kfree(sk);
	return ret;
}

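/* BPF_PROG_TEST_RUN for XDP programs: build an xdp_buff around the
 * user-supplied data, run the program against it, and copy the (possibly
 * resized) packet, return value and timing back to user space.
 */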
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	void *data;
	int ret;

	data = bpf_test_init(kattr, size, XDP_PACKET_HEADROOM + NET_IP_ALIGN, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

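	/* Lay the xdp_buff out over the copied data, leaving the standard
	 * XDP headroom in front so the program can adjust data and data_meta.
	 */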
	xdp.data_hard_start = data;
	xdp.data = data + XDP_PACKET_HEADROOM + NET_IP_ALIGN;
	xdp.data_meta = xdp.data;
	xdp.data_end = xdp.data + size;

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp.rxq = &rxqueue->xdp_rxq;

	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration);
	if (ret)
		goto out;
	if (xdp.data != data + XDP_PACKET_HEADROOM + NET_IP_ALIGN ||
	    xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	kfree(data);
	return ret;
}

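/* BPF_PROG_TEST_RUN for flow dissector programs: build an skb around the
 * user-supplied data, invoke the dissector program on it repeatedly, and
 * return the resulting flow keys together with the return value and timing.
 */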
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys flow_keys;
	u64 time_start, time_spent = 0;
	struct bpf_skb_data_end *cb;
	u32 retval, duration;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;
	u32 i;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	sk = kzalloc(sizeof(*sk), GFP_USER);
	if (!sk) {
		kfree(data);
		return -ENOMEM;
	}
	sock_net_set(sk, current->nsproxy->net_ns);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	skb->protocol = eth_type_trans(skb,
				       current->nsproxy->net_ns->loopback_dev);
	skb_reset_network_header(skb);

	cb = (struct bpf_skb_data_end *)skb->cb;
	cb->qdisc_cb.flow_keys = &flow_keys;

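	/* Run the dissector directly, with the same per-iteration timing and
	 * rescheduling behaviour as bpf_test_run().
	 */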
	if (!repeat)
		repeat = 1;

	time_start = ktime_get_ns();
	for (i = 0; i < repeat; i++) {
		preempt_disable();
		rcu_read_lock();
		retval = __skb_flow_bpf_dissect(prog, skb,
						&flow_keys_dissector,
						&flow_keys);
		rcu_read_unlock();
		preempt_enable();

		if (need_resched()) {
			if (signal_pending(current))
				break;
			time_spent += ktime_get_ns() - time_start;
			cond_resched();
			time_start = ktime_get_ns();
		}
	}
	time_spent += ktime_get_ns() - time_start;
	do_div(time_spent, repeat);
	duration = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);

	kfree_skb(skb);
	kfree(sk);
	return ret;
}