/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

/* Forward declarations: only pointers to these types appear in this
 * header, so the full definitions are not needed here.
 */
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF

/* One static key per attach type: lets the hot paths skip the cgroup-bpf
 * hooks entirely when no program of that type is attached anywhere.
 */
extern struct static_key_false cgroup_bpf_enabled_key[MAX_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(type) static_branch_unlikely(&cgroup_bpf_enabled_key[type])

#define BPF_CGROUP_STORAGE_NEST_MAX	8

/* Per-slot record of which task currently owns a set of cgroup storage
 * pointers on this CPU (see bpf_cgroup_storage_set()/unset() below).
 */
struct bpf_cgroup_storage_info {
	struct task_struct *task;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

/* For each cpu, permit maximum BPF_CGROUP_STORAGE_NEST_MAX number of tasks
 * to use bpf cgroup storage simultaneously.
 */
DECLARE_PER_CPU(struct bpf_cgroup_storage_info,
		bpf_cgroup_storage_info[BPF_CGROUP_STORAGE_NEST_MAX]);

/* Iterate @stype over every cgroup storage flavour (shared, per-cpu). */
#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

/* Buffer backing the shared (non-percpu) storage flavour; the rcu head
 * allows RCU-deferred freeing of the flexible data array.
 */
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		/* BPF_CGROUP_STORAGE_SHARED flavour */
		struct bpf_storage_buffer *buf;
		/* BPF_CGROUP_STORAGE_PERCPU flavour */
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;	/* membership in the storage map */
	struct list_head list_cg;	/* membership in cgroup_bpf::storages */
	struct rb_node node;
	struct rcu_head rcu;
};

/* bpf_link binding a program to a cgroup at a given attach type. */
struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

/* One entry on a cgroup's per-attach-type program list. */
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	/* NOTE(review): presumably non-NULL when attached via a bpf_link
	 * rather than directly — confirm against attach paths.
	 */
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

/* Per-cgroup BPF attachment state. */
struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

/* Low-level attach/detach/query primitives; callers are expected to hold
 * cgroup_mutex (see the wrappers below).
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

/* Program runners, invoked from the networking/sysctl/device/sockopt
 * paths through the BPF_CGROUP_RUN_* wrapper macros below.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

166 167
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
168
{
169 170 171
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

172 173 174
	return BPF_CGROUP_STORAGE_SHARED;
}

175 176
static inline int bpf_cgroup_storage_set(struct bpf_cgroup_storage
					 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
177 178
{
	enum bpf_cgroup_storage_type stype;
179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206
	int i, err = 0;

	preempt_disable();
	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != NULL))
			continue;

		this_cpu_write(bpf_cgroup_storage_info[i].task, current);
		for_each_cgroup_storage_type(stype)
			this_cpu_write(bpf_cgroup_storage_info[i].storage[stype],
				       storage[stype]);
		goto out;
	}
	err = -EBUSY;
	WARN_ON_ONCE(1);

out:
	preempt_enable();
	return err;
}

static inline void bpf_cgroup_storage_unset(void)
{
	int i;

	for (i = 0; i < BPF_CGROUP_STORAGE_NEST_MAX; i++) {
		if (unlikely(this_cpu_read(bpf_cgroup_storage_info[i].task) != current))
			continue;
207

208 209 210
		this_cpu_write(bpf_cgroup_storage_info[i].task, NULL);
		return;
	}
211 212
}

/* Cgroup storage lookup, allocation, cgroup linkage and map plumbing. */
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(BPF_CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

/* Egress only runs when sk is skb's own socket, and only against a full
 * socket (resolved through sk_to_full_sk() and checked by sk_fullsock()).
 */
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(BPF_CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

/* Run the sock programs of @type attached to sk's cgroup; 0 when the
 * static key for @type is disabled.
 */
#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(type)) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

/* Attach-type-specific instances of BPF_CGROUP_RUN_SK_PROG(). */
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

/* Run sock_addr programs of @type; any flags returned by the program are
 * discarded here (contrast with BPF_CGROUP_RUN_PROG_INET_BIND_LOCK below).
 */
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	u32 __unused_flags;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(type))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL,		       \
							  &__unused_flags);    \
	__ret;								       \
})

/* Same as BPF_CGROUP_RUN_SA_PROG(), but runs with the socket locked and
 * forwards an optional attach-type-specific context (@t_ctx).
 */
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx)		       \
({									       \
	u32 __unused_flags;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(type))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  t_ctx,	       \
							  &__unused_flags);    \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, bind_flags)	       \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(type))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type,     \
							  NULL, &__flags);     \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})

/* True when a connect hook is enabled (v4 or v6) and the protocol
 * provides a pre_connect callback.
 */
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(BPF_CGROUP_INET4_CONNECT) ||		       \
	  cgroup_bpf_enabled(BPF_CGROUP_INET6_CONNECT)) &&		       \
	 (sk)->sk_prot->pre_connect)
/* Per-attach-type convenience wrappers around the SA_PROG runners. */
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * set its sock_ops->sk to req_sk, and call this SOCK_OPS"_SK" with
 * the listener-sk such that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on listener-sk will not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 BPF_CGROUP_SOCK_OPS); \
	__ret;								\
})

/* Run the SOCK_OPS programs of the cgroup of sock_ops->sk.
 * The program only runs when sock_ops->sk can be resolved to a full
 * socket (sk_to_full_sk() + sk_fullsock() check); otherwise __ret is 0.
 *
 * Fix: the previous version declared __sk as 'typeof(sk)', but 'sk' is
 * not a parameter of this macro — it only compiled because every call
 * site happened to have a 'struct sock *sk' variable in scope.  Use the
 * explicit type so the macro is self-contained (macro hygiene).
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(BPF_CGROUP_SOCK_OPS) && (sock_ops)->sk) {       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
/* Device access check: 0 when no BPF_CGROUP_DEVICE program is enabled. */
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(BPF_CGROUP_DEVICE))			      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})


/* Sysctl read/write hook; buf/count/pos may be updated by the runner. */
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(BPF_CGROUP_SYSCTL))			       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       BPF_CGROUP_SYSCTL);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(BPF_CGROUP_SETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

/* Read the user-supplied optlen so the getsockopt hook can size its
 * buffer; yields 0 when the hook is disabled.
 * NOTE(review): get_user()'s return value is discarded here — presumably
 * a fault leaves __ret at 0; confirm against arch get_user semantics.
 */
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))			       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

/* The hook is skipped when the protocol's bpf_bypass_getsockopt callback
 * exists and returns true for this (level, optname) pair.
 */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))			       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					tcp_bpf_bypass_getsockopt,	       \
					level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})

/* Kernel-pointer variant (no __user buffers, no bypass check). */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(BPF_CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})

/* Prog/link attach, detach and query operations (CONFIG_CGROUP_BPF=y). */
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

/* Stubs for CONFIG_CGROUP_BPF=n: attach/detach/query fail with -EINVAL,
 * everything else is a successful no-op.
 */
struct bpf_prog;
struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) { return 0; }
static inline void bpf_cgroup_storage_unset(void) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}
/* No-op macro stubs for CONFIG_CGROUP_BPF=n. RUN_PROG wrappers evaluate
 * to 0, except the getsockopt variants which pass retval through
 * unchanged, matching the enabled macros' "no program ran" result.
 */
#define cgroup_bpf_enabled(type) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, type, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

/* Empty-body iterator: loop never executes when cgroup storage is off. */
#define for_each_cgroup_storage_type(stype) for (; false; )
#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */