/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

#ifdef CONFIG_CGROUP_BPF
enum cgroup_bpf_attach_type {
	CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
	CGROUP_INET_INGRESS = 0,
	CGROUP_INET_EGRESS,
	CGROUP_INET_SOCK_CREATE,
	CGROUP_SOCK_OPS,
	CGROUP_DEVICE,
	CGROUP_INET4_BIND,
	CGROUP_INET6_BIND,
	CGROUP_INET4_CONNECT,
	CGROUP_INET6_CONNECT,
	CGROUP_INET4_POST_BIND,
	CGROUP_INET6_POST_BIND,
	CGROUP_UDP4_SENDMSG,
	CGROUP_UDP6_SENDMSG,
	CGROUP_SYSCTL,
	CGROUP_UDP4_RECVMSG,
	CGROUP_UDP6_RECVMSG,
	CGROUP_GETSOCKOPT,
	CGROUP_SETSOCKOPT,
	CGROUP_INET4_GETPEERNAME,
	CGROUP_INET6_GETPEERNAME,
	CGROUP_INET4_GETSOCKNAME,
	CGROUP_INET6_GETSOCKNAME,
	CGROUP_INET_SOCK_RELEASE,
	MAX_CGROUP_BPF_ATTACH_TYPE
};

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
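
/* Illustrative example (hypothetical caller): a UAPI attach type is
 * converted to its cgroup-local index before any of the per-attach-type
 * arrays below are touched, and an unsupported type is rejected:
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(BPF_CGROUP_INET_INGRESS);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 */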

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* progs attached to this cgroup, together with their attach flags;
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element,
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_CGROUP_BPF_ATTACH_TYPE];
	u32 flags[MAX_CGROUP_BPF_ATTACH_TYPE];

	/* list of cgroup shared storages */
	struct list_head storages;

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};
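
/* Illustrative sketch (hypothetical caller) of the attach-flag semantics
 * documented above: with BPF_F_ALLOW_MULTI several programs can coexist on
 * one attach point, whereas flags == 0 or BPF_F_ALLOW_OVERRIDE keeps at
 * most one program per attach point:
 *
 *	err = cgroup_bpf_attach(cgrp, prog_a, NULL, NULL,
 *				BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *	if (!err)
 *		err = cgroup_bpf_attach(cgrp, prog_b, NULL, NULL,
 *					BPF_CGROUP_INET_EGRESS,
 *					BPF_F_ALLOW_MULTI);
 */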

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp,
		      struct bpf_prog *prog, struct bpf_prog *replace_prog,
		      struct bpf_cgroup_link *link, enum bpf_attach_type type,
		      u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
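
/* Minimal usage sketch (hypothetical receive path, assuming sk and skb are
 * already set up): a non-zero return means the attached cgroup program(s)
 * rejected the packet.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */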

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)				       \
({									       \
	u32 __unused_flags;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
							  NULL,		       \
							  &__unused_flags);    \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)		       \
({									       \
	u32 __unused_flags;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
							  t_ctx,	       \
							  &__unused_flags);    \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only flag that is supported
 * (at bit position 0) indicates that the CAP_NET_BIND_SERVICE capability
 * check should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)	       \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))	{				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,     \
							  NULL, &__flags);     \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
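
/* Illustrative sketch (hypothetical bind path): the program's verdict comes
 * back as the return value, while the capability bypass described above is
 * reported through the bind_flags argument:
 *
 *	u32 bind_flags = 0;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, CGROUP_INET4_BIND,
 *						 &bind_flags);
 *	if (err)
 *		return err;
 *	if (!(bind_flags & BIND_NO_CAP_NET_BIND_SERVICE))
 *		... perform the usual CAP_NET_BIND_SERVICE check ...
 */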

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||		       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&		       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)			\
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached via rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro
 * with the listener-sk so that the cgroup-bpf-progs of the
 * listener-sk will be run.
 *
 * Whether syncookie mode is in use or not,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			\
({									\
	int __ret = 0;							\
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			\
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		\
							 sock_ops,	\
							 CGROUP_SOCK_OPS); \
	__ret;								\
})
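
/* Illustrative sketch of the syncookie case described above (hypothetical
 * caller; req_sk points to the request_sock, listener_sk is the unlocked
 * listener whose cgroup-bpf-progs should run):
 *
 *	sock_ops.sk = req_sk;
 *	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */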

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))			      \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	      \
							  CGROUP_DEVICE); \
									      \
	__ret;								      \
})


#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))			       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,        \
						       CGROUP_SYSCTL);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,   \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,   \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					tcp_bpf_bypass_getsockopt,	       \
					level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})
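
/* Minimal usage sketch (hypothetical getsockopt path): the user-supplied
 * optlen is read up front as max_optlen, and after the kernel has produced
 * its result the cgroup program gets a chance to override it:
 *
 *	max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	...
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					     optlen, max_optlen, err);
 */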

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,      \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else

struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
					void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */