diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d67ccdc0099f1742ca9359916202d5af57fd117a..1e334b248ff65724eb40e70846046f61a05b4909 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -57,6 +57,9 @@ struct bpf_map { atomic_t usercnt; struct bpf_map *inner_map_meta; char name[BPF_OBJ_NAME_LEN]; +#ifdef CONFIG_SECURITY + void *security; +#endif }; /* function argument constraints */ @@ -193,6 +196,9 @@ struct bpf_prog_aux { struct user_struct *user; u64 load_time; /* ns since boottime */ char name[BPF_OBJ_NAME_LEN]; +#ifdef CONFIG_SECURITY + void *security; +#endif union { struct work_struct work; struct rcu_head rcu; @@ -282,6 +288,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, #ifdef CONFIG_BPF_SYSCALL DECLARE_PER_CPU(int, bpf_prog_active); +extern const struct file_operations bpf_map_fops; +extern const struct file_operations bpf_prog_fops; + #define BPF_PROG_TYPE(_id, _name) \ extern const struct bpf_prog_ops _name ## _prog_ops; \ extern const struct bpf_verifier_ops _name ## _verifier_ops; @@ -315,11 +324,11 @@ void bpf_map_area_free(void *base); extern int sysctl_unprivileged_bpf_disabled; -int bpf_map_new_fd(struct bpf_map *map); +int bpf_map_new_fd(struct bpf_map *map, int flags); int bpf_prog_new_fd(struct bpf_prog *prog); int bpf_obj_pin_user(u32 ufd, const char __user *pathname); -int bpf_obj_get_user(const char __user *pathname); +int bpf_obj_get_user(const char __user *pathname, int flags); int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); @@ -338,6 +347,8 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, void *key, void *value, u64 map_flags); int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); +int bpf_get_file_flag(int flags); + /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and * forced to use 'long' read/writes to try to atomically copy long counters. * Best-effort only. No barriers here, since it _will_ race with concurrent @@ -421,7 +432,7 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) { } -static inline int bpf_obj_get_user(const char __user *pathname) +static inline int bpf_obj_get_user(const char __user *pathname, int flags) { return -EOPNOTSUPP; } diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index c9258124e41757187cdb8b2f83c5901966345902..7161d8e7ee79246ffca220805826f883f26d7ddd 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -1351,6 +1351,40 @@ * @inode we wish to get the security context of. * @ctx is a pointer in which to place the allocated security context. * @ctxlen points to the place to put the length of @ctx. + * + * Security hooks for using the eBPF map and program functionalities through + * the bpf syscall. + * + * @bpf: + * Do an initial check for all bpf syscalls after the attribute is copied + * into the kernel. The security module can implement its own rules to + * check the specific cmd it needs. + * + * @bpf_map: + * Do a check when the kernel generates and returns a file descriptor for + * an eBPF map. + * + * @map: bpf map that we want to access + * @fmode: the access flags + * + * @bpf_prog: + * Do a check when the kernel generates and returns a file descriptor for + * an eBPF program. + * + * @prog: bpf prog that userspace wants to use. + * + * @bpf_map_alloc_security: + * Initialize the security field inside the bpf map.
+ * + * @bpf_map_free_security: + * Clean up the security information stored inside bpf map. + * + * @bpf_prog_alloc_security: + * Initialize the security field inside bpf program. + * + * @bpf_prog_free_security: + * Clean up the security information stored inside bpf prog. + * */ union security_list_options { int (*binder_set_context_mgr)(struct task_struct *mgr); @@ -1682,6 +1716,17 @@ union security_list_options { struct audit_context *actx); void (*audit_rule_free)(void *lsmrule); #endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_BPF_SYSCALL + int (*bpf)(int cmd, union bpf_attr *attr, + unsigned int size); + int (*bpf_map)(struct bpf_map *map, fmode_t fmode); + int (*bpf_prog)(struct bpf_prog *prog); + int (*bpf_map_alloc_security)(struct bpf_map *map); + void (*bpf_map_free_security)(struct bpf_map *map); + int (*bpf_prog_alloc_security)(struct bpf_prog_aux *aux); + void (*bpf_prog_free_security)(struct bpf_prog_aux *aux); +#endif /* CONFIG_BPF_SYSCALL */ }; struct security_hook_heads { @@ -1901,6 +1946,15 @@ struct security_hook_heads { struct list_head audit_rule_match; struct list_head audit_rule_free; #endif /* CONFIG_AUDIT */ +#ifdef CONFIG_BPF_SYSCALL + struct list_head bpf; + struct list_head bpf_map; + struct list_head bpf_prog; + struct list_head bpf_map_alloc_security; + struct list_head bpf_map_free_security; + struct list_head bpf_prog_alloc_security; + struct list_head bpf_prog_free_security; +#endif /* CONFIG_BPF_SYSCALL */ } __randomize_layout; /* diff --git a/include/linux/security.h b/include/linux/security.h index ce6265960d6c430a90e1ad3c3749d0a438ecaca9..18800b0911e53ac6958be1caa1c1395f6723682a 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -31,6 +31,7 @@ #include #include #include +#include struct linux_binprm; struct cred; @@ -1730,6 +1731,50 @@ static inline void securityfs_remove(struct dentry *dentry) #endif +#ifdef CONFIG_BPF_SYSCALL +#ifdef CONFIG_SECURITY +extern int security_bpf(int cmd, union bpf_attr *attr, unsigned int size); +extern int security_bpf_map(struct bpf_map *map, fmode_t fmode); +extern int security_bpf_prog(struct bpf_prog *prog); +extern int security_bpf_map_alloc(struct bpf_map *map); +extern void security_bpf_map_free(struct bpf_map *map); +extern int security_bpf_prog_alloc(struct bpf_prog_aux *aux); +extern void security_bpf_prog_free(struct bpf_prog_aux *aux); +#else +static inline int security_bpf(int cmd, union bpf_attr *attr, + unsigned int size) +{ + return 0; +} + +static inline int security_bpf_map(struct bpf_map *map, fmode_t fmode) +{ + return 0; +} + +static inline int security_bpf_prog(struct bpf_prog *prog) +{ + return 0; +} + +static inline int security_bpf_map_alloc(struct bpf_map *map) +{ + return 0; +} + +static inline void security_bpf_map_free(struct bpf_map *map) +{ } + +static inline int security_bpf_prog_alloc(struct bpf_prog_aux *aux) +{ + return 0; +} + +static inline void security_bpf_prog_free(struct bpf_prog_aux *aux) +{ } +#endif /* CONFIG_SECURITY */ +#endif /* CONFIG_BPF_SYSCALL */ + #ifdef CONFIG_SECURITY static inline char *alloc_secdata(void) diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 4303fb6c38171cb711d27b58620b7f82190d3866..d83f95ea6a1b283fa12e02099f4788f6755f2c5b 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -218,6 +218,10 @@ enum bpf_attach_type { #define BPF_OBJ_NAME_LEN 16U +/* Flags for accessing BPF object */ +#define BPF_F_RDONLY (1U << 3) +#define BPF_F_WRONLY (1U << 4) + union bpf_attr { struct { /* anonymous struct used 
by BPF_MAP_CREATE command */ __u32 map_type; /* one of enum bpf_map_type */ @@ -260,6 +264,7 @@ union bpf_attr { struct { /* anonymous struct used by BPF_OBJ_* commands */ __aligned_u64 pathname; __u32 bpf_fd; + __u32 file_flags; }; struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ @@ -287,6 +292,7 @@ union bpf_attr { __u32 map_id; }; __u32 next_id; + __u32 open_flags; }; struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */ diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 68d866628be0d69cd81ce7340ea382c378218641..988c04c91e10f87de6273d9568e47932b07fec1b 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c @@ -19,6 +19,9 @@ #include "map_in_map.h" +#define ARRAY_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + static void bpf_array_free_percpu(struct bpf_array *array) { int i; @@ -56,7 +59,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size == 0 || attr->map_flags & ~BPF_F_NUMA_NODE || + attr->value_size == 0 || + attr->map_flags & ~ARRAY_CREATE_FLAG_MASK || (percpu && numa_node != NUMA_NO_NODE)) return ERR_PTR(-EINVAL); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index e093d9a2c4dd3fb5f02503bf0c741f2af9cc7c48..e5d3de7cff2e55d85e9dca7c1f5099b647f3e985 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -50,6 +50,9 @@ #include #include +#define DEV_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + struct bpf_dtab_netdev { struct net_device *dev; struct bpf_dtab *dtab; @@ -80,7 +83,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) + attr->value_size != 4 || attr->map_flags & ~DEV_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); dtab = kzalloc(sizeof(*dtab), GFP_USER); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 431126f31ea3c90648366295e2b77fd3bb79b6e7..919955236e633144a7fc7354b2d430df69d84841 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -18,8 +18,9 @@ #include "bpf_lru_list.h" #include "map_in_map.h" -#define HTAB_CREATE_FLAG_MASK \ - (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE) +#define HTAB_CREATE_FLAG_MASK \ + (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE | \ + BPF_F_RDONLY | BPF_F_WRONLY) struct bucket { struct hlist_nulls_head head; diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index be1dde967208eb19d1f92872275df5fdce437024..01aaef1a77c5af164660b0f75ac99e4bd7c55a52 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -295,7 +295,7 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname) } static void *bpf_obj_do_get(const struct filename *pathname, - enum bpf_type *type) + enum bpf_type *type, int flags) { struct inode *inode; struct path path; @@ -307,7 +307,7 @@ static void *bpf_obj_do_get(const struct filename *pathname, return ERR_PTR(ret); inode = d_backing_inode(path.dentry); - ret = inode_permission(inode, MAY_WRITE); + ret = inode_permission(inode, ACC_MODE(flags)); if (ret) goto out; @@ -326,18 +326,23 @@ static void *bpf_obj_do_get(const struct filename *pathname, return ERR_PTR(ret); } -int bpf_obj_get_user(const char __user *pathname) +int bpf_obj_get_user(const char __user *pathname, int flags) { enum bpf_type type = BPF_TYPE_UNSPEC; struct filename *pname; int ret = -ENOENT; + int f_flags; void *raw; 
+ f_flags = bpf_get_file_flag(flags); + if (f_flags < 0) + return f_flags; + pname = getname(pathname); if (IS_ERR(pname)) return PTR_ERR(pname); - raw = bpf_obj_do_get(pname, &type); + raw = bpf_obj_do_get(pname, &type, f_flags); if (IS_ERR(raw)) { ret = PTR_ERR(raw); goto out; @@ -346,7 +351,7 @@ int bpf_obj_get_user(const char __user *pathname) if (type == BPF_TYPE_PROG) ret = bpf_prog_new_fd(raw); else if (type == BPF_TYPE_MAP) - ret = bpf_map_new_fd(raw); + ret = bpf_map_new_fd(raw, f_flags); else goto out; diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index 34d8a690ea059bd0e4347960290b6f268444cfea..885e45479680508a11681290c0a52bc25c849c3b 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c @@ -495,7 +495,8 @@ static int trie_delete_elem(struct bpf_map *map, void *_key) #define LPM_KEY_SIZE_MAX LPM_KEY_SIZE(LPM_DATA_SIZE_MAX) #define LPM_KEY_SIZE_MIN LPM_KEY_SIZE(LPM_DATA_SIZE_MIN) -#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE) +#define LPM_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE | \ + BPF_F_RDONLY | BPF_F_WRONLY) static struct bpf_map *trie_alloc(union bpf_attr *attr) { diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index a298d66666981e51831427fea6ab83b7606f78c7..86ec846f2d5e9c9eccd7ccb7b1cd8c1e86c68c5d 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -40,6 +40,9 @@ #include #include +#define SOCK_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + struct bpf_stab { struct bpf_map map; struct sock **sock_map; @@ -489,7 +492,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) /* check sanity of attributes */ if (attr->max_entries == 0 || attr->key_size != 4 || - attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) + attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); if (attr->value_size > KMALLOC_MAX_SIZE) diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index 135be433e9a0fb9a3c18e96db336cf1817be1c1c..a15bc636cc98ff7b2fe55b0d50b5432e9d4b50de 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c @@ -11,6 +11,9 @@ #include #include "percpu_freelist.h" +#define STACK_CREATE_FLAG_MASK \ + (BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY) + struct stack_map_bucket { struct pcpu_freelist_node fnode; u32 hash; @@ -60,7 +63,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr) if (!capable(CAP_SYS_ADMIN)) return ERR_PTR(-EPERM); - if (attr->map_flags & ~BPF_F_NUMA_NODE) + if (attr->map_flags & ~STACK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); /* check sanity of attributes */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 0e893cac6795ce27d82bd1d2e29827d5abcff0a3..323be2473c4bfafc65d50a600000d3f13c0d7b94 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -34,6 +34,8 @@ #define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) #define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map)) +#define BPF_OBJ_FLAG_MASK (BPF_F_RDONLY | BPF_F_WRONLY) + DEFINE_PER_CPU(int, bpf_prog_active); static DEFINE_IDR(prog_idr); static DEFINE_SPINLOCK(prog_idr_lock); @@ -210,6 +212,7 @@ static void bpf_map_free_deferred(struct work_struct *work) struct bpf_map *map = container_of(work, struct bpf_map, work); bpf_map_uncharge_memlock(map); + security_bpf_map_free(map); /* implementation dependent freeing */ map->ops->map_free(map); } @@ -294,17 +297,54 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp) } #endif -static const struct file_operations bpf_map_fops 
= { +static ssize_t bpf_dummy_read(struct file *filp, char __user *buf, size_t siz, + loff_t *ppos) +{ + /* We need this handler such that alloc_file() enables + * f_mode with FMODE_CAN_READ. + */ + return -EINVAL; +} + +static ssize_t bpf_dummy_write(struct file *filp, const char __user *buf, + size_t siz, loff_t *ppos) +{ + /* We need this handler such that alloc_file() enables + * f_mode with FMODE_CAN_WRITE. + */ + return -EINVAL; +} + +const struct file_operations bpf_map_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = bpf_map_show_fdinfo, #endif .release = bpf_map_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, }; -int bpf_map_new_fd(struct bpf_map *map) +int bpf_map_new_fd(struct bpf_map *map, int flags) { + int ret; + + ret = security_bpf_map(map, OPEN_FMODE(flags)); + if (ret < 0) + return ret; + return anon_inode_getfd("bpf-map", &bpf_map_fops, map, - O_RDWR | O_CLOEXEC); + flags | O_CLOEXEC); +} + +int bpf_get_file_flag(int flags) +{ + if ((flags & BPF_F_RDONLY) && (flags & BPF_F_WRONLY)) + return -EINVAL; + if (flags & BPF_F_RDONLY) + return O_RDONLY; + if (flags & BPF_F_WRONLY) + return O_WRONLY; + return O_RDWR; } /* helper macro to check that unused fields 'union bpf_attr' are zero */ @@ -344,12 +384,17 @@ static int map_create(union bpf_attr *attr) { int numa_node = bpf_map_attr_numa_node(attr); struct bpf_map *map; + int f_flags; int err; err = CHECK_ATTR(BPF_MAP_CREATE); if (err) return -EINVAL; + f_flags = bpf_get_file_flag(attr->map_flags); + if (f_flags < 0) + return f_flags; + if (numa_node != NUMA_NO_NODE && ((unsigned int)numa_node >= nr_node_ids || !node_online(numa_node))) @@ -367,15 +412,19 @@ static int map_create(union bpf_attr *attr) atomic_set(&map->refcnt, 1); atomic_set(&map->usercnt, 1); - err = bpf_map_charge_memlock(map); + err = security_bpf_map_alloc(map); if (err) goto free_map_nouncharge; + err = bpf_map_charge_memlock(map); + if (err) + goto free_map_sec; + err = bpf_map_alloc_id(map); if (err) goto free_map; - err = bpf_map_new_fd(map); + err = bpf_map_new_fd(map, f_flags); if (err < 0) { /* failed to allocate fd. 
* bpf_map_put() is needed because the above @@ -392,6 +441,8 @@ static int map_create(union bpf_attr *attr) free_map: bpf_map_uncharge_memlock(map); +free_map_sec: + security_bpf_map_free(map); free_map_nouncharge: map->ops->map_free(map); return err; @@ -490,6 +541,11 @@ static int map_lookup_elem(union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); + if (!(f.file->f_mode & FMODE_CAN_READ)) { + err = -EPERM; + goto err_put; + } + key = memdup_user(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); @@ -570,6 +626,11 @@ static int map_update_elem(union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); + if (!(f.file->f_mode & FMODE_CAN_WRITE)) { + err = -EPERM; + goto err_put; + } + key = memdup_user(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); @@ -659,6 +720,11 @@ static int map_delete_elem(union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); + if (!(f.file->f_mode & FMODE_CAN_WRITE)) { + err = -EPERM; + goto err_put; + } + key = memdup_user(ukey, map->key_size); if (IS_ERR(key)) { err = PTR_ERR(key); @@ -702,6 +768,11 @@ static int map_get_next_key(union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); + if (!(f.file->f_mode & FMODE_CAN_READ)) { + err = -EPERM; + goto err_put; + } + if (ukey) { key = memdup_user(ukey, map->key_size); if (IS_ERR(key)) { @@ -856,6 +927,7 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu) free_used_maps(aux); bpf_prog_uncharge_memlock(aux->prog); + security_bpf_prog_free(aux); bpf_prog_free(aux->prog); } @@ -903,15 +975,23 @@ static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp) } #endif -static const struct file_operations bpf_prog_fops = { +const struct file_operations bpf_prog_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = bpf_prog_show_fdinfo, #endif .release = bpf_prog_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, }; int bpf_prog_new_fd(struct bpf_prog *prog) { + int ret; + + ret = security_bpf_prog(prog); + if (ret < 0) + return ret; + return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC); } @@ -1051,10 +1131,14 @@ static int bpf_prog_load(union bpf_attr *attr) if (!prog) return -ENOMEM; - err = bpf_prog_charge_memlock(prog); + err = security_bpf_prog_alloc(prog->aux); if (err) goto free_prog_nouncharge; + err = bpf_prog_charge_memlock(prog); + if (err) + goto free_prog_sec; + prog->len = attr->insn_cnt; err = -EFAULT; @@ -1112,16 +1196,18 @@ static int bpf_prog_load(union bpf_attr *attr) free_used_maps(prog->aux); free_prog: bpf_prog_uncharge_memlock(prog); +free_prog_sec: + security_bpf_prog_free(prog->aux); free_prog_nouncharge: bpf_prog_free(prog); return err; } -#define BPF_OBJ_LAST_FIELD bpf_fd +#define BPF_OBJ_LAST_FIELD file_flags static int bpf_obj_pin(const union bpf_attr *attr) { - if (CHECK_ATTR(BPF_OBJ)) + if (CHECK_ATTR(BPF_OBJ) || attr->file_flags != 0) return -EINVAL; return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname)); @@ -1129,10 +1215,12 @@ static int bpf_obj_pin(const union bpf_attr *attr) static int bpf_obj_get(const union bpf_attr *attr) { - if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0) + if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0 || + attr->file_flags & ~BPF_OBJ_FLAG_MASK) return -EINVAL; - return bpf_obj_get_user(u64_to_user_ptr(attr->pathname)); + return bpf_obj_get_user(u64_to_user_ptr(attr->pathname), + attr->file_flags); } #ifdef CONFIG_CGROUP_BPF @@ -1392,20 +1480,26 @@ static int bpf_prog_get_fd_by_id(const union bpf_attr *attr) return fd; } -#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id 
+#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD open_flags static int bpf_map_get_fd_by_id(const union bpf_attr *attr) { struct bpf_map *map; u32 id = attr->map_id; + int f_flags; int fd; - if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID)) + if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID) || + attr->open_flags & ~BPF_OBJ_FLAG_MASK) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + f_flags = bpf_get_file_flag(attr->open_flags); + if (f_flags < 0) + return f_flags; + spin_lock_bh(&map_idr_lock); map = idr_find(&map_idr, id); if (map) @@ -1417,7 +1511,7 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr) if (IS_ERR(map)) return PTR_ERR(map); - fd = bpf_map_new_fd(map); + fd = bpf_map_new_fd(map, f_flags); if (fd < 0) bpf_map_put(map); @@ -1572,6 +1666,10 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz if (copy_from_user(&attr, uattr, size) != 0) return -EFAULT; + err = security_bpf(cmd, &attr, size); + if (err < 0) + return err; + switch (cmd) { case BPF_MAP_CREATE: err = map_create(&attr); diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 29123934887bbfe5081178f9ce2425c5bb618a9c..041da0d9c06f2b1c2ecb31851932ac5a350122a9 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c @@ -56,7 +56,7 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) int retval, fd; set_fs(KERNEL_DS); - fd = bpf_obj_get_user(path); + fd = bpf_obj_get_user(path, 0); set_fs(oldfs); if (fd < 0) return fd; diff --git a/security/security.c b/security/security.c index 4bf0f571b4ef94df1d3c44b7fed6b7b651c1924f..1cd8526cb0b775647fe567a6910cc7b811cff21a 100644 --- a/security/security.c +++ b/security/security.c @@ -12,6 +12,7 @@ * (at your option) any later version. */ +#include #include #include #include @@ -1703,3 +1704,34 @@ int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, actx); } #endif /* CONFIG_AUDIT */ + +#ifdef CONFIG_BPF_SYSCALL +int security_bpf(int cmd, union bpf_attr *attr, unsigned int size) +{ + return call_int_hook(bpf, 0, cmd, attr, size); +} +int security_bpf_map(struct bpf_map *map, fmode_t fmode) +{ + return call_int_hook(bpf_map, 0, map, fmode); +} +int security_bpf_prog(struct bpf_prog *prog) +{ + return call_int_hook(bpf_prog, 0, prog); +} +int security_bpf_map_alloc(struct bpf_map *map) +{ + return call_int_hook(bpf_map_alloc_security, 0, map); +} +int security_bpf_prog_alloc(struct bpf_prog_aux *aux) +{ + return call_int_hook(bpf_prog_alloc_security, 0, aux); +} +void security_bpf_map_free(struct bpf_map *map) +{ + call_void_hook(bpf_map_free_security, map); +} +void security_bpf_prog_free(struct bpf_prog_aux *aux) +{ + call_void_hook(bpf_prog_free_security, aux); +} +#endif /* CONFIG_BPF_SYSCALL */ diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index f5d304736852f98508144c23e7325068c6ae2ee8..2e3a627fc0b1f034a68469ccab6232cae6e41f5a 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -85,6 +85,7 @@ #include #include #include +#include #include "avc.h" #include "objsec.h" @@ -1814,6 +1815,10 @@ static inline int file_path_has_perm(const struct cred *cred, return inode_has_perm(cred, file_inode(file), av, &ad); } +#ifdef CONFIG_BPF_SYSCALL +static int bpf_fd_pass(struct file *file, u32 sid); +#endif + /* Check whether a task can use an open file descriptor to access an inode in a given way. 
Check access to the descriptor itself, and then use dentry_has_perm to @@ -1844,6 +1849,12 @@ static int file_has_perm(const struct cred *cred, goto out; } +#ifdef CONFIG_BPF_SYSCALL + rc = bpf_fd_pass(file, cred_sid(cred)); + if (rc) + return rc; +#endif + /* av is zero if only checking access to the descriptor. */ rc = 0; if (av) @@ -2164,6 +2175,12 @@ static int selinux_binder_transfer_file(struct task_struct *from, return rc; } +#ifdef CONFIG_BPF_SYSCALL + rc = bpf_fd_pass(file, sid); + if (rc) + return rc; +#endif + if (unlikely(IS_PRIVATE(d_backing_inode(dentry)))) return 0; @@ -6252,6 +6269,139 @@ static void selinux_ib_free_security(void *ib_sec) } #endif +#ifdef CONFIG_BPF_SYSCALL +static int selinux_bpf(int cmd, union bpf_attr *attr, + unsigned int size) + { + u32 sid = current_sid(); + int ret; + + switch (cmd) { + case BPF_MAP_CREATE: + ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__MAP_CREATE, + NULL); + break; + case BPF_PROG_LOAD: + ret = avc_has_perm(sid, sid, SECCLASS_BPF, BPF__PROG_LOAD, + NULL); + break; + default: + ret = 0; + break; + } + + return ret; + } + +static u32 bpf_map_fmode_to_av(fmode_t fmode) +{ + u32 av = 0; + + if (fmode & FMODE_READ) + av |= BPF__MAP_READ; + if (fmode & FMODE_WRITE) + av |= BPF__MAP_WRITE; + return av; +} + +/* This function checks a file passed through a unix socket or binder to see + * if it is a bpf-related object, and applies the corresponding checks to that + * bpf object based on its type. Unlike other files and sockets, bpf maps and + * programs share an anonymous inode inside the kernel, so checking that inode + * cannot tell whether the process has the privilege to access the bpf object. + * That is why we add this additional check in selinux_file_receive and + * selinux_binder_transfer_files. 
+ */ +static int bpf_fd_pass(struct file *file, u32 sid) +{ + struct bpf_security_struct *bpfsec; + struct bpf_prog *prog; + struct bpf_map *map; + int ret; + + if (file->f_op == &bpf_map_fops) { + map = file->private_data; + bpfsec = map->security; + ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, + bpf_map_fmode_to_av(file->f_mode), NULL); + if (ret) + return ret; + } else if (file->f_op == &bpf_prog_fops) { + prog = file->private_data; + bpfsec = prog->aux->security; + ret = avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, + BPF__PROG_RUN, NULL); + if (ret) + return ret; + } + return 0; +} + +static int selinux_bpf_map(struct bpf_map *map, fmode_t fmode) +{ + u32 sid = current_sid(); + struct bpf_security_struct *bpfsec; + + bpfsec = map->security; + return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, + bpf_map_fmode_to_av(fmode), NULL); +} + +static int selinux_bpf_prog(struct bpf_prog *prog) +{ + u32 sid = current_sid(); + struct bpf_security_struct *bpfsec; + + bpfsec = prog->aux->security; + return avc_has_perm(sid, bpfsec->sid, SECCLASS_BPF, + BPF__PROG_RUN, NULL); +} + +static int selinux_bpf_map_alloc(struct bpf_map *map) +{ + struct bpf_security_struct *bpfsec; + + bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL); + if (!bpfsec) + return -ENOMEM; + + bpfsec->sid = current_sid(); + map->security = bpfsec; + + return 0; +} + +static void selinux_bpf_map_free(struct bpf_map *map) +{ + struct bpf_security_struct *bpfsec = map->security; + + map->security = NULL; + kfree(bpfsec); +} + +static int selinux_bpf_prog_alloc(struct bpf_prog_aux *aux) +{ + struct bpf_security_struct *bpfsec; + + bpfsec = kzalloc(sizeof(*bpfsec), GFP_KERNEL); + if (!bpfsec) + return -ENOMEM; + + bpfsec->sid = current_sid(); + aux->security = bpfsec; + + return 0; +} + +static void selinux_bpf_prog_free(struct bpf_prog_aux *aux) +{ + struct bpf_security_struct *bpfsec = aux->security; + + aux->security = NULL; + kfree(bpfsec); +} +#endif + static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(binder_set_context_mgr, selinux_binder_set_context_mgr), LSM_HOOK_INIT(binder_transaction, selinux_binder_transaction), @@ -6471,6 +6621,16 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(audit_rule_match, selinux_audit_rule_match), LSM_HOOK_INIT(audit_rule_free, selinux_audit_rule_free), #endif + +#ifdef CONFIG_BPF_SYSCALL + LSM_HOOK_INIT(bpf, selinux_bpf), + LSM_HOOK_INIT(bpf_map, selinux_bpf_map), + LSM_HOOK_INIT(bpf_prog, selinux_bpf_prog), + LSM_HOOK_INIT(bpf_map_alloc_security, selinux_bpf_map_alloc), + LSM_HOOK_INIT(bpf_prog_alloc_security, selinux_bpf_prog_alloc), + LSM_HOOK_INIT(bpf_map_free_security, selinux_bpf_map_free), + LSM_HOOK_INIT(bpf_prog_free_security, selinux_bpf_prog_free), +#endif }; static __init int selinux_init(void) diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 35ffb29a69cb00b34a4de5d722eaee46df310a74..0a7023b5f000ea088a291ffea1a2057bd5cf7225 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -237,6 +237,8 @@ struct security_class_mapping secclass_map[] = { { "access", NULL } }, { "infiniband_endport", { "manage_subnet", NULL } }, + { "bpf", + {"map_create", "map_read", "map_write", "prog_load", "prog_run"} }, { NULL } }; diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 1649cd18eb0bed125bb38466506309d26c647f90..3d54468ce3342851169396813c48fc3a0428a813 100644 --- a/security/selinux/include/objsec.h 
+++ b/security/selinux/include/objsec.h @@ -150,6 +150,10 @@ struct pkey_security_struct { u32 sid; /* SID of pkey */ }; +struct bpf_security_struct { + u32 sid; /* SID of bpf obj creator */ +}; + extern unsigned int selinux_checkreqprot; #endif /* _SELINUX_OBJSEC_H_ */ diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index fe3a443a110228efb99bc893d2f0820cd03e6763..896f23cfe9185c0eb35f8d7262a7d95e55cd963c 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c @@ -1033,6 +1033,51 @@ static void test_map_parallel(void) assert(bpf_map_get_next_key(fd, &key, &key) == -1 && errno == ENOENT); } +static void test_map_rdonly(void) +{ + int fd, key = 0, value = 0; + + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + MAP_SIZE, map_flags | BPF_F_RDONLY); + if (fd < 0) { + printf("Failed to create map for read only test '%s'!\n", + strerror(errno)); + exit(1); + } + + key = 1; + value = 1234; + /* Try to insert key=1 element; writes must be rejected. */ + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == -1 && + errno == EPERM); + + /* Check that key=1 is not found. */ + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == ENOENT); + assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == ENOENT); +} + +static void test_map_wronly(void) +{ + int fd, key = 0, value = 0; + + fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(key), sizeof(value), + MAP_SIZE, map_flags | BPF_F_WRONLY); + if (fd < 0) { + printf("Failed to create map for write only test '%s'!\n", + strerror(errno)); + exit(1); + } + + key = 1; + value = 1234; + /* Insert key=1 element. */ + assert(bpf_map_update_elem(fd, &key, &value, BPF_ANY) == 0); + + /* Check that reads on a write-only map fail with EPERM. */ + assert(bpf_map_lookup_elem(fd, &key, &value) == -1 && errno == EPERM); + assert(bpf_map_get_next_key(fd, &key, &value) == -1 && errno == EPERM); +} + static void run_all_tests(void) { test_hashmap(0, NULL); @@ -1050,6 +1095,9 @@ static void run_all_tests(void) test_map_large(); test_map_parallel(); test_map_stress(); + + test_map_rdonly(); + test_map_wronly(); } int main(void)
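Usage note (illustrative, not part of the patch): below is a minimal userspace sketch of the new object access flags, assuming a kernel with this series applied and uapi headers that already define BPF_F_RDONLY. A map created with BPF_F_RDONLY is returned as a file descriptor without FMODE_CAN_WRITE, so an update through that fd should fail with EPERM, mirroring the FMODE_CAN_WRITE check added to map_update_elem() and the test_map_rdonly selftest above.

/* Sketch only: create a read-only hash map via the raw bpf(2) syscall and
 * show that updates through the returned fd are rejected.
 * Assumes __NR_bpf is available via <sys/syscall.h> and that <linux/bpf.h>
 * carries BPF_F_RDONLY (i.e. headers from a kernel with this patch).
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static long sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int main(void)
{
	union bpf_attr attr;
	int key = 1, value = 1234;
	long fd, err;

	/* BPF_MAP_CREATE with the new read-only object flag. */
	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = sizeof(key);
	attr.value_size = sizeof(value);
	attr.max_entries = 16;
	attr.map_flags = BPF_F_RDONLY;	/* fd will lack FMODE_CAN_WRITE */

	fd = sys_bpf(BPF_MAP_CREATE, &attr);
	if (fd < 0) {
		perror("BPF_MAP_CREATE");
		return 1;
	}

	/* BPF_MAP_UPDATE_ELEM on the read-only fd: expected to fail with EPERM. */
	memset(&attr, 0, sizeof(attr));
	attr.map_fd = fd;
	attr.key = (__u64)(unsigned long)&key;
	attr.value = (__u64)(unsigned long)&value;
	attr.flags = BPF_ANY;

	err = sys_bpf(BPF_MAP_UPDATE_ELEM, &attr);
	printf("update on read-only map: ret=%ld errno=%s\n",
	       err, err < 0 ? strerror(errno) : "none");

	close(fd);
	return 0;
}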