/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Authors:
 *
 *	Daniel Borkmann <daniel@iogearbox.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/kdev_t.h>
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>

enum bpf_type {
	BPF_TYPE_UNSPEC	= 0,
	BPF_TYPE_PROG,
	BPF_TYPE_MAP,
};

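/* Take an additional reference on a pinned program or map. */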
static void *bpf_any_get(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		raw = bpf_prog_inc(raw);
		break;
	case BPF_TYPE_MAP:
		raw = bpf_map_inc(raw, true);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return raw;
}

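/* Drop a reference previously taken by bpf_any_get() or bpf_fd_probe_obj(). */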
static void bpf_any_put(void *raw, enum bpf_type type)
{
	switch (type) {
	case BPF_TYPE_PROG:
		bpf_prog_put(raw);
		break;
	case BPF_TYPE_MAP:
		bpf_map_put_with_uref(raw);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

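/* Resolve a user-supplied fd to the map or program behind it, trying maps
 * first; on success a reference is held and *type records what was found.
 */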
static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
{
	void *raw;

	*type = BPF_TYPE_MAP;
	raw = bpf_map_get_with_uref(ufd);
	if (IS_ERR(raw)) {
		*type = BPF_TYPE_PROG;
		raw = bpf_prog_get(ufd);
	}

	return raw;
}

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };

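/* Allocate a fresh inode for bpffs; only regular files, directories and
 * symlinks are supported.
 */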
static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
				   umode_t mode)
{
	struct inode *inode;

	switch (mode & S_IFMT) {
	case S_IFDIR:
	case S_IFREG:
	case S_IFLNK:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	inode = new_inode(sb);
	if (!inode)
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	inode->i_atime = current_time(inode);
	inode->i_mtime = inode->i_atime;
	inode->i_ctime = inode->i_atime;

	inode_init_owner(inode, dir, mode);

	return inode;
}

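/* Derive the pinned object type from the inode ops; inodes that do not hold
 * a bpf object yield -EACCES.
 */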
static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
{
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else
		return -EACCES;

	return 0;
}

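/* Attach the new inode to its dentry and bump the parent's timestamps. */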
static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
				struct inode *dir)
{
	d_instantiate(dentry, inode);
	dget(dentry);

	dir->i_mtime = current_time(dir);
	dir->i_ctime = dir->i_mtime;
}

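/* mkdir() for bpffs: a plain directory using the generic simple_dir_operations. */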
static int bpf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;

	inc_nlink(inode);
	inc_nlink(dir);

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

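/* Create a regular file inode that carries the pinned object in i_private. */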
static int bpf_mkobj_ops(struct inode *dir, struct dentry *dentry,
			 umode_t mode, const struct inode_operations *iops)
{
	struct inode *inode;

	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFREG);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	inode->i_op = iops;
	inode->i_private = dentry->d_fsdata;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

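/* mknod() entry point: the object type is encoded in the device minor and
 * the object itself arrives via dentry->d_fsdata (see bpf_obj_do_pin()).
 */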
static int bpf_mkobj(struct inode *dir, struct dentry *dentry, umode_t mode,
		     dev_t devt)
{
	enum bpf_type type = MINOR(devt);

	if (MAJOR(devt) != UNNAMED_MAJOR || !S_ISREG(mode) ||
	    dentry->d_fsdata == NULL)
		return -EPERM;

	switch (type) {
	case BPF_TYPE_PROG:
		return bpf_mkobj_ops(dir, dentry, mode, &bpf_prog_iops);
	case BPF_TYPE_MAP:
		return bpf_mkobj_ops(dir, dentry, mode, &bpf_map_iops);
	default:
		return -EPERM;
	}
}

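/* Reject lookups of names containing a '.', otherwise fall back to the
 * generic libfs lookup.
 */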
static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
{
	if (strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
}

static int bpf_symlink(struct inode *dir, struct dentry *dentry,
		       const char *target)
{
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);
	struct inode *inode;

	if (!link)
		return -ENOMEM;

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
	if (IS_ERR(inode)) {
		kfree(link);
		return PTR_ERR(inode);
	}

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;

	bpf_dentry_finalize(dentry, inode, dir);
	return 0;
}

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.mknod		= bpf_mkobj,
	.mkdir		= bpf_mkdir,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.link		= simple_link,
	.unlink		= simple_unlink,
};

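/* Pin an object by creating a regular file at @pathname via vfs_mknod();
 * the type travels in the device minor, the object pointer in d_fsdata.
 */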
static int bpf_obj_do_pin(const struct filename *pathname, void *raw,
			  enum bpf_type type)
{
	struct dentry *dentry;
	struct inode *dir;
	struct path path;
	umode_t mode;
	dev_t devt;
	int ret;

	dentry = kern_path_create(AT_FDCWD, pathname->name, &path, 0);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	devt = MKDEV(UNNAMED_MAJOR, type);

	ret = security_path_mknod(&path, dentry, mode, devt);
	if (ret)
		goto out;

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
		ret = -EPERM;
		goto out;
	}

	dentry->d_fsdata = raw;
	ret = vfs_mknod(dir, dentry, mode, devt);
	dentry->d_fsdata = NULL;
out:
	done_path_create(&path, dentry);
	return ret;
}

int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
{
	struct filename *pname;
	enum bpf_type type;
	void *raw;
	int ret;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_fd_probe_obj(ufd, &type);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	ret = bpf_obj_do_pin(pname, raw, type);
	if (ret != 0)
		bpf_any_put(raw, type);
	if ((trace_bpf_obj_pin_prog_enabled() ||
	     trace_bpf_obj_pin_map_enabled()) && !ret) {
		if (type == BPF_TYPE_PROG)
			trace_bpf_obj_pin_prog(raw, ufd, pname);
		if (type == BPF_TYPE_MAP)
			trace_bpf_obj_pin_map(raw, ufd, pname);
	}
out:
	putname(pname);
	return ret;
}

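/* Look up a pinned path, check access permissions and take a reference on
 * the object stored in the inode.
 */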
static void *bpf_obj_do_get(const struct filename *pathname,
			    enum bpf_type *type, int flags)
{
	struct inode *inode;
	struct path path;
	void *raw;
	int ret;

	ret = kern_path(pathname->name, LOOKUP_FOLLOW, &path);
	if (ret)
		return ERR_PTR(ret);

	inode = d_backing_inode(path.dentry);
	ret = inode_permission(inode, ACC_MODE(flags));
	if (ret)
		goto out;

	ret = bpf_inode_type(inode, type);
	if (ret)
		goto out;

	raw = bpf_any_get(inode->i_private, *type);
	if (!IS_ERR(raw))
		touch_atime(&path);

	path_put(&path);
	return raw;
out:
	path_put(&path);
	return ERR_PTR(ret);
}

int bpf_obj_get_user(const char __user *pathname, int flags)
{
	enum bpf_type type = BPF_TYPE_UNSPEC;
	struct filename *pname;
	int ret = -ENOENT;
	int f_flags;
	void *raw;

	f_flags = bpf_get_file_flag(flags);
	if (f_flags < 0)
		return f_flags;

	pname = getname(pathname);
	if (IS_ERR(pname))
		return PTR_ERR(pname);

	raw = bpf_obj_do_get(pname, &type, f_flags);
	if (IS_ERR(raw)) {
		ret = PTR_ERR(raw);
		goto out;
	}

	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else
		goto out;

	if (ret < 0) {
		bpf_any_put(raw, type);
	} else if (trace_bpf_obj_get_prog_enabled() ||
		   trace_bpf_obj_get_map_enabled()) {
		if (type == BPF_TYPE_PROG)
			trace_bpf_obj_get_prog(raw, ret, pname);
		if (type == BPF_TYPE_MAP)
			trace_bpf_obj_get_map(raw, ret, pname);
	}
out:
	putname(pname);
	return ret;
}
EXPORT_SYMBOL_GPL(bpf_obj_get_user);

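/* Release the symlink target or the pinned object when an inode is evicted. */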
static void bpf_evict_inode(struct inode *inode)
{
	enum bpf_type type;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);

	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
}

/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
{
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);
	return 0;
}

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.evict_inode	= bpf_evict_inode,
};

enum {
	OPT_MODE,
	OPT_ERR,
};

static const match_table_t bpf_mount_tokens = {
	{ OPT_MODE, "mode=%o" },
	{ OPT_ERR, NULL },
};

struct bpf_mount_opts {
	umode_t mode;
};

static int bpf_parse_options(char *data, struct bpf_mount_opts *opts)
{
	substring_t args[MAX_OPT_ARGS];
	int option, token;
	char *ptr;

	opts->mode = S_IRWXUGO;

	while ((ptr = strsep(&data, ",")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, bpf_mount_tokens, args);
		switch (token) {
		case OPT_MODE:
			if (match_octal(&args[0], &option))
				return -EINVAL;
			opts->mode = option & S_IALLUGO;
			break;
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		}
	}

	return 0;
}

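/* Set up the superblock with a single root directory, applying the sticky
 * bit and the mode given via mount options.
 */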
static int bpf_fill_super(struct super_block *sb, void *data, int silent)
{
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts opts;
	struct inode *inode;
	int ret;

	ret = bpf_parse_options(data, &opts);
	if (ret)
		return ret;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	if (ret)
		return ret;

	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	inode->i_mode |= S_ISVTX | opts.mode;

	return 0;
}

static struct dentry *bpf_mount(struct file_system_type *type, int flags,
				const char *dev_name, void *data)
{
	return mount_nodev(type, flags, data, bpf_fill_super);
}

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "bpf",
	.mount		= bpf_mount,
	.kill_sb	= kill_litter_super,
};

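/* Register the "bpf" filesystem and expose /sys/fs/bpf as its mount point. */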
static int __init bpf_init(void)
{
	int ret;

	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	if (ret)
		return ret;

	ret = register_filesystem(&bpf_fs_type);
	if (ret)
		sysfs_remove_mount_point(fs_kobj, "bpf");

	return ret;
}
fs_initcall(bpf_init);