/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

/* access bits stored in dev_whitelist_item->access */
#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

/* device type bits stored in dev_whitelist_item->type */
#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

/* serializes all whitelist updates across every devices cgroup */
static DEFINE_MUTEX(devcgroup_mutex);

/*
 * whitelist locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

struct dev_whitelist_item {
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
39
	struct rcu_head rcu;
40 41 42 43 44 45 46
};

/* Per-cgroup state: the generic css plus this group's device whitelist. */
struct dev_cgroup {
	struct cgroup_subsys_state css;
	struct list_head whitelist;
};

47 48 49 50 51
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
	return container_of(s, struct dev_cgroup, css);
}

52 53
static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
54
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
55 56
}

57 58 59 60 61
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

62 63 64
struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup_subsys *ss,
65
			struct cgroup *new_cgrp, struct cgroup_taskset *set)
66
{
67
	struct task_struct *task = cgroup_taskset_first(set);
68

69 70
	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
71 72 73 74
	return 0;
}

/*
 * dev_whitelist_copy - duplicate every whitelist entry of @orig onto @dest.
 * On allocation failure all entries already copied are freed and -ENOMEM
 * is returned; returns 0 on success.
 * called under devcgroup_mutex
 */
static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
{
	struct dev_whitelist_item *wh, *tmp, *new;

	list_for_each_entry(wh, orig, list) {
		new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
	list_for_each_entry_safe(wh, tmp, dest, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	return -ENOMEM;
}

/* Stupid prototype - don't bother combining existing entries */
/*
L
Li Zefan 已提交
100
 * called under devcgroup_mutex
101 102 103 104
 */
static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
			struct dev_whitelist_item *wh)
{
105
	struct dev_whitelist_item *whcopy, *walk;
106

L
Li Zefan 已提交
107
	whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
108 109 110
	if (!whcopy)
		return -ENOMEM;

111 112 113 114 115 116 117 118 119 120 121 122 123 124
	list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
		if (walk->type != wh->type)
			continue;
		if (walk->major != wh->major)
			continue;
		if (walk->minor != wh->minor)
			continue;

		walk->access |= wh->access;
		kfree(whcopy);
		whcopy = NULL;
	}

	if (whcopy != NULL)
125
		list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
126 127 128 129
	return 0;
}

/*
 * dev_whitelist_rm - revoke @wh's access bits from every matching entry.
 * Entries whose access mask drops to zero are unlinked RCU-safely and
 * freed after a grace period.
 * called under devcgroup_mutex
 */
static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
			struct dev_whitelist_item *wh)
{
	struct dev_whitelist_item *walk, *tmp;

	list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
		if (walk->type == DEV_ALL)
			goto remove;
		if (walk->type != wh->type)
			continue;
		if (walk->major != ~0 && walk->major != wh->major)
			continue;
		if (walk->minor != ~0 && walk->minor != wh->minor)
			continue;

remove:
		walk->access &= ~wh->access;
		if (!walk->access) {
			list_del_rcu(&walk->list);
			kfree_rcu(walk, rcu);
		}
	}
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
						struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&dev_cgroup->whitelist);
	parent_cgroup = cgroup->parent;

	if (parent_cgroup == NULL) {
		struct dev_whitelist_item *wh;
		wh = kmalloc(sizeof(*wh), GFP_KERNEL);
		if (!wh) {
			kfree(dev_cgroup);
			return ERR_PTR(-ENOMEM);
		}
		wh->minor = wh->major = ~0;
		wh->type = DEV_ALL;
L
Li Zefan 已提交
181
		wh->access = ACC_MASK;
182 183 184
		list_add(&wh->list, &dev_cgroup->whitelist);
	} else {
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
L
Li Zefan 已提交
185
		mutex_lock(&devcgroup_mutex);
186 187
		ret = dev_whitelist_copy(&dev_cgroup->whitelist,
				&parent_dev_cgroup->whitelist);
L
Li Zefan 已提交
188
		mutex_unlock(&devcgroup_mutex);
189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

/* Tear down a devices cgroup: free every whitelist entry, then the state. */
static void devcgroup_destroy(struct cgroup_subsys *ss,
			struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh, *tmp;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
		list_del(&wh->list);
		kfree(wh);
	}
	kfree(dev_cgroup);
}

/* control-file identifiers, stored in struct cftype .private */
#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13	/* room for a decimal u32 (or "*") plus NUL */
#define ACCLEN 4	/* room for "rwm" plus NUL */
218 219 220 221

static void set_access(char *acc, short access)
{
	int idx = 0;
222
	memset(acc, 0, ACCLEN);
223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

/* Single-letter representation of a device type; 'X' if unrecognized. */
static char type_to_char(short type)
{
	switch (type) {
	case DEV_ALL:
		return 'a';
	case DEV_CHAR:
		return 'c';
	case DEV_BLOCK:
		return 'b';
	default:
		return 'X';
	}
}

/* Render a major/minor number into @str: "*" for the ~0 wildcard,
 * decimal otherwise.  @str must hold at least MAJMINLEN bytes. */
static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

250 251
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
252
{
253
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
254
	struct dev_whitelist_item *wh;
255
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
256

257 258
	rcu_read_lock();
	list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
259
		set_access(acc, wh->access);
260 261 262 263
		set_majmin(maj, wh->major);
		set_majmin(min, wh->minor);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
			   maj, min, acc);
264
	}
265
	rcu_read_unlock();
266

267
	return 0;
268 269 270 271 272 273 274
}

/*
 * may_access_whitelist:
 * does the access granted to dev_cgroup c contain the access
 * requested in whitelist item refwh.
 * return 1 if yes, 0 if no.
 * call with devcgroup_mutex held
 */
static int may_access_whitelist(struct dev_cgroup *c,
				       struct dev_whitelist_item *refwh)
{
	struct dev_whitelist_item *whitem;

	list_for_each_entry(whitem, &c->whitelist, list) {
		if (whitem->type & DEV_ALL)
			return 1;
		if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
			continue;
		if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
			continue;
		if (whitem->major != ~0 && whitem->major != refwh->major)
			continue;
		if (whitem->minor != ~0 && whitem->minor != refwh->minor)
			continue;
		/* every requested access bit must already be granted */
		if (refwh->access & (~whitem->access))
			continue;
		return 1;
	}
	return 0;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device whitelist, the rule
 * must be allowed in the parent device
 */
305
static int parent_has_perm(struct dev_cgroup *childcg,
306 307
				  struct dev_whitelist_item *wh)
{
308
	struct cgroup *pcg = childcg->css.cgroup->parent;
309 310 311 312 313
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
L
Lai Jiangshan 已提交
314
	return may_access_whitelist(parent, wh);
315 316 317 318 319 320 321 322 323 324 325 326 327 328 329
}

/*
 * Modify the whitelist using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the whitelist.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
330 331
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
332
{
333
	const char *b;
L
Li Zefan 已提交
334
	char *endp;
L
Li Zefan 已提交
335
	int count;
336 337 338 339 340 341 342 343 344 345 346 347
	struct dev_whitelist_item wh;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	memset(&wh, 0, sizeof(wh));
	b = buffer;

	switch (*b) {
	case 'a':
		wh.type = DEV_ALL;
		wh.access = ACC_MASK;
348 349
		wh.major = ~0;
		wh.minor = ~0;
350 351 352 353 354 355 356 357
		goto handle;
	case 'b':
		wh.type = DEV_BLOCK;
		break;
	case 'c':
		wh.type = DEV_CHAR;
		break;
	default:
358
		return -EINVAL;
359 360
	}
	b++;
361 362
	if (!isspace(*b))
		return -EINVAL;
363 364 365 366 367
	b++;
	if (*b == '*') {
		wh.major = ~0;
		b++;
	} else if (isdigit(*b)) {
L
Li Zefan 已提交
368 369
		wh.major = simple_strtoul(b, &endp, 10);
		b = endp;
370
	} else {
371
		return -EINVAL;
372
	}
373 374
	if (*b != ':')
		return -EINVAL;
375 376 377 378 379 380 381
	b++;

	/* read minor */
	if (*b == '*') {
		wh.minor = ~0;
		b++;
	} else if (isdigit(*b)) {
L
Li Zefan 已提交
382 383
		wh.minor = simple_strtoul(b, &endp, 10);
		b = endp;
384
	} else {
385
		return -EINVAL;
386
	}
387 388
	if (!isspace(*b))
		return -EINVAL;
389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
			wh.access |= ACC_READ;
			break;
		case 'w':
			wh.access |= ACC_WRITE;
			break;
		case 'm':
			wh.access |= ACC_MKNOD;
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
405
			return -EINVAL;
406 407 408 409 410 411
		}
	}

handle:
	switch (filetype) {
	case DEVCG_ALLOW:
412 413 414
		if (!parent_has_perm(devcgroup, &wh))
			return -EPERM;
		return dev_whitelist_add(devcgroup, &wh);
415 416 417 418
	case DEVCG_DENY:
		dev_whitelist_rm(devcgroup, &wh);
		break;
	default:
419
		return -EINVAL;
420
	}
421 422
	return 0;
}
423

424 425 426 427
static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;
L
Li Zefan 已提交
428 429

	mutex_lock(&devcgroup_mutex);
430 431
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
L
Li Zefan 已提交
432
	mutex_unlock(&devcgroup_mutex);
433 434 435 436 437 438
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
439
		.write_string  = devcgroup_access_write,
440 441 442 443
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
444
		.write_string = devcgroup_access_write,
445 446
		.private = DEVCG_DENY,
	},
447 448 449 450 451
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
452 453 454 455 456 457 458 459 460 461 462 463 464
};

/* Register this subsystem's control files on a newly created cgroup. */
static int devcgroup_populate(struct cgroup_subsys *ss,
				struct cgroup *cgroup)
{
	return cgroup_add_files(cgroup, ss, dev_cgroup_files,
					ARRAY_SIZE(dev_cgroup_files));
}

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
	.create = devcgroup_create,
465
	.destroy = devcgroup_destroy,
466 467 468 469
	.populate = devcgroup_populate,
	.subsys_id = devices_subsys_id,
};

470
int __devcgroup_inode_permission(struct inode *inode, int mask)
471 472 473 474
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh;

475
	rcu_read_lock();
L
Li Zefan 已提交
476 477 478

	dev_cgroup = task_devcgroup(current);

479
	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
480
		if (wh->type & DEV_ALL)
481
			goto found;
482 483 484 485 486 487 488 489
		if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
			continue;
		if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
			continue;
		if (wh->major != ~0 && wh->major != imajor(inode))
			continue;
		if (wh->minor != ~0 && wh->minor != iminor(inode))
			continue;
490

491 492 493 494
		if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
			continue;
		if ((mask & MAY_READ) && !(wh->access & ACC_READ))
			continue;
495
found:
496
		rcu_read_unlock();
497 498
		return 0;
	}
L
Li Zefan 已提交
499

500
	rcu_read_unlock();
501 502 503 504 505 506 507 508 509

	return -EPERM;
}

/*
 * devcgroup_inode_mknod - check whether current's devices cgroup permits
 * mknod of a device node with file mode @mode and device number @dev.
 * Returns 0 if allowed (or @mode is not a device node), -EPERM otherwise.
 */
int devcgroup_inode_mknod(int mode, dev_t dev)
{
	struct dev_cgroup *dev_cgroup;
	struct dev_whitelist_item *wh;

	/* only block and char device nodes are subject to the whitelist */
	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	rcu_read_lock();

	dev_cgroup = task_devcgroup(current);

	list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
		if (wh->type & DEV_ALL)
			goto found;
		if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
			continue;
		if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
			continue;
		if (wh->major != ~0 && wh->major != MAJOR(dev))
			continue;
		if (wh->minor != ~0 && wh->minor != MINOR(dev))
			continue;

		if (!(wh->access & ACC_MKNOD))
			continue;
found:
		rcu_read_unlock();
		return 0;
	}

	rcu_read_unlock();

	return -EPERM;
}