/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
12
#include <linux/seq_file.h>
13
#include <linux/slab.h>
L
Lai Jiangshan 已提交
14
#include <linux/rcupdate.h>
L
Li Zefan 已提交
15
#include <linux/mutex.h>
16 17 18 19 20 21 22 23 24 25

/* access-type bits stored in dev_exception_item->access */
#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

/* device-type bits stored in dev_exception_item->type */
#define DEV_BLOCK 1
#define DEV_CHAR  2
#define DEV_ALL   4  /* this represents all devices */

L
Li Zefan 已提交
26 27
static DEFINE_MUTEX(devcgroup_mutex);

28
/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

34
struct dev_exception_item {
35 36 37 38
	u32 major, minor;
	short type;
	short access;
	struct list_head list;
39
	struct rcu_head rcu;
40 41 42 43
};

struct dev_cgroup {
	struct cgroup_subsys_state css;
44
	struct list_head exceptions;
45 46 47 48
	enum {
		DEVCG_DEFAULT_ALLOW,
		DEVCG_DEFAULT_DENY,
	} behavior;
49 50
};

51 52 53 54 55
/* Map a generic cgroup_subsys_state back to its containing dev_cgroup. */
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *state)
{
	return container_of(state, struct dev_cgroup, css);
}

56 57
static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
58
	return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
59 60
}

61 62 63 64 65
/* Fetch the devices-subsystem state of @task via its subsys pointer. */
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
	return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

66 67
struct cgroup_subsys devices_subsys;

68 69
static int devcgroup_can_attach(struct cgroup *new_cgrp,
				struct cgroup_taskset *set)
70
{
71
	struct task_struct *task = cgroup_taskset_first(set);
72

73 74
	if (current != task && !capable(CAP_SYS_ADMIN))
		return -EPERM;
75 76 77 78
	return 0;
}

/*
L
Li Zefan 已提交
79
 * called under devcgroup_mutex
80
 */
81
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
82
{
83
	struct dev_exception_item *ex, *tmp, *new;
84

T
Tejun Heo 已提交
85 86
	lockdep_assert_held(&devcgroup_mutex);

87 88
	list_for_each_entry(ex, orig, list) {
		new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
89 90 91 92 93 94 95 96
		if (!new)
			goto free_and_exit;
		list_add_tail(&new->list, dest);
	}

	return 0;

free_and_exit:
97 98 99
	list_for_each_entry_safe(ex, tmp, dest, list) {
		list_del(&ex->list);
		kfree(ex);
100 101 102 103 104
	}
	return -ENOMEM;
}

/*
L
Li Zefan 已提交
105
 * called under devcgroup_mutex
106
 */
107 108
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
109
{
110
	struct dev_exception_item *excopy, *walk;
111

T
Tejun Heo 已提交
112 113
	lockdep_assert_held(&devcgroup_mutex);

114 115
	excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
	if (!excopy)
116 117
		return -ENOMEM;

118 119
	list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
120
			continue;
121
		if (walk->major != ex->major)
122
			continue;
123
		if (walk->minor != ex->minor)
124 125
			continue;

126 127 128
		walk->access |= ex->access;
		kfree(excopy);
		excopy = NULL;
129 130
	}

131 132
	if (excopy != NULL)
		list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
133 134 135 136
	return 0;
}

/*
L
Li Zefan 已提交
137
 * called under devcgroup_mutex
138
 */
139 140
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
			     struct dev_exception_item *ex)
141
{
142
	struct dev_exception_item *walk, *tmp;
143

T
Tejun Heo 已提交
144 145
	lockdep_assert_held(&devcgroup_mutex);

146 147
	list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
		if (walk->type != ex->type)
148
			continue;
149
		if (walk->major != ex->major)
150
			continue;
151
		if (walk->minor != ex->minor)
152 153
			continue;

154
		walk->access &= ~ex->access;
155
		if (!walk->access) {
156
			list_del_rcu(&walk->list);
157
			kfree_rcu(walk, rcu);
158 159 160 161
		}
	}
}

162 163 164 165 166 167 168 169 170 171
/* Unlink and RCU-free every entry on @dev_cgroup's exception list. */
static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
	struct dev_exception_item *entry, *next;

	list_for_each_entry_safe(entry, next, &dev_cgroup->exceptions, list) {
		list_del_rcu(&entry->list);
		kfree_rcu(entry, rcu);
	}
}

172
/**
173 174
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
175 176 177
 *
 * called under devcgroup_mutex
 */
178
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
179
{
T
Tejun Heo 已提交
180 181
	lockdep_assert_held(&devcgroup_mutex);

182
	__dev_exception_clean(dev_cgroup);
183 184
}

185 186 187
/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
188
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
189 190 191 192 193 194 195 196
{
	struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
	struct cgroup *parent_cgroup;
	int ret;

	dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
	if (!dev_cgroup)
		return ERR_PTR(-ENOMEM);
197
	INIT_LIST_HEAD(&dev_cgroup->exceptions);
198 199
	parent_cgroup = cgroup->parent;

200
	if (parent_cgroup == NULL)
201
		dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
202
	else {
203
		parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
L
Li Zefan 已提交
204
		mutex_lock(&devcgroup_mutex);
205 206
		ret = dev_exceptions_copy(&dev_cgroup->exceptions,
					  &parent_dev_cgroup->exceptions);
207
		dev_cgroup->behavior = parent_dev_cgroup->behavior;
L
Li Zefan 已提交
208
		mutex_unlock(&devcgroup_mutex);
209 210 211 212 213 214 215 216 217
		if (ret) {
			kfree(dev_cgroup);
			return ERR_PTR(ret);
		}
	}

	return &dev_cgroup->css;
}

218
/* Release a cgroup's device state: free all exceptions, then the struct. */
static void devcgroup_css_free(struct cgroup *cgroup)
{
	struct dev_cgroup *dev_cgroup;

	dev_cgroup = cgroup_to_devcgroup(cgroup);
	__dev_exception_clean(dev_cgroup);
	kfree(dev_cgroup);
}

/* values for cftype->private, identifying which control file was used */
#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

#define MAJMINLEN 13	/* buffer for a decimal u32 major/minor or "*", incl. NUL */
#define ACCLEN 4	/* buffer for at most "rwm" plus NUL */

static void set_access(char *acc, short access)
{
	int idx = 0;
237
	memset(acc, 0, ACCLEN);
238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256
	if (access & ACC_READ)
		acc[idx++] = 'r';
	if (access & ACC_WRITE)
		acc[idx++] = 'w';
	if (access & ACC_MKNOD)
		acc[idx++] = 'm';
}

/* Single-letter representation of a device type; 'X' if unrecognized. */
static char type_to_char(short type)
{
	switch (type) {
	case DEV_ALL:
		return 'a';
	case DEV_CHAR:
		return 'c';
	case DEV_BLOCK:
		return 'b';
	default:
		return 'X';
	}
}

257
/*
 * set_majmin - format a major or minor number into @str
 *
 * ~0 is the "any" wildcard and prints as "*"; anything else prints in
 * decimal.  @str must be at least MAJMINLEN bytes.
 */
static void set_majmin(char *str, unsigned m)
{
	if (m == ~0)
		strcpy(str, "*");
	else
		sprintf(str, "%u", m);
}

265 266
static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
				struct seq_file *m)
267
{
268
	struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
269
	struct dev_exception_item *ex;
270
	char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
271

272
	rcu_read_lock();
273 274 275 276 277 278
	/*
	 * To preserve the compatibility:
	 * - Only show the "all devices" when the default policy is to allow
	 * - List the exceptions in case the default policy is to deny
	 * This way, the file remains as a "whitelist of devices"
	 */
279
	if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
280 281 282 283
		set_access(acc, ACC_MASK);
		set_majmin(maj, ~0);
		set_majmin(min, ~0);
		seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
284
			   maj, min, acc);
285
	} else {
286 287 288 289 290
		list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
			set_access(acc, ex->access);
			set_majmin(maj, ex->major);
			set_majmin(min, ex->minor);
			seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
291 292
				   maj, min, acc);
		}
293
	}
294
	rcu_read_unlock();
295

296
	return 0;
297 298
}

299
/**
300 301 302 303 304
 * may_access - verifies if a new exception is part of what is allowed
 *		by a dev cgroup based on the default policy +
 *		exceptions. This is used to make sure a child cgroup
 *		won't have more privileges than its parent or to
 *		verify if a certain access is allowed.
305
 * @dev_cgroup: dev cgroup to be tested against
306
 * @refex: new exception
307
 */
308 309
static bool may_access(struct dev_cgroup *dev_cgroup,
		       struct dev_exception_item *refex)
310
{
311
	struct dev_exception_item *ex;
312
	bool match = false;
313

T
Tejun Heo 已提交
314 315 316 317
	rcu_lockdep_assert(rcu_read_lock_held() ||
			   lockdep_is_held(&devcgroup_mutex),
			   "device_cgroup::may_access() called without proper synchronization");

T
Tejun Heo 已提交
318
	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
319
		if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
320
			continue;
321
		if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
322
			continue;
323
		if (ex->major != ~0 && ex->major != refex->major)
324
			continue;
325
		if (ex->minor != ~0 && ex->minor != refex->minor)
326
			continue;
327
		if (refex->access & (~ex->access))
328
			continue;
329 330
		match = true;
		break;
331
	}
332 333

	/*
334
	 * In two cases we'll consider this new exception valid:
335
	 * - the dev cgroup has its default policy to deny + exception list:
336
	 *   the new exception *should* match the exceptions
337 338
	 * - the dev cgroup has its default policy to allow + exception list:
	 *   the new exception should *not* match any of the exceptions
339
	 */
340 341 342 343 344 345 346 347
	if (dev_cgroup->behavior == DEVCG_DEFAULT_DENY) {
		if (match)
			return true;
	} else {
		if (!match)
			return true;
	}
	return false;
348 349 350 351
}

/*
 * parent_has_perm:
352
 * when adding a new allow rule to a device exception list, the rule
353 354
 * must be allowed in the parent device
 */
355
static int parent_has_perm(struct dev_cgroup *childcg,
356
				  struct dev_exception_item *ex)
357
{
358
	struct cgroup *pcg = childcg->css.cgroup->parent;
359 360 361 362 363
	struct dev_cgroup *parent;

	if (!pcg)
		return 1;
	parent = cgroup_to_devcgroup(pcg);
364
	return may_access(parent, ex);
365 366
}

367 368 369 370 371 372 373 374
/**
 * may_allow_all - checks if it's possible to change the behavior to
 *		   allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
375 376
	if (!parent)
		return 1;
377 378 379
	return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

380
/*
381
 * Modify the exception list using allow/deny rules.
382 383
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
384
 * modify the exception list.
385 386
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
387
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
388 389 390 391 392
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
393 394
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
				   int filetype, const char *buffer)
395
{
396
	const char *b;
397 398
	char temp[12];		/* 11 + 1 characters needed for a u32 */
	int count, rc;
399
	struct dev_exception_item ex;
400
	struct cgroup *p = devcgroup->css.cgroup;
401
	struct dev_cgroup *parent = NULL;
402 403 404 405

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

406 407 408
	if (p->parent)
		parent = cgroup_to_devcgroup(p->parent);

409
	memset(&ex, 0, sizeof(ex));
410 411 412 413
	b = buffer;

	switch (*b) {
	case 'a':
414 415
		switch (filetype) {
		case DEVCG_ALLOW:
416
			if (!may_allow_all(parent))
417
				return -EPERM;
418
			dev_exception_clean(devcgroup);
419 420 421 422
			devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
			if (!parent)
				break;

423 424 425 426
			rc = dev_exceptions_copy(&devcgroup->exceptions,
						 &parent->exceptions);
			if (rc)
				return rc;
427 428
			break;
		case DEVCG_DENY:
429
			dev_exception_clean(devcgroup);
430
			devcgroup->behavior = DEVCG_DEFAULT_DENY;
431 432 433 434 435
			break;
		default:
			return -EINVAL;
		}
		return 0;
436
	case 'b':
437
		ex.type = DEV_BLOCK;
438 439
		break;
	case 'c':
440
		ex.type = DEV_CHAR;
441 442
		break;
	default:
443
		return -EINVAL;
444 445
	}
	b++;
446 447
	if (!isspace(*b))
		return -EINVAL;
448 449
	b++;
	if (*b == '*') {
450
		ex.major = ~0;
451 452
		b++;
	} else if (isdigit(*b)) {
453 454 455 456 457 458 459 460 461 462
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.major);
		if (rc)
			return -EINVAL;
463
	} else {
464
		return -EINVAL;
465
	}
466 467
	if (*b != ':')
		return -EINVAL;
468 469 470 471
	b++;

	/* read minor */
	if (*b == '*') {
472
		ex.minor = ~0;
473 474
		b++;
	} else if (isdigit(*b)) {
475 476 477 478 479 480 481 482 483 484
		memset(temp, 0, sizeof(temp));
		for (count = 0; count < sizeof(temp) - 1; count++) {
			temp[count] = *b;
			b++;
			if (!isdigit(*b))
				break;
		}
		rc = kstrtou32(temp, 10, &ex.minor);
		if (rc)
			return -EINVAL;
485
	} else {
486
		return -EINVAL;
487
	}
488 489
	if (!isspace(*b))
		return -EINVAL;
490 491 492
	for (b++, count = 0; count < 3; count++, b++) {
		switch (*b) {
		case 'r':
493
			ex.access |= ACC_READ;
494 495
			break;
		case 'w':
496
			ex.access |= ACC_WRITE;
497 498
			break;
		case 'm':
499
			ex.access |= ACC_MKNOD;
500 501 502 503 504 505
			break;
		case '\n':
		case '\0':
			count = 3;
			break;
		default:
506
			return -EINVAL;
507 508 509 510 511
		}
	}

	switch (filetype) {
	case DEVCG_ALLOW:
512
		if (!parent_has_perm(devcgroup, &ex))
513
			return -EPERM;
514 515 516 517 518
		/*
		 * If the default policy is to allow by default, try to remove
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
519
		if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
520
			dev_exception_rm(devcgroup, &ex);
521 522
			return 0;
		}
523
		return dev_exception_add(devcgroup, &ex);
524
	case DEVCG_DENY:
525 526 527 528 529
		/*
		 * If the default policy is to deny by default, try to remove
		 * an matching exception instead. And be silent about it: we
		 * don't want to break compatibility
		 */
530
		if (devcgroup->behavior == DEVCG_DEFAULT_DENY) {
531
			dev_exception_rm(devcgroup, &ex);
532 533
			return 0;
		}
534
		return dev_exception_add(devcgroup, &ex);
535
	default:
536
		return -EINVAL;
537
	}
538 539
	return 0;
}
540

541 542 543 544
static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
				  const char *buffer)
{
	int retval;
L
Li Zefan 已提交
545 546

	mutex_lock(&devcgroup_mutex);
547 548
	retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
					 cft->private, buffer);
L
Li Zefan 已提交
549
	mutex_unlock(&devcgroup_mutex);
550 551 552 553 554 555
	return retval;
}

static struct cftype dev_cgroup_files[] = {
	{
		.name = "allow",
556
		.write_string  = devcgroup_access_write,
557 558 559 560
		.private = DEVCG_ALLOW,
	},
	{
		.name = "deny",
561
		.write_string = devcgroup_access_write,
562 563
		.private = DEVCG_DENY,
	},
564 565 566 567 568
	{
		.name = "list",
		.read_seq_string = devcgroup_seq_read,
		.private = DEVCG_LIST,
	},
569
	{ }	/* terminate */
570 571 572 573 574
};

struct cgroup_subsys devices_subsys = {
	.name = "devices",
	.can_attach = devcgroup_can_attach,
575 576
	.css_alloc = devcgroup_css_alloc,
	.css_free = devcgroup_css_free,
577
	.subsys_id = devices_subsys_id,
578
	.base_cftypes = dev_cgroup_files,
579 580 581 582 583 584 585 586 587

	/*
	 * While devices cgroup has the rudimentary hierarchy support which
	 * checks the parent's restriction, it doesn't properly propagates
	 * config changes in ancestors to their descendents.  A child
	 * should only be allowed to add more restrictions to the parent's
	 * configuration.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
588 589
};

590 591 592 593 594 595 596 597 598 599
/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @dev_cgroup: the dev cgroup to be tested against
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * returns 0 on success, -EPERM case the operation is not permitted
 */
J
Jiri Slaby 已提交
600
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
601
				        short access)
602
{
J
Jiri Slaby 已提交
603
	struct dev_cgroup *dev_cgroup;
604
	struct dev_exception_item ex;
605
	int rc;
L
Li Zefan 已提交
606

607 608 609 610 611
	memset(&ex, 0, sizeof(ex));
	ex.type = type;
	ex.major = major;
	ex.minor = minor;
	ex.access = access;
L
Li Zefan 已提交
612

613
	rcu_read_lock();
J
Jiri Slaby 已提交
614
	dev_cgroup = task_devcgroup(current);
615
	rc = may_access(dev_cgroup, &ex);
616
	rcu_read_unlock();
617

618 619
	if (!rc)
		return -EPERM;
L
Li Zefan 已提交
620

621 622
	return 0;
}
623

624 625 626 627 628 629 630 631 632 633 634 635 636
/*
 * devcgroup hook for inode permission checks: translate the inode's
 * device type and the requested @mask into ACC_* bits and test them
 * against the current task's devices cgroup.
 *
 * NOTE(review): if @inode is neither a block nor a char device, 'type'
 * is left uninitialized -- presumably callers only invoke this for
 * device nodes; verify against the header-side wrapper.
 *
 * Returns 0 if permitted, -EPERM otherwise.
 */
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
	short type, access = 0;

	if (S_ISBLK(inode->i_mode))
		type = DEV_BLOCK;
	if (S_ISCHR(inode->i_mode))
		type = DEV_CHAR;
	if (mask & MAY_WRITE)
		access |= ACC_WRITE;
	if (mask & MAY_READ)
		access |= ACC_READ;

	return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
			access);
}

/*
 * devcgroup hook for mknod(): non-device modes are always allowed;
 * otherwise check ACC_MKNOD for @dev against the current task's devices
 * cgroup.  Returns 0 if permitted, -EPERM otherwise.
 */
int devcgroup_inode_mknod(int mode, dev_t dev)
{
	short type;

	if (!S_ISBLK(mode) && !S_ISCHR(mode))
		return 0;

	if (S_ISBLK(mode))
		type = DEV_BLOCK;
	else
		type = DEV_CHAR;

	return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
			ACC_MKNOD);
}