Commit 32d01dc7 authored by Linus Torvalds

Merge branch 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:
 "A lot updates for cgroup:

   - The biggest one is cgroup's conversion to kernfs.  cgroup took
     after the long abandoned vfs-entangled sysfs implementation and
     made it even more convoluted over time.  cgroup's internal objects
     were fused with vfs objects which also brought in vfs locking and
     object lifetime rules.  Naturally, there are places where vfs rules
     don't fit, and nasty hacks needed to be employed - such as
     credential switching, or a lock dance interleaving inode mutex and
     cgroup_mutex, with object serial number comparison thrown in to
     decide whether the operation is actually necessary.

     After conversion to kernfs, internal object lifetime and locking
     rules are mostly isolated from vfs interactions allowing shedding
     of several nasty hacks and overall simplification.  This will also
     allow implementation of operations which may affect multiple cgroups
     which weren't possible before as it would have required nesting
     i_mutexes.

   - Various simplifications including dropping of module support,
     easier cgroup name/path handling, simplified cgroup file type
     handling and task_cg_lists optimization.

   - Preparatory changes for the planned unified hierarchy, which is still
     a patchset away from being actually operational.  The dummy
     hierarchy is updated to serve as the default unified hierarchy.
     Controllers which aren't claimed by other hierarchies are
     associated with it, which BTW was what the dummy hierarchy was for
     anyway.

   - Various fixes from Li and others.  This pull request includes some
     patches to add missing slab.h includes to various subsystems.  This
     was triggered by the xattr.h include removal from cgroup.h: cgroup.h
     indirectly got included by a lot of files, which brought in xattr.h,
     which in turn brought in slab.h (a small illustration follows the
     quoted message).

  There are several merge commits - one to pull in kernfs updates
  necessary for converting cgroup (already in upstream through
  driver-core), others for interfering changes in the fixes branch"
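A minimal illustration of that include-chain breakage (a hypothetical
file, not one from this series): code that used slab.h only through the
implicit cgroup.h -> xattr.h -> slab.h chain stops compiling once the
xattr.h include is dropped, and needs the explicit include the patches
below add.

/* hypothetical example - kzalloc() used without a direct slab.h include */
#include <linux/cgroup.h>	/* no longer pulls in slab.h indirectly */
#include <linux/slab.h>		/* the explicit include these patches add */

static int *make_counter(void)
{
	return kzalloc(sizeof(int), GFP_KERNEL);	/* needs slab.h */
}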

* 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (74 commits)
  cgroup: remove useless argument from cgroup_exit()
  cgroup: fix spurious lockdep warning in cgroup_exit()
  cgroup: Use RCU_INIT_POINTER(x, NULL) in cgroup.c
  cgroup: break kernfs active_ref protection in cgroup directory operations
  cgroup: fix cgroup_taskset walking order
  cgroup: implement CFTYPE_ONLY_ON_DFL
  cgroup: make cgrp_dfl_root mountable
  cgroup: drop const from @buffer of cftype->write_string()
  cgroup: rename cgroup_dummy_root and related names
  cgroup: move ->subsys_mask from cgroupfs_root to cgroup
  cgroup: treat cgroup_dummy_root as an equivalent hierarchy during rebinding
  cgroup: remove NULL checks from [pr_cont_]cgroup_{name|path}()
  cgroup: use cgroup_setup_root() to initialize cgroup_dummy_root
  cgroup: reorganize cgroup bootstrapping
  cgroup: relocate setting of CGRP_DEAD
  cpuset: use rcu_read_lock() to protect task_cs()
  cgroup_freezer: document freezer_fork() subtleties
  cgroup: update cgroup_transfer_tasks() to either succeed or fail
  cgroup: drop task_lock() protection around task->cgroups
  cgroup: update how a newly forked task gets associated with css_set
  ...
@@ -8,6 +8,7 @@
 #include <linux/of_device.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/export.h>
 #include <asm/io.h>
......
@@ -9,6 +9,8 @@
  * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk)
  */

+#include <linux/slab.h>
+
 #include <asm/timer.h>
 #include <asm/traps.h>
 #include <asm/pgalloc.h>
......
@@ -894,7 +894,7 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	int ret = 0;

 	/* task_lock() is needed to avoid races with exit_io_context() */
-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		task_lock(task);
 		ioc = task->io_context;
 		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
@@ -906,17 +906,14 @@ static int blkcg_can_attach(struct cgroup_subsys_state *css,
 	return ret;
 }

-struct cgroup_subsys blkio_subsys = {
-	.name = "blkio",
+struct cgroup_subsys blkio_cgrp_subsys = {
 	.css_alloc = blkcg_css_alloc,
 	.css_offline = blkcg_css_offline,
 	.css_free = blkcg_css_free,
 	.can_attach = blkcg_can_attach,
-	.subsys_id = blkio_subsys_id,
 	.base_cftypes = blkcg_files,
-	.module = THIS_MODULE,
 };
-EXPORT_SYMBOL_GPL(blkio_subsys);
+EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);

 /**
  * blkcg_activate_policy - activate a blkcg policy on a request_queue
@@ -1106,7 +1103,7 @@ int blkcg_policy_register(struct blkcg_policy *pol)

 	/* everything is in place, add intf files for the new policy */
 	if (pol->cftypes)
-		WARN_ON(cgroup_add_cftypes(&blkio_subsys, pol->cftypes));
+		WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
 	ret = 0;
 out_unlock:
 	mutex_unlock(&blkcg_pol_mutex);
......
@@ -186,7 +186,7 @@ static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)

 static inline struct blkcg *task_blkcg(struct task_struct *tsk)
 {
-	return css_to_blkcg(task_css(tsk, blkio_subsys_id));
+	return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
 }

 static inline struct blkcg *bio_blkcg(struct bio *bio)
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
  */
 static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
 {
-	int ret;
+	char *p;

-	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
-	if (ret)
+	p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
+	if (!p) {
 		strncpy(buf, "<unavailable>", buflen);
-	return ret;
+		return -ENAMETOOLONG;
+	}
+
+	memmove(buf, p, buf + buflen - p);
+	return 0;
 }

 /**
......
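For reference, a sketch of the calling-convention change visible in
blkg_path() above (a hypothetical caller, not part of this series):
cgroup_path() now returns a pointer to the start of the path, which
kernfs builds right-aligned inside @buf, or NULL if the buffer is too
small - instead of the old 0/-errno int.

/* hypothetical caller of the new cgroup_path() */
static void demo_log_cgroup_path(struct cgroup *cgrp)
{
	char buf[128];
	char *p;

	p = cgroup_path(cgrp, buf, sizeof(buf));
	if (!p)
		return;				/* path didn't fit in @buf */
	pr_info("cgroup path: %s\n", p);	/* use @p, not @buf */
}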
@@ -1408,13 +1408,13 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
 }

 static int tg_set_conf_u64(struct cgroup_subsys_state *css, struct cftype *cft,
-			   const char *buf)
+			   char *buf)
 {
 	return tg_set_conf(css, cft, buf, true);
 }

 static int tg_set_conf_uint(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buf)
+			    char *buf)
 {
 	return tg_set_conf(css, cft, buf, false);
 }
@@ -1425,28 +1425,24 @@ static struct cftype throtl_files[] = {
 		.private = offsetof(struct throtl_grp, bps[READ]),
 		.seq_show = tg_print_conf_u64,
 		.write_string = tg_set_conf_u64,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.write_bps_device",
 		.private = offsetof(struct throtl_grp, bps[WRITE]),
 		.seq_show = tg_print_conf_u64,
 		.write_string = tg_set_conf_u64,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.read_iops_device",
 		.private = offsetof(struct throtl_grp, iops[READ]),
 		.seq_show = tg_print_conf_uint,
 		.write_string = tg_set_conf_uint,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.write_iops_device",
 		.private = offsetof(struct throtl_grp, iops[WRITE]),
 		.seq_show = tg_print_conf_uint,
 		.write_string = tg_set_conf_uint,
-		.max_write_len = 256,
 	},
 	{
 		.name = "throttle.io_service_bytes",
......
@@ -1701,13 +1701,13 @@ static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
 }

 static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
-				  struct cftype *cft, const char *buf)
+				  struct cftype *cft, char *buf)
 {
 	return __cfqg_set_weight_device(css, cft, buf, false);
 }

 static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
-				       struct cftype *cft, const char *buf)
+				       struct cftype *cft, char *buf)
 {
 	return __cfqg_set_weight_device(css, cft, buf, true);
 }
@@ -1838,7 +1838,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.flags = CFTYPE_ONLY_ON_ROOT,
 		.seq_show = cfqg_print_leaf_weight_device,
 		.write_string = cfqg_set_leaf_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "weight",
@@ -1853,7 +1852,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = cfqg_print_weight_device,
 		.write_string = cfqg_set_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "weight",
@@ -1866,7 +1864,6 @@ static struct cftype cfq_blkcg_files[] = {
 		.name = "leaf_weight_device",
 		.seq_show = cfqg_print_leaf_weight_device,
 		.write_string = cfqg_set_leaf_weight_device,
-		.max_write_len = 256,
 	},
 	{
 		.name = "leaf_weight",
......
@@ -1969,7 +1969,7 @@ int bio_associate_current(struct bio *bio)

 	/* associate blkcg if exists */
 	rcu_read_lock();
-	css = task_css(current, blkio_subsys_id);
+	css = task_css(current, blkio_cgrp_id);
 	if (css && css_tryget(css))
 		bio->bi_css = css;
 	rcu_read_unlock();
......
@@ -112,6 +112,7 @@ char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
 	spin_unlock_irqrestore(&kernfs_rename_lock, flags);
 	return p;
 }
+EXPORT_SYMBOL_GPL(kernfs_path);

 /**
  * pr_cont_kernfs_name - pr_cont name of a kernfs_node
......
@@ -14,18 +14,17 @@
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
 #include <linux/cgroupstats.h>
-#include <linux/prio_heap.h>
 #include <linux/rwsem.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
-#include <linux/xattr.h>
 #include <linux/fs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/seq_file.h>
+#include <linux/kernfs.h>

 #ifdef CONFIG_CGROUPS

-struct cgroupfs_root;
+struct cgroup_root;
 struct cgroup_subsys;
 struct inode;
 struct cgroup;
@@ -34,31 +33,16 @@ extern int cgroup_init_early(void);
 extern int cgroup_init(void);
 extern void cgroup_fork(struct task_struct *p);
 extern void cgroup_post_fork(struct task_struct *p);
-extern void cgroup_exit(struct task_struct *p, int run_callbacks);
+extern void cgroup_exit(struct task_struct *p);
 extern int cgroupstats_build(struct cgroupstats *stats,
 				struct dentry *dentry);
-extern int cgroup_load_subsys(struct cgroup_subsys *ss);
-extern void cgroup_unload_subsys(struct cgroup_subsys *ss);

 extern int proc_cgroup_show(struct seq_file *, void *);

-/*
- * Define the enumeration of all cgroup subsystems.
- *
- * We define ids for builtin subsystems and then modular ones.
- */
-#define SUBSYS(_x) _x ## _subsys_id,
+/* define the enumeration of all cgroup subsystems */
+#define SUBSYS(_x) _x ## _cgrp_id,
 enum cgroup_subsys_id {
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
-#include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
-	CGROUP_BUILTIN_SUBSYS_COUNT,
-
-	__CGROUP_SUBSYS_TEMP_PLACEHOLDER = CGROUP_BUILTIN_SUBSYS_COUNT - 1,
-
-#define IS_SUBSYS_ENABLED(option) IS_MODULE(option)
 #include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
 	CGROUP_SUBSYS_COUNT,
 };
 #undef SUBSYS
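The simplified id enum is easiest to see expanded.  A stand-alone
userspace rendering with a reduced, hypothetical subsystem list:

#include <stdio.h>

/* reduced stand-in for <linux/cgroup_subsys.h> */
#define CGROUP_SUBSYS_LIST \
	SUBSYS(cpuset) \
	SUBSYS(memory) \
	SUBSYS(blkio)

/* same trick as cgroup.h: paste _cgrp_id onto each controller name */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
	CGROUP_SUBSYS_LIST
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS

int main(void)
{
	/* cpuset_cgrp_id == 0, memory_cgrp_id == 1, blkio_cgrp_id == 2 */
	printf("blkio_cgrp_id=%d CGROUP_SUBSYS_COUNT=%d\n",
	       blkio_cgrp_id, CGROUP_SUBSYS_COUNT);
	return 0;
}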
@@ -153,11 +137,6 @@ enum {
 	CGRP_SANE_BEHAVIOR,
 };

-struct cgroup_name {
-	struct rcu_head rcu_head;
-	char name[];
-};
-
 struct cgroup {
 	unsigned long flags;		/* "unsigned long" so bitops work */
@@ -174,16 +153,17 @@ struct cgroup {
 	/* the number of attached css's */
 	int nr_css;

+	atomic_t refcnt;
+
 	/*
 	 * We link our 'sibling' struct into our parent's 'children'.
 	 * Our children link their 'sibling' into our 'children'.
 	 */
 	struct list_head sibling;	/* my parent's children */
 	struct list_head children;	/* my children */
-	struct list_head files;		/* my files */

 	struct cgroup *parent;		/* my parent */
-	struct dentry *dentry;		/* cgroup fs entry, RCU protected */
+	struct kernfs_node *kn;		/* cgroup kernfs entry */

 	/*
 	 * Monotonically increasing unique serial number which defines a
@@ -193,23 +173,13 @@ struct cgroup {
 	 */
 	u64 serial_nr;

-	/*
-	 * This is a copy of dentry->d_name, and it's needed because
-	 * we can't use dentry->d_name in cgroup_path().
-	 *
-	 * You must acquire rcu_read_lock() to access cgrp->name, and
-	 * the only place that can change it is rename(), which is
-	 * protected by parent dir's i_mutex.
-	 *
-	 * Normally you should use cgroup_name() wrapper rather than
-	 * access it directly.
-	 */
-	struct cgroup_name __rcu *name;
+	/* The bitmask of subsystems attached to this cgroup */
+	unsigned long subsys_mask;

 	/* Private pointers for each registered subsystem */
 	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

-	struct cgroupfs_root *root;
+	struct cgroup_root *root;

 	/*
 	 * List of cgrp_cset_links pointing at css_sets with tasks in this
@@ -237,14 +207,11 @@ struct cgroup {
 	/* For css percpu_ref killing and RCU-protected deletion */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
-
-	/* directory xattrs */
-	struct simple_xattrs xattrs;
 };

 #define MAX_CGROUP_ROOT_NAMELEN 64

-/* cgroupfs_root->flags */
+/* cgroup_root->flags */
 enum {
 	/*
 	 * Unfortunately, cgroup core and various controllers are riddled
@@ -262,8 +229,8 @@ enum {
 	 *
 	 * The followings are the behaviors currently affected this flag.
 	 *
-	 * - Mount options "noprefix" and "clone_children" are disallowed.
-	 *   Also, cgroupfs file cgroup.clone_children is not created.
+	 * - Mount options "noprefix", "xattr", "clone_children",
+	 *   "release_agent" and "name" are disallowed.
 	 *
 	 * - When mounting an existing superblock, mount options should
 	 *   match.
@@ -281,6 +248,11 @@ enum {
 	 * - "release_agent" and "notify_on_release" are removed.
 	 *   Replacement notification mechanism will be implemented.
 	 *
+	 * - "cgroup.clone_children" is removed.
+	 *
+	 * - If mount is requested with sane_behavior but without any
+	 *   subsystem, the default unified hierarchy is mounted.
+	 *
 	 * - cpuset: tasks will be kept in empty cpusets when hotplug happens
 	 *   and take masks of ancestors with non-empty cpus/mems, instead of
 	 *   being moved to an ancestor.
@@ -300,29 +272,24 @@ enum {
 	/* mount options live below bit 16 */
 	CGRP_ROOT_OPTION_MASK	= (1 << 16) - 1,
-
-	CGRP_ROOT_SUBSYS_BOUND	= (1 << 16), /* subsystems finished binding */
 };

 /*
- * A cgroupfs_root represents the root of a cgroup hierarchy, and may be
- * associated with a superblock to form an active hierarchy.  This is
+ * A cgroup_root represents the root of a cgroup hierarchy, and may be
+ * associated with a kernfs_root to form an active hierarchy.  This is
  * internal to cgroup core.  Don't access directly from controllers.
  */
-struct cgroupfs_root {
-	struct super_block *sb;
-
-	/* The bitmask of subsystems attached to this hierarchy */
-	unsigned long subsys_mask;
+struct cgroup_root {
+	struct kernfs_root *kf_root;

 	/* Unique id for this hierarchy. */
 	int hierarchy_id;

-	/* The root cgroup for this hierarchy */
-	struct cgroup top_cgroup;
+	/* The root cgroup.  Root is destroyed on its release. */
+	struct cgroup cgrp;

-	/* Tracks how many cgroups are currently defined in hierarchy.*/
-	int number_of_cgroups;
+	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
+	atomic_t nr_cgrps;

 	/* A list running through the active hierarchies */
 	struct list_head root_list;
@@ -360,10 +327,14 @@ struct css_set {
 	struct hlist_node hlist;

 	/*
-	 * List running through all tasks using this cgroup
-	 * group. Protected by css_set_lock
+	 * Lists running through all tasks using this cgroup group.
+	 * mg_tasks lists tasks which belong to this cset but are in the
+	 * process of being migrated out or in.  Protected by
+	 * css_set_rwsem, but, during migration, once tasks are moved to
+	 * mg_tasks, it can be read safely while holding cgroup_mutex.
 	 */
 	struct list_head tasks;
+	struct list_head mg_tasks;

 	/*
 	 * List of cgrp_cset_links pointing at cgroups referenced from this
@@ -372,13 +343,29 @@ struct css_set {
 	struct list_head cgrp_links;

 	/*
-	 * Set of subsystem states, one for each subsystem. This array
-	 * is immutable after creation apart from the init_css_set
-	 * during subsystem registration (at boot time) and modular subsystem
-	 * loading/unloading.
+	 * Set of subsystem states, one for each subsystem.  This array is
+	 * immutable after creation apart from the init_css_set during
+	 * subsystem registration (at boot time).
 	 */
 	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

+	/*
+	 * List of csets participating in the on-going migration either as
+	 * source or destination.  Protected by cgroup_mutex.
+	 */
+	struct list_head mg_preload_node;
+	struct list_head mg_node;
+
+	/*
+	 * If this cset is acting as the source of migration the following
+	 * two fields are set.  mg_src_cgrp is the source cgroup of the
+	 * on-going migration and mg_dst_cset is the destination cset the
+	 * target tasks on this cset should be migrated to.  Protected by
+	 * cgroup_mutex.
+	 */
+	struct cgroup *mg_src_cgrp;
+	struct css_set *mg_dst_cset;
+
 	/* For RCU-protected deletion */
 	struct rcu_head rcu_head;
 };
@@ -397,6 +384,7 @@ enum {
 	CFTYPE_NOT_ON_ROOT	= (1 << 1),	/* don't create on root cgrp */
 	CFTYPE_INSANE		= (1 << 2),	/* don't create if sane_behavior */
 	CFTYPE_NO_PREFIX	= (1 << 3),	/* (DON'T USE FOR NEW FILES) no subsys prefix */
+	CFTYPE_ONLY_ON_DFL	= (1 << 4),	/* only on default hierarchy */
 };

 #define MAX_CFTYPE_NAME		64
@@ -416,8 +404,9 @@ struct cftype {
 	umode_t mode;

 	/*
-	 * If non-zero, defines the maximum length of string that can
-	 * be passed to write_string; defaults to 64
+	 * The maximum length of string, excluding trailing nul, that can
+	 * be passed to write_string.  If < PAGE_SIZE-1, PAGE_SIZE-1 is
+	 * assumed.
 	 */
 	size_t max_write_len;
@@ -425,10 +414,12 @@ struct cftype {
 	unsigned int flags;

 	/*
-	 * The subsys this file belongs to. Initialized automatically
-	 * during registration. NULL for cgroup core files.
+	 * Fields used for internal bookkeeping.  Initialized automatically
+	 * during registration.
 	 */
-	struct cgroup_subsys *ss;
+	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
+	struct list_head node;		/* anchored at ss->cfts */
+	struct kernfs_ops *kf_ops;

 	/*
 	 * read_u64() is a shortcut for the common case of returning a
@@ -467,7 +458,7 @@ struct cftype {
 	 * Returns 0 or -ve error code.
 	 */
 	int (*write_string)(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer);
+			    char *buffer);
 	/*
 	 * trigger() callback can be used to get some kick from the
 	 * userspace, when the actual string written is not important
@@ -475,37 +466,18 @@ struct cftype {
 	 * kick type for multiplexing.
 	 */
 	int (*trigger)(struct cgroup_subsys_state *css, unsigned int event);
-};

-/*
- * cftype_sets describe cftypes belonging to a subsystem and are chained at
- * cgroup_subsys->cftsets.  Each cftset points to an array of cftypes
- * terminated by zero length name.
- */
-struct cftype_set {
-	struct list_head		node;	/* chained at subsys->cftsets */
-	struct cftype			*cfts;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lock_class_key	lockdep_key;
+#endif
 };

-/*
- * cgroupfs file entry, pointed to from leaf dentry->d_fsdata.  Don't
- * access directly.
- */
-struct cfent {
-	struct list_head		node;
-	struct dentry			*dentry;
-	struct cftype			*type;
-	struct cgroup_subsys_state	*css;
-
-	/* file xattrs */
-	struct simple_xattrs		xattrs;
-};
+extern struct cgroup_root cgrp_dfl_root;

-/* seq_file->private points to the following, only ->priv is public */
-struct cgroup_open_file {
-	struct cfent			*cfe;
-	void				*priv;
-};
+static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
+{
+	return cgrp->root == &cgrp_dfl_root;
+}

 /*
  * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
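A sketch of a controller-side cftype after this change (a hypothetical
"demo" controller, not part of this series): write_string now receives a
modifiable char *buf, so handlers can tokenize it in place, and
max_write_len can simply be omitted for anything up to PAGE_SIZE-1.

/* hypothetical write_string handler using the new signature */
static int demo_write_string(struct cgroup_subsys_state *css,
			     struct cftype *cft, char *buf)
{
	char *key = strsep(&buf, " ");	/* in-place parsing is now legal */

	if (!key || !*key)
		return -EINVAL;
	/* ... apply the setting named by @key ... */
	return 0;
}

static struct cftype demo_files[] = {
	{
		.name		= "demo.setting",
		.write_string	= demo_write_string,
		/* .max_write_len omitted: PAGE_SIZE - 1 is assumed */
	},
	{ }	/* terminate */
};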
@@ -516,34 +488,63 @@ static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
 	return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
 }

-/* Caller should hold rcu_read_lock() */
-static inline const char *cgroup_name(const struct cgroup *cgrp)
+/* no synchronization, the result can only be used as a hint */
+static inline bool cgroup_has_tasks(struct cgroup *cgrp)
 {
-	return rcu_dereference(cgrp->name)->name;
+	return !list_empty(&cgrp->cset_links);
 }

-static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
+/* returns ino associated with a cgroup, 0 indicates unmounted root */
+static inline ino_t cgroup_ino(struct cgroup *cgrp)
 {
-	struct cgroup_open_file *of = seq->private;
-	return of->cfe->css;
+	if (cgrp->kn)
+		return cgrp->kn->ino;
+	else
+		return 0;
 }

 static inline struct cftype *seq_cft(struct seq_file *seq)
 {
-	struct cgroup_open_file *of = seq->private;
-	return of->cfe->type;
+	struct kernfs_open_file *of = seq->private;
+
+	return of->kn->priv;
+}
+
+struct cgroup_subsys_state *seq_css(struct seq_file *seq);
+
+/*
+ * Name / path handling functions.  All are thin wrappers around the kernfs
+ * counterparts and can be called under any context.
+ */
+
+static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
+{
+	return kernfs_name(cgrp->kn, buf, buflen);
+}
+
+static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
+					      size_t buflen)
+{
+	return kernfs_path(cgrp->kn, buf, buflen);
+}
+
+static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
+{
+	pr_cont_kernfs_name(cgrp->kn);
+}
+
+static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
+{
+	pr_cont_kernfs_path(cgrp->kn);
 }

+char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
+
 int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);

 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);

-int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
-int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
-
-int cgroup_task_count(const struct cgroup *cgrp);
-
 /*
  * Control Group taskset, used to pass around set of tasks to cgroup_subsys
  * methods.
@@ -551,22 +552,15 @@ int cgroup_task_count(const struct cgroup *cgrp);
 struct cgroup_taskset;
 struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset);
 struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset);
-struct cgroup_subsys_state *cgroup_taskset_cur_css(struct cgroup_taskset *tset,
-						   int subsys_id);
-int cgroup_taskset_size(struct cgroup_taskset *tset);

 /**
  * cgroup_taskset_for_each - iterate cgroup_taskset
  * @task: the loop cursor
- * @skip_css: skip if task's css matches this, %NULL to iterate through all
  * @tset: taskset to iterate
  */
-#define cgroup_taskset_for_each(task, skip_css, tset)			\
+#define cgroup_taskset_for_each(task, tset)				\
 	for ((task) = cgroup_taskset_first((tset)); (task);		\
-	     (task) = cgroup_taskset_next((tset)))			\
-		if (!(skip_css) ||					\
-		    cgroup_taskset_cur_css((tset),			\
-			(skip_css)->ss->subsys_id) != (skip_css))
+	     (task) = cgroup_taskset_next((tset)))

 /*
  * Control Group subsystem type.
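With the @skip_css filter gone, a typical ->attach() method now iterates
every task in the set unconditionally.  A minimal sketch (hypothetical
controller):

/* hypothetical ->attach() callback using the two-argument iterator */
static void demo_attach(struct cgroup_subsys_state *css,
			struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
		/* per-task work, e.g. propagate state from @css to @task */
	}
}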
@@ -591,7 +585,6 @@ struct cgroup_subsys {
 			     struct task_struct *task);
 	void (*bind)(struct cgroup_subsys_state *root_css);

-	int subsys_id;
 	int disabled;
 	int early_init;
@@ -610,27 +603,26 @@ struct cgroup_subsys {
 	bool broken_hierarchy;
 	bool warned_broken_hierarchy;

+	/* the following two fields are initialized automatically during boot */
+	int id;
 #define MAX_CGROUP_TYPE_NAMELEN 32
 	const char *name;

 	/* link to parent, protected by cgroup_lock() */
-	struct cgroupfs_root *root;
+	struct cgroup_root *root;

-	/* list of cftype_sets */
-	struct list_head cftsets;
+	/*
+	 * List of cftypes.  Each entry is the first entry of an array
+	 * terminated by zero length name.
+	 */
+	struct list_head cfts;

-	/* base cftypes, automatically [de]registered with subsys itself */
+	/* base cftypes, automatically registered with subsys itself */
 	struct cftype *base_cftypes;
-	struct cftype_set base_cftset;
-
-	/* should be defined only by modular subsystems */
-	struct module *module;
 };

-#define SUBSYS(_x) extern struct cgroup_subsys _x ## _subsys;
-#define IS_SUBSYS_ENABLED(option) IS_BUILTIN(option)
+#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
 #include <linux/cgroup_subsys.h>
-#undef IS_SUBSYS_ENABLED
 #undef SUBSYS
/** /**
...@@ -661,10 +653,12 @@ struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css) ...@@ -661,10 +653,12 @@ struct cgroup_subsys_state *css_parent(struct cgroup_subsys_state *css)
*/ */
#ifdef CONFIG_PROVE_RCU #ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex; extern struct mutex cgroup_mutex;
extern struct rw_semaphore css_set_rwsem;
#define task_css_set_check(task, __c) \ #define task_css_set_check(task, __c) \
rcu_dereference_check((task)->cgroups, \ rcu_dereference_check((task)->cgroups, \
lockdep_is_held(&(task)->alloc_lock) || \ lockdep_is_held(&cgroup_mutex) || \
lockdep_is_held(&cgroup_mutex) || (__c)) lockdep_is_held(&css_set_rwsem) || \
((task)->flags & PF_EXITING) || (__c))
#else #else
#define task_css_set_check(task, __c) \ #define task_css_set_check(task, __c) \
rcu_dereference((task)->cgroups) rcu_dereference((task)->cgroups)
@@ -837,16 +831,11 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
 struct task_struct *css_task_iter_next(struct css_task_iter *it);
 void css_task_iter_end(struct css_task_iter *it);

-int css_scan_tasks(struct cgroup_subsys_state *css,
-		   bool (*test)(struct task_struct *, void *),
-		   void (*process)(struct task_struct *, void *),
-		   void *data, struct ptr_heap *heap);
-
 int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
 int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

-struct cgroup_subsys_state *css_from_dir(struct dentry *dentry,
-					 struct cgroup_subsys *ss);
+struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
+						struct cgroup_subsys *ss);

 #else /* !CONFIG_CGROUPS */
@@ -854,7 +843,7 @@ static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
 static inline void cgroup_fork(struct task_struct *p) {}
 static inline void cgroup_post_fork(struct task_struct *p) {}
-static inline void cgroup_exit(struct task_struct *p, int callbacks) {}
+static inline void cgroup_exit(struct task_struct *p) {}

 static inline int cgroupstats_build(struct cgroupstats *stats,
 				    struct dentry *dentry)
......
@@ -3,51 +3,51 @@
  *
  * DO NOT ADD ANY SUBSYSTEM WITHOUT EXPLICIT ACKS FROM CGROUP MAINTAINERS.
  */
-#if IS_SUBSYS_ENABLED(CONFIG_CPUSETS)
+#if IS_ENABLED(CONFIG_CPUSETS)
 SUBSYS(cpuset)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEBUG)
+#if IS_ENABLED(CONFIG_CGROUP_DEBUG)
 SUBSYS(debug)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_SCHED)
-SUBSYS(cpu_cgroup)
+#if IS_ENABLED(CONFIG_CGROUP_SCHED)
+SUBSYS(cpu)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_CPUACCT)
+#if IS_ENABLED(CONFIG_CGROUP_CPUACCT)
 SUBSYS(cpuacct)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_MEMCG)
-SUBSYS(mem_cgroup)
+#if IS_ENABLED(CONFIG_MEMCG)
+SUBSYS(memory)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_DEVICE)
+#if IS_ENABLED(CONFIG_CGROUP_DEVICE)
 SUBSYS(devices)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_FREEZER)
+#if IS_ENABLED(CONFIG_CGROUP_FREEZER)
 SUBSYS(freezer)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
+#if IS_ENABLED(CONFIG_CGROUP_NET_CLASSID)
 SUBSYS(net_cls)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_BLK_CGROUP)
+#if IS_ENABLED(CONFIG_BLK_CGROUP)
 SUBSYS(blkio)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_PERF)
-SUBSYS(perf)
+#if IS_ENABLED(CONFIG_CGROUP_PERF)
+SUBSYS(perf_event)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_NET_PRIO)
+#if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
 SUBSYS(net_prio)
 #endif

-#if IS_SUBSYS_ENABLED(CONFIG_CGROUP_HUGETLB)
+#if IS_ENABLED(CONFIG_CGROUP_HUGETLB)
 SUBSYS(hugetlb)
 #endif

 /*
......
@@ -49,7 +49,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)

 static inline bool hugetlb_cgroup_disabled(void)
 {
-	if (hugetlb_subsys.disabled)
+	if (hugetlb_cgrp_subsys.disabled)
 		return true;
 	return false;
 }
......
@@ -162,7 +162,7 @@ extern int do_swap_account;

 static inline bool mem_cgroup_disabled(void)
 {
-	if (mem_cgroup_subsys.disabled)
+	if (memory_cgrp_subsys.disabled)
 		return true;
 	return false;
 }
......
@@ -34,7 +34,7 @@ static inline u32 task_cls_classid(struct task_struct *p)
 		return 0;

 	rcu_read_lock();
-	classid = container_of(task_css(p, net_cls_subsys_id),
+	classid = container_of(task_css(p, net_cls_cgrp_id),
 			       struct cgroup_cls_state, css)->classid;
 	rcu_read_unlock();
......
@@ -27,32 +27,17 @@ struct netprio_map {

 void sock_update_netprioidx(struct sock *sk);

-#if IS_BUILTIN(CONFIG_CGROUP_NET_PRIO)
 static inline u32 task_netprioidx(struct task_struct *p)
 {
 	struct cgroup_subsys_state *css;
 	u32 idx;

 	rcu_read_lock();
-	css = task_css(p, net_prio_subsys_id);
+	css = task_css(p, net_prio_cgrp_id);
 	idx = css->cgroup->id;
 	rcu_read_unlock();
 	return idx;
 }
-#elif IS_MODULE(CONFIG_CGROUP_NET_PRIO)
-
-static inline u32 task_netprioidx(struct task_struct *p)
-{
-	struct cgroup_subsys_state *css;
-	u32 idx = 0;
-
-	rcu_read_lock();
-	css = task_css(p, net_prio_subsys_id);
-	if (css)
-		idx = css->cgroup->id;
-	rcu_read_unlock();
-	return idx;
-}
-#endif
 #else /* !CONFIG_CGROUP_NET_PRIO */
 static inline u32 task_netprioidx(struct task_struct *p)
 {
......
@@ -854,6 +854,7 @@ config NUMA_BALANCING

 menuconfig CGROUPS
 	boolean "Control Group support"
+	select KERNFS
 	help
 	  This option adds support for grouping sets of processes together, for
 	  use with process control subsystems such as Cpusets, CFS, memory
......
This diff is collapsed.
@@ -52,7 +52,7 @@ static inline struct freezer *css_freezer(struct cgroup_subsys_state *css)

 static inline struct freezer *task_freezer(struct task_struct *task)
 {
-	return css_freezer(task_css(task, freezer_subsys_id));
+	return css_freezer(task_css(task, freezer_cgrp_id));
 }

 static struct freezer *parent_freezer(struct freezer *freezer)
@@ -84,8 +84,6 @@ static const char *freezer_state_strs(unsigned int state)
 	return "THAWED";
 };

-struct cgroup_subsys freezer_subsys;
-
 static struct cgroup_subsys_state *
 freezer_css_alloc(struct cgroup_subsys_state *parent_css)
 {
@@ -189,7 +187,7 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	 * current state before executing the following - !frozen tasks may
 	 * be visible in a FROZEN cgroup and frozen tasks in a THAWED one.
 	 */
-	cgroup_taskset_for_each(task, new_css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		if (!(freezer->state & CGROUP_FREEZING)) {
 			__thaw_task(task);
 		} else {
@@ -216,6 +214,16 @@ static void freezer_attach(struct cgroup_subsys_state *new_css,
 	}
 }

+/**
+ * freezer_fork - cgroup post fork callback
+ * @task: a task which has just been forked
+ *
+ * @task has just been created and should conform to the current state of
+ * the cgroup_freezer it belongs to.  This function may race against
+ * freezer_attach().  Losing to freezer_attach() means that we don't have
+ * to do anything as freezer_attach() will put @task into the appropriate
+ * state.
+ */
 static void freezer_fork(struct task_struct *task)
 {
 	struct freezer *freezer;
@@ -224,14 +232,26 @@ static void freezer_fork(struct task_struct *task)
 	freezer = task_freezer(task);

 	/*
-	 * The root cgroup is non-freezable, so we can skip the
-	 * following check.
+	 * The root cgroup is non-freezable, so we can skip locking the
+	 * freezer.  This is safe regardless of race with task migration.
+	 * If we didn't race or won, skipping is obviously the right thing
+	 * to do.  If we lost and root is the new cgroup, noop is still the
+	 * right thing to do.
 	 */
 	if (!parent_freezer(freezer))
 		goto out;

+	/*
+	 * Grab @freezer->lock and freeze @task after verifying @task still
+	 * belongs to @freezer and it's freezing.  The former is for the
+	 * case where we have raced against task migration and lost and
+	 * @task is already in a different cgroup which may not be frozen.
+	 * This isn't strictly necessary as freeze_task() is allowed to be
+	 * called spuriously but let's do it anyway for, if nothing else,
+	 * documentation.
+	 */
 	spin_lock_irq(&freezer->lock);
-	if (freezer->state & CGROUP_FREEZING)
+	if (freezer == task_freezer(task) && (freezer->state & CGROUP_FREEZING))
 		freeze_task(task);
 	spin_unlock_irq(&freezer->lock);
 out:
@@ -422,7 +442,7 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 }

 static int freezer_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			 const char *buffer)
+			 char *buffer)
 {
 	bool freeze;
@@ -473,13 +493,11 @@ static struct cftype files[] = {
 	{ }	/* terminate */
 };

-struct cgroup_subsys freezer_subsys = {
-	.name		= "freezer",
+struct cgroup_subsys freezer_cgrp_subsys = {
 	.css_alloc	= freezer_css_alloc,
 	.css_online	= freezer_css_online,
 	.css_offline	= freezer_css_offline,
 	.css_free	= freezer_css_free,
-	.subsys_id	= freezer_subsys_id,
 	.attach		= freezer_attach,
 	.fork		= freezer_fork,
 	.base_cftypes	= files,
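The pattern above generalizes: with .name and .subsys_id gone, the
variable name is the whole contract.  A sketch for a hypothetical "demo"
controller - its SUBSYS(demo) entry in cgroup_subsys.h is what lets
cgroup core find demo_cgrp_subsys and fill in ->id and ->name at boot:

/* hypothetical built-in controller declaration after this series */
struct cgroup_subsys demo_cgrp_subsys = {
	.css_alloc	= demo_css_alloc,
	.css_free	= demo_css_free,
	.attach		= demo_attach,
	.base_cftypes	= demo_files,
};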
......
@@ -119,7 +119,7 @@ static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
 /* Retrieve the cpuset for a task */
 static inline struct cpuset *task_cs(struct task_struct *task)
 {
-	return css_cs(task_css(task, cpuset_subsys_id));
+	return css_cs(task_css(task, cpuset_cgrp_id));
 }

 static inline struct cpuset *parent_cs(struct cpuset *cs)
static inline struct cpuset *parent_cs(struct cpuset *cs) static inline struct cpuset *parent_cs(struct cpuset *cs)
...@@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) ...@@ -467,7 +467,7 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
* be changed to have empty cpus_allowed or mems_allowed. * be changed to have empty cpus_allowed or mems_allowed.
*/ */
ret = -ENOSPC; ret = -ENOSPC;
if ((cgroup_task_count(cur->css.cgroup) || cur->attach_in_progress)) { if ((cgroup_has_tasks(cur->css.cgroup) || cur->attach_in_progress)) {
if (!cpumask_empty(cur->cpus_allowed) && if (!cpumask_empty(cur->cpus_allowed) &&
cpumask_empty(trial->cpus_allowed)) cpumask_empty(trial->cpus_allowed))
goto out; goto out;
@@ -828,56 +828,37 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
 	return cs;
 }

-/**
- * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's
- * @tsk: task to test
- * @data: cpuset to @tsk belongs to
- *
- * Called by css_scan_tasks() for each task in a cgroup whose cpus_allowed
- * mask needs to be changed.
- *
- * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cpuset_mutex at this point.
- */
-static void cpuset_change_cpumask(struct task_struct *tsk, void *data)
-{
-	struct cpuset *cs = data;
-	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
-
-	set_cpus_allowed_ptr(tsk, cpus_cs->cpus_allowed);
-}
-
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
- *
- * Called with cpuset_mutex held
  *
- * The css_scan_tasks() function will scan all the tasks in a cgroup,
- * calling callback functions for each.
- *
- * No return value. It's guaranteed that css_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Iterate through each task of @cs updating its cpus_allowed to the
+ * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * cpuset membership stays stable.
  */
-static void update_tasks_cpumask(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_cpumask(struct cpuset *cs)
 {
-	css_scan_tasks(&cs->css, NULL, cpuset_change_cpumask, cs, heap);
+	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
+	struct css_task_iter it;
+	struct task_struct *task;
+
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it)))
+		set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
+	css_task_iter_end(&it);
 }

 /*
  * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
  * @root_cs: the root cpuset of the hierarchy
  * @update_root: update root cpuset or not?
- * @heap: the heap used by css_scan_tasks()
  *
  * This will update cpumasks of tasks in @root_cs and all other empty cpusets
  * which take on cpumask of @root_cs.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_cpumask_hier(struct cpuset *root_cs,
-				      bool update_root, struct ptr_heap *heap)
+static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
@@ -898,7 +879,7 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 			continue;
 		rcu_read_unlock();

-		update_tasks_cpumask(cp, heap);
+		update_tasks_cpumask(cp);

 		rcu_read_lock();
 		css_put(&cp->css);
@@ -914,7 +895,6 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 			  const char *buf)
 {
-	struct ptr_heap heap;
 	int retval;
 	int is_load_balanced;
@@ -947,19 +927,13 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		return retval;

-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (retval)
-		return retval;
-
 	is_load_balanced = is_sched_load_balance(trialcs);

 	mutex_lock(&callback_mutex);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
 	mutex_unlock(&callback_mutex);

-	update_tasks_cpumask_hier(cs, true, &heap);
-
-	heap_free(&heap);
+	update_tasks_cpumask_hier(cs, true);

 	if (is_load_balanced)
 		rebuild_sched_domains_locked();
@@ -1048,53 +1022,22 @@ static void cpuset_change_task_nodemask(struct task_struct *tsk,
 	task_unlock(tsk);
 }

-struct cpuset_change_nodemask_arg {
-	struct cpuset		*cs;
-	nodemask_t		*newmems;
-};
-
-/*
- * Update task's mems_allowed and rebind its mempolicy and vmas' mempolicy
- * of it to cpuset's new mems_allowed, and migrate pages to new nodes if
- * memory_migrate flag is set. Called with cpuset_mutex held.
- */
-static void cpuset_change_nodemask(struct task_struct *p, void *data)
-{
-	struct cpuset_change_nodemask_arg *arg = data;
-	struct cpuset *cs = arg->cs;
-	struct mm_struct *mm;
-	int migrate;
-
-	cpuset_change_task_nodemask(p, arg->newmems);
-
-	mm = get_task_mm(p);
-	if (!mm)
-		return;
-
-	migrate = is_memory_migrate(cs);
-
-	mpol_rebind_mm(mm, &cs->mems_allowed);
-	if (migrate)
-		cpuset_migrate_mm(mm, &cs->old_mems_allowed, arg->newmems);
-	mmput(mm);
-}
-
 static void *cpuset_being_rebound;

 /**
  * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
  *
- * Called with cpuset_mutex held.  No return value. It's guaranteed that
- * css_scan_tasks() always returns 0 if @heap != NULL.
+ * Iterate through each task of @cs updating its mems_allowed to the
+ * effective cpuset's.  As this function is called with cpuset_mutex held,
+ * cpuset membership stays stable.
  */
-static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_nodemask(struct cpuset *cs)
 {
 	static nodemask_t newmems;	/* protected by cpuset_mutex */
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
-	struct cpuset_change_nodemask_arg arg = { .cs = cs,
-						  .newmems = &newmems };
+	struct css_task_iter it;
+	struct task_struct *task;

 	cpuset_being_rebound = cs;	/* causes mpol_dup() rebind */
@@ -1110,7 +1053,25 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
 	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
 	 * is idempotent.  Also migrate pages in each mm to new nodes.
 	 */
-	css_scan_tasks(&cs->css, NULL, cpuset_change_nodemask, &arg, heap);
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it))) {
+		struct mm_struct *mm;
+		bool migrate;
+
+		cpuset_change_task_nodemask(task, &newmems);
+
+		mm = get_task_mm(task);
+		if (!mm)
+			continue;
+
+		migrate = is_memory_migrate(cs);
+
+		mpol_rebind_mm(mm, &cs->mems_allowed);
+		if (migrate)
+			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
+		mmput(mm);
+	}
+	css_task_iter_end(&it);

 	/*
 	 * All the tasks' nodemasks have been updated, update
@@ -1126,15 +1087,13 @@ static void update_tasks_nodemask(struct cpuset *cs, struct ptr_heap *heap)
  * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
  * @cs: the root cpuset of the hierarchy
  * @update_root: update the root cpuset or not?
- * @heap: the heap used by css_scan_tasks()
  *
  * This will update nodemasks of tasks in @root_cs and all other empty cpusets
  * which take on nodemask of @root_cs.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_nodemask_hier(struct cpuset *root_cs,
-				       bool update_root, struct ptr_heap *heap)
+static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
 {
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
@@ -1155,7 +1114,7 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
 			continue;
 		rcu_read_unlock();

-		update_tasks_nodemask(cp, heap);
+		update_tasks_nodemask(cp);

 		rcu_read_lock();
 		css_put(&cp->css);
@@ -1180,7 +1139,6 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 			   const char *buf)
 {
 	int retval;
-	struct ptr_heap heap;

 	/*
 	 * top_cpuset.mems_allowed tracks node_stats[N_MEMORY];
@@ -1219,17 +1177,11 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
 	if (retval < 0)
 		goto done;

-	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (retval < 0)
-		goto done;
-
 	mutex_lock(&callback_mutex);
 	cs->mems_allowed = trialcs->mems_allowed;
 	mutex_unlock(&callback_mutex);

-	update_tasks_nodemask_hier(cs, true, &heap);
-
-	heap_free(&heap);
+	update_tasks_nodemask_hier(cs, true);

 done:
 	return retval;
 }
@@ -1256,39 +1208,23 @@ static int update_relax_domain_level(struct cpuset *cs, s64 val)
 	return 0;
 }

-/**
- * cpuset_change_flag - make a task's spread flags the same as its cpuset's
- * @tsk: task to be updated
- * @data: cpuset to @tsk belongs to
- *
- * Called by css_scan_tasks() for each task in a cgroup.
- *
- * We don't need to re-check for the cgroup/cpuset membership, since we're
- * holding cpuset_mutex at this point.
- */
-static void cpuset_change_flag(struct task_struct *tsk, void *data)
-{
-	struct cpuset *cs = data;
-
-	cpuset_update_task_spread_flag(cs, tsk);
-}
-
 /**
  * update_tasks_flags - update the spread flags of tasks in the cpuset.
  * @cs: the cpuset in which each task's spread flags needs to be changed
- * @heap: if NULL, defer allocating heap memory to css_scan_tasks()
- *
- * Called with cpuset_mutex held
  *
- * The css_scan_tasks() function will scan all the tasks in a cgroup,
- * calling callback functions for each.
- *
- * No return value. It's guaranteed that css_scan_tasks() always returns 0
- * if @heap != NULL.
+ * Iterate through each task of @cs updating its spread flags.  As this
+ * function is called with cpuset_mutex held, cpuset membership stays
+ * stable.
  */
-static void update_tasks_flags(struct cpuset *cs, struct ptr_heap *heap)
+static void update_tasks_flags(struct cpuset *cs)
 {
-	css_scan_tasks(&cs->css, NULL, cpuset_change_flag, cs, heap);
+	struct css_task_iter it;
+	struct task_struct *task;
+
+	css_task_iter_start(&cs->css, &it);
+	while ((task = css_task_iter_next(&it)))
+		cpuset_update_task_spread_flag(cs, task);
+	css_task_iter_end(&it);
 }

 /*
/* /*
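Note: this hunk shows the recurring shape of the conversion in this series. css_scan_tasks(), which needed a pre-sized ptr_heap to stabilize the task list, is replaced by the allocation-free css_task_iter. The general usage pattern, sketched with a hypothetical visit() callback standing in for the real per-task work:

	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(css, &it);		/* pins the css's task lists */
	while ((task = css_task_iter_next(&it)))
		visit(task);	/* hypothetical; runs under the iterator's locking, keep it light */
	css_task_iter_end(&it);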
@@ -1306,7 +1242,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	struct cpuset *trialcs;
 	int balance_flag_changed;
 	int spread_flag_changed;
-	struct ptr_heap heap;
 	int err;

 	trialcs = alloc_trial_cpuset(cs);

@@ -1322,10 +1257,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	if (err < 0)
 		goto out;

-	err = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, NULL);
-	if (err < 0)
-		goto out;
-
 	balance_flag_changed = (is_sched_load_balance(cs) !=
 				is_sched_load_balance(trialcs));

@@ -1340,8 +1271,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 		rebuild_sched_domains_locked();

 	if (spread_flag_changed)
-		update_tasks_flags(cs, &heap);
-	heap_free(&heap);
+		update_tasks_flags(cs);
 out:
 	free_trial_cpuset(trialcs);
 	return err;

@@ -1445,6 +1375,8 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }

+static struct cpuset *cpuset_attach_old_cs;
+
 /* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
 static int cpuset_can_attach(struct cgroup_subsys_state *css,
 			     struct cgroup_taskset *tset)

@@ -1453,6 +1385,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	struct task_struct *task;
 	int ret;

+	/* used later by cpuset_attach() */
+	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));
+
 	mutex_lock(&cpuset_mutex);

 	/*

@@ -1464,7 +1399,7 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
 		goto out_unlock;

-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * Kthreads which disallow setaffinity shouldn't be moved
 		 * to a new cpuset; we don't want to change their cpu

@@ -1516,10 +1451,8 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
-	struct cgroup_subsys_state *oldcss = cgroup_taskset_cur_css(tset,
-							cpuset_subsys_id);
 	struct cpuset *cs = css_cs(css);
-	struct cpuset *oldcs = css_cs(oldcss);
+	struct cpuset *oldcs = cpuset_attach_old_cs;
 	struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
 	struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
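Note: cgroup_taskset_cur_css() is gone, so the old cpuset has to be captured in ->can_attach() and consumed in ->attach(). A bare static variable works here only because cgroup core fully serializes attach operations; this is a sketch of the handoff pattern, not a general-purpose idiom:

	static struct cpuset *cpuset_attach_old_cs;

	/* in ->can_attach(), before anything can fail: */
	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset));

	/* in ->attach(), later but still serialized with other attaches: */
	struct cpuset *oldcs = cpuset_attach_old_cs;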
@@ -1533,7 +1466,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
 	guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);

-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
 		 * fail.  TODO: have a better way to handle failure here

@@ -1673,7 +1606,7 @@ static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
  * Common handling for a write to a "cpus" or "mems" file.
  */
 static int cpuset_write_resmask(struct cgroup_subsys_state *css,
-				struct cftype *cft, const char *buf)
+				struct cftype *cft, char *buf)
 {
 	struct cpuset *cs = css_cs(css);
 	struct cpuset *trialcs;

@@ -2020,8 +1953,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 	kfree(cs);
 }

-struct cgroup_subsys cpuset_subsys = {
-	.name = "cpuset",
+struct cgroup_subsys cpuset_cgrp_subsys = {
 	.css_alloc = cpuset_css_alloc,
 	.css_online = cpuset_css_online,
 	.css_offline = cpuset_css_offline,

@@ -2029,7 +1961,6 @@ struct cgroup_subsys cpuset_subsys = {
 	.can_attach = cpuset_can_attach,
 	.cancel_attach = cpuset_cancel_attach,
 	.attach = cpuset_attach,
-	.subsys_id = cpuset_subsys_id,
 	.base_cftypes = files,
 	.early_init = 1,
 };
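Note: the .name and .subsys_id fields can be dropped from every controller in this series because cgroup core now derives the structure name, the ID enum, and the name string from a single SUBSYS() entry. Roughly how the generation reads in this release (abridged sketch of include/linux/cgroup.h; cgroup_subsys.h lists each controller once, e.g. SUBSYS(cpuset)):

	#define SUBSYS(_x) _x ## _cgrp_id,
	enum cgroup_subsys_id {
	#include <linux/cgroup_subsys.h>
		CGROUP_SUBSYS_COUNT,
	};
	#undef SUBSYS

	#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
	#include <linux/cgroup_subsys.h>
	#undef SUBSYS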
@@ -2086,10 +2017,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
 		parent = parent_cs(parent);

 	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
-		rcu_read_lock();
-		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n",
-		       cgroup_name(cs->css.cgroup));
-		rcu_read_unlock();
+		printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
+		pr_cont_cgroup_name(cs->css.cgroup);
+		pr_cont("\n");
 	}
 }

@@ -2137,7 +2067,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 	 */
 	if ((sane && cpumask_empty(cs->cpus_allowed)) ||
 	    (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
-		update_tasks_cpumask(cs, NULL);
+		update_tasks_cpumask(cs);

 	mutex_lock(&callback_mutex);
 	nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);

@@ -2151,7 +2081,7 @@ static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 	 */
 	if ((sane && nodes_empty(cs->mems_allowed)) ||
 	    (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
-		update_tasks_nodemask(cs, NULL);
+		update_tasks_nodemask(cs);

 	is_empty = cpumask_empty(cs->cpus_allowed) ||
 		nodes_empty(cs->mems_allowed);

@@ -2213,7 +2143,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = new_mems;
 		mutex_unlock(&callback_mutex);
-		update_tasks_nodemask(&top_cpuset, NULL);
+		update_tasks_nodemask(&top_cpuset);
 	}

 	mutex_unlock(&cpuset_mutex);

@@ -2305,10 +2235,10 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 	struct cpuset *cpus_cs;

 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
 	guarantee_online_cpus(cpus_cs, pmask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);
 }

@@ -2361,10 +2291,10 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 	nodemask_t mask;

 	mutex_lock(&callback_mutex);
-	task_lock(tsk);
+	rcu_read_lock();
 	mems_cs = effective_nodemask_cpuset(task_cs(tsk));
 	guarantee_online_mems(mems_cs, &mask);
-	task_unlock(tsk);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);

 	return mask;

@@ -2480,10 +2410,10 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
 	/* Not hardwall and node outside mems_allowed: scan up cpusets */
 	mutex_lock(&callback_mutex);
-	task_lock(current);
+	rcu_read_lock();
 	cs = nearest_hardwall_ancestor(task_cs(current));
 	allowed = node_isset(node, cs->mems_allowed);
-	task_unlock(current);
+	rcu_read_unlock();
 	mutex_unlock(&callback_mutex);

 	return allowed;
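Note: the task_lock() to rcu_read_lock() substitutions in these hunks follow from "cgroup: drop task_lock() protection around task->cgroups" in this pull: task->cgroups is RCU-managed, so a reader only needs an RCU read section spanning the use of the returned cpuset pointer. The resulting access pattern, sketched:

	struct cpuset *cs;

	rcu_read_lock();
	cs = task_cs(tsk);	/* task_css() rcu_dereferences task->cgroups */
	/* ... use cs; it is only guaranteed valid inside this read section ... */
	rcu_read_unlock();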
@@ -2609,27 +2539,27 @@ int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
  * @task: pointer to task_struct of some task.
  *
  * Description: Prints @task's name, cpuset name, and cached copy of its
- * mems_allowed to the kernel log.  Must hold task_lock(task) to allow
- * dereferencing task_cs(task).
+ * mems_allowed to the kernel log.
  */
 void cpuset_print_task_mems_allowed(struct task_struct *tsk)
 {
 	 /* Statically allocated to prevent using excess stack. */
 	static char cpuset_nodelist[CPUSET_NODELIST_LEN];
 	static DEFINE_SPINLOCK(cpuset_buffer_lock);
+	struct cgroup *cgrp;

-	struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
-
-	rcu_read_lock();
 	spin_lock(&cpuset_buffer_lock);
+	rcu_read_lock();

+	cgrp = task_cs(tsk)->css.cgroup;
 	nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
 			   tsk->mems_allowed);
-	printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n",
-	       tsk->comm, cgroup_name(cgrp), cpuset_nodelist);
+	printk(KERN_INFO "%s cpuset=", tsk->comm);
+	pr_cont_cgroup_name(cgrp);
+	pr_cont(" mems_allowed=%s\n", cpuset_nodelist);

-	spin_unlock(&cpuset_buffer_lock);
 	rcu_read_unlock();
+	spin_unlock(&cpuset_buffer_lock);
 }

 /*
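Note: with kernfs there is no cached cgroup->name string to hand to printk(), so printing is split into pr_cont_cgroup_name()/pr_cont_cgroup_path() continuations issued under RCU, as in the hunk above. The shape of a typical message, assuming @nodelist is a prepared buffer:

	rcu_read_lock();
	pr_info("%s cpuset=", tsk->comm);
	pr_cont_cgroup_name(task_cs(tsk)->css.cgroup);
	pr_cont(" mems_allowed=%s\n", nodelist);
	rcu_read_unlock();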
@@ -2660,9 +2590,9 @@ int cpuset_memory_pressure_enabled __read_mostly;

 void __cpuset_memory_pressure_bump(void)
 {
-	task_lock(current);
+	rcu_read_lock();
 	fmeter_markevent(&task_cs(current)->fmeter);
-	task_unlock(current);
+	rcu_read_unlock();
 }

 #ifdef CONFIG_PROC_PID_CPUSET

@@ -2679,12 +2609,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
 {
 	struct pid *pid;
 	struct task_struct *tsk;
-	char *buf;
+	char *buf, *p;
 	struct cgroup_subsys_state *css;
 	int retval;

 	retval = -ENOMEM;
-	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	buf = kmalloc(PATH_MAX, GFP_KERNEL);
 	if (!buf)
 		goto out;

@@ -2694,14 +2624,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
 	if (!tsk)
 		goto out_free;

+	retval = -ENAMETOOLONG;
 	rcu_read_lock();
-	css = task_css(tsk, cpuset_subsys_id);
-	retval = cgroup_path(css->cgroup, buf, PAGE_SIZE);
+	css = task_css(tsk, cpuset_cgrp_id);
+	p = cgroup_path(css->cgroup, buf, PATH_MAX);
 	rcu_read_unlock();
-	if (retval < 0)
+	if (!p)
 		goto out_put_task;
-	seq_puts(m, buf);
+	seq_puts(m, p);
 	seq_putc(m, '\n');
+	retval = 0;
 out_put_task:
 	put_task_struct(tsk);
 out_free:
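Note: cgroup_path() now wraps kernfs_path() and returns the filled buffer, or NULL when the path does not fit, instead of an int length. Callers therefore test the returned pointer, as proc_cpuset_show() does above. A minimal calling sequence, sketched:

	char *buf, *p;

	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rcu_read_lock();
	p = cgroup_path(cgrp, buf, PATH_MAX);
	rcu_read_unlock();

	if (p)
		pr_info("cgroup path: %s\n", p);
	kfree(buf);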
...
@@ -361,7 +361,7 @@ struct perf_cgroup {
 static inline struct perf_cgroup *
 perf_cgroup_from_task(struct task_struct *task)
 {
-	return container_of(task_css(task, perf_subsys_id),
+	return container_of(task_css(task, perf_event_cgrp_id),
 			    struct perf_cgroup, css);
 }

@@ -389,11 +389,6 @@ perf_cgroup_match(struct perf_event *event)
 				    event->cgrp->css.cgroup);
 }

-static inline bool perf_tryget_cgroup(struct perf_event *event)
-{
-	return css_tryget(&event->cgrp->css);
-}
-
 static inline void perf_put_cgroup(struct perf_event *event)
 {
 	css_put(&event->cgrp->css);

@@ -612,9 +607,7 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	if (!f.file)
 		return -EBADF;

-	rcu_read_lock();
-
-	css = css_from_dir(f.file->f_dentry, &perf_subsys);
+	css = css_tryget_from_dir(f.file->f_dentry, &perf_event_cgrp_subsys);
 	if (IS_ERR(css)) {
 		ret = PTR_ERR(css);
 		goto out;

@@ -623,13 +616,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;

-	/* must be done before we fput() the file */
-	if (!perf_tryget_cgroup(event)) {
-		event->cgrp = NULL;
-		ret = -ENOENT;
-		goto out;
-	}
-
 	/*
 	 * all events in a group must monitor
 	 * the same cgroup because a task belongs

@@ -640,7 +626,6 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		ret = -EINVAL;
 	}
 out:
-	rcu_read_unlock();
 	fdput(f);
 	return ret;
 }
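Note: the open-coded rcu_read_lock() + css_from_dir() + css_tryget() dance is folded into css_tryget_from_dir(), which returns an ERR_PTR on failure and an already-referenced css on success; the caller just balances it with css_put(). Sketched usage:

	struct cgroup_subsys_state *css;

	css = css_tryget_from_dir(dentry, &perf_event_cgrp_subsys);
	if (IS_ERR(css))
		return PTR_ERR(css);

	/* ... css is pinned here; no RCU section required ... */

	css_put(css);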
@@ -8053,7 +8038,7 @@ static void perf_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;

-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		task_function_call(task, __perf_cgroup_move, task);
 }

@@ -8072,9 +8057,7 @@ static void perf_cgroup_exit(struct cgroup_subsys_state *css,
 	task_function_call(task, __perf_cgroup_move, task);
 }

-struct cgroup_subsys perf_subsys = {
-	.name		= "perf_event",
-	.subsys_id	= perf_subsys_id,
+struct cgroup_subsys perf_event_cgrp_subsys = {
 	.css_alloc	= perf_cgroup_css_alloc,
 	.css_free	= perf_cgroup_css_free,
 	.exit		= perf_cgroup_exit,
...
@@ -797,7 +797,7 @@ void do_exit(long code)
 	 */
 	perf_event_exit_task(tsk);

-	cgroup_exit(tsk, 1);
+	cgroup_exit(tsk);

 	if (group_dead)
 		disassociate_ctty(1);
...
@@ -1272,7 +1272,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (IS_ERR(p->mempolicy)) {
 		retval = PTR_ERR(p->mempolicy);
 		p->mempolicy = NULL;
-		goto bad_fork_cleanup_cgroup;
+		goto bad_fork_cleanup_threadgroup_lock;
 	}
 	mpol_fix_fork_child_flag(p);
 #endif

@@ -1525,11 +1525,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	perf_event_free_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
-bad_fork_cleanup_cgroup:
+bad_fork_cleanup_threadgroup_lock:
 #endif
 	if (clone_flags & CLONE_THREAD)
 		threadgroup_change_end(current);
-	cgroup_exit(p, 0);
 	delayacct_tsk_free(p);
 	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
...
@@ -7230,7 +7230,7 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		tsk->sched_class->put_prev_task(rq, tsk);

-	tg = container_of(task_css_check(tsk, cpu_cgroup_subsys_id,
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
 				lockdep_is_held(&tsk->sighand->siglock)),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);

@@ -7657,7 +7657,7 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;

-	cgroup_taskset_for_each(task, css, tset) {
+	cgroup_taskset_for_each(task, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
 		if (!sched_rt_can_attach(css_tg(css), task))
 			return -EINVAL;

@@ -7675,7 +7675,7 @@ static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
 {
 	struct task_struct *task;

-	cgroup_taskset_for_each(task, css, tset)
+	cgroup_taskset_for_each(task, tset)
 		sched_move_task(task);
 }
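Note: cgroup_taskset_for_each() lost its @css filter argument throughout this merge; with per-css filtering gone, the iterator simply visits every task being migrated. A sketch of a ->can_attach() style loop, with a hypothetical can_migrate() standing in for the controller's own check:

	struct task_struct *task;

	cgroup_taskset_for_each(task, tset) {
		if (!can_migrate(task))		/* hypothetical per-task veto */
			return -EINVAL;
	}
	return 0;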
@@ -8014,8 +8014,7 @@ static struct cftype cpu_files[] = {
 	{ }	/* terminate */
 };

-struct cgroup_subsys cpu_cgroup_subsys = {
-	.name		= "cpu",
+struct cgroup_subsys cpu_cgrp_subsys = {
 	.css_alloc	= cpu_cgroup_css_alloc,
 	.css_free	= cpu_cgroup_css_free,
 	.css_online	= cpu_cgroup_css_online,

@@ -8023,7 +8022,6 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
-	.subsys_id	= cpu_cgroup_subsys_id,
 	.base_cftypes	= cpu_files,
 	.early_init	= 1,
 };
...
@@ -41,7 +41,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 /* return cpu accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
-	return css_ca(task_css(tsk, cpuacct_subsys_id));
+	return css_ca(task_css(tsk, cpuacct_cgrp_id));
 }

 static inline struct cpuacct *parent_ca(struct cpuacct *ca)

@@ -275,11 +275,9 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
 	rcu_read_unlock();
 }

-struct cgroup_subsys cpuacct_subsys = {
-	.name		= "cpuacct",
+struct cgroup_subsys cpuacct_cgrp_subsys = {
 	.css_alloc	= cpuacct_css_alloc,
 	.css_free	= cpuacct_css_free,
-	.subsys_id	= cpuacct_subsys_id,
 	.base_cftypes	= files,
 	.early_init	= 1,
 };
@@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg)
 	if (autogroup_path(tg, group_path, PATH_MAX))
 		return group_path;

-	cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
-	return group_path;
+	return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
 }
 #endif
...
@@ -30,7 +30,6 @@ struct hugetlb_cgroup {
 #define MEMFILE_IDX(val)	(((val) >> 16) & 0xffff)
 #define MEMFILE_ATTR(val)	((val) & 0xffff)

-struct cgroup_subsys hugetlb_subsys __read_mostly;
 static struct hugetlb_cgroup *root_h_cgroup __read_mostly;

 static inline

@@ -42,7 +41,7 @@ struct hugetlb_cgroup *hugetlb_cgroup_from_css(struct cgroup_subsys_state *s)
 static inline
 struct hugetlb_cgroup *hugetlb_cgroup_from_task(struct task_struct *task)
 {
-	return hugetlb_cgroup_from_css(task_css(task, hugetlb_subsys_id));
+	return hugetlb_cgroup_from_css(task_css(task, hugetlb_cgrp_id));
 }

 static inline bool hugetlb_cgroup_is_root(struct hugetlb_cgroup *h_cg)

@@ -255,7 +254,7 @@ static u64 hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css,
 }

 static int hugetlb_cgroup_write(struct cgroup_subsys_state *css,
-				struct cftype *cft, const char *buffer)
+				struct cftype *cft, char *buffer)
 {
 	int idx, name, ret;
 	unsigned long long val;

@@ -358,7 +357,7 @@ static void __init __hugetlb_cgroup_file_init(int idx)
 	cft = &h->cgroup_files[4];
 	memset(cft, 0, sizeof(*cft));

-	WARN_ON(cgroup_add_cftypes(&hugetlb_subsys, h->cgroup_files));
+	WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));

 	return;
 }

@@ -402,10 +401,8 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 	return;
 }

-struct cgroup_subsys hugetlb_subsys = {
-	.name = "hugetlb",
+struct cgroup_subsys hugetlb_cgrp_subsys = {
 	.css_alloc	= hugetlb_cgroup_css_alloc,
 	.css_offline	= hugetlb_cgroup_css_offline,
 	.css_free	= hugetlb_cgroup_css_free,
-	.subsys_id	= hugetlb_subsys_id,
 };
@@ -66,8 +66,8 @@
 #include <trace/events/vmscan.h>

-struct cgroup_subsys mem_cgroup_subsys __read_mostly;
-EXPORT_SYMBOL(mem_cgroup_subsys);
+struct cgroup_subsys memory_cgrp_subsys __read_mostly;
+EXPORT_SYMBOL(memory_cgrp_subsys);

 #define MEM_CGROUP_RECLAIM_RETRIES	5
 static struct mem_cgroup *root_mem_cgroup __read_mostly;

@@ -538,7 +538,7 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id)
 {
 	struct cgroup_subsys_state *css;

-	css = css_from_id(id - 1, &mem_cgroup_subsys);
+	css = css_from_id(id - 1, &memory_cgrp_subsys);
 	return mem_cgroup_from_css(css);
 }

@@ -1072,7 +1072,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
 	if (unlikely(!p))
 		return NULL;

-	return mem_cgroup_from_css(task_css(p, mem_cgroup_subsys_id));
+	return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
 }

 struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
@@ -1683,15 +1683,8 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  */
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
-	/*
-	 * protects memcg_name and makes sure that parallel ooms do not
-	 * interleave
-	 */
+	/* oom_info_lock ensures that parallel ooms do not interleave */
 	static DEFINE_MUTEX(oom_info_lock);
-	struct cgroup *task_cgrp;
-	struct cgroup *mem_cgrp;
-	static char memcg_name[PATH_MAX];
-	int ret;
 	struct mem_cgroup *iter;
 	unsigned int i;
@@ -1701,36 +1694,14 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	mutex_lock(&oom_info_lock);
 	rcu_read_lock();

-	mem_cgrp = memcg->css.cgroup;
-	task_cgrp = task_cgroup(p, mem_cgroup_subsys_id);
-
-	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		/*
-		 * Unfortunately, we are unable to convert to a useful name
-		 * But we'll still print out the usage information
-		 */
-		rcu_read_unlock();
-		goto done;
-	}
-	rcu_read_unlock();
-
-	pr_info("Task in %s killed", memcg_name);
-
-	rcu_read_lock();
-	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		rcu_read_unlock();
-		goto done;
-	}
+	pr_info("Task in ");
+	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+	pr_info(" killed as a result of limit of ");
+	pr_cont_cgroup_path(memcg->css.cgroup);
+	pr_info("\n");
+
 	rcu_read_unlock();

-	/*
-	 * Continues from above, so we don't need an KERN_ level
-	 */
-	pr_cont(" as a result of limit of %s\n", memcg_name);
-done:
-
 	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
@@ -1745,13 +1716,8 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));

 	for_each_mem_cgroup_tree(iter, memcg) {
-		pr_info("Memory cgroup stats");
-
-		rcu_read_lock();
-		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
-		if (!ret)
-			pr_cont(" for %s", memcg_name);
-		rcu_read_unlock();
+		pr_info("Memory cgroup stats for ");
+		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");

 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3401,7 +3367,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 					 struct kmem_cache *s)
 {
 	struct kmem_cache *new = NULL;
-	static char *tmp_name = NULL;
+	static char *tmp_path = NULL, *tmp_name = NULL;
 	static DEFINE_MUTEX(mutex);	/* protects tmp_name */

 	BUG_ON(!memcg_can_account_kmem(memcg));

@@ -3413,18 +3379,20 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	 * This static temporary buffer is used to prevent from
 	 * pointless shortliving allocation.
 	 */
-	if (!tmp_name) {
-		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!tmp_path || !tmp_name) {
+		if (!tmp_path)
+			tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
 		if (!tmp_name)
+			tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+		if (!tmp_path || !tmp_name)
 			goto out;
 	}

-	rcu_read_lock();
-	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
-		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
-	rcu_read_unlock();
+	cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
+	snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), tmp_name);

-	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
+	new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
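Note: cgroup_name() no longer returns a pointer to a cached string; it copies the kernfs node name into a caller-supplied buffer, which is why memcg now keeps separate tmp_name and tmp_path scratch buffers above. Typical use, assuming @cgrp is already pinned by a reference:

	char name[NAME_MAX + 1];

	cgroup_name(cgrp, name, sizeof(name));	/* copies; no RCU string lifetime to manage */
	pr_info("cache owner: %s\n", name);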
@@ -4990,7 +4958,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg)
 	struct cgroup *cgrp = memcg->css.cgroup;

 	/* returns EBUSY if there is a task or if we come here twice. */
-	if (cgroup_task_count(cgrp) || !list_empty(&cgrp->children))
+	if (cgroup_has_tasks(cgrp) || !list_empty(&cgrp->children))
 		return -EBUSY;

 	/* we call try-to-free pages for make this cgroup empty */

@@ -5172,7 +5140,7 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg,
 	 * of course permitted.
 	 */
 	mutex_lock(&memcg_create_mutex);
-	if (cgroup_task_count(memcg->css.cgroup) || memcg_has_children(memcg))
+	if (cgroup_has_tasks(memcg->css.cgroup) || memcg_has_children(memcg))
 		err = -EBUSY;
 	mutex_unlock(&memcg_create_mutex);
 	if (err)

@@ -5274,7 +5242,7 @@ static int memcg_update_kmem_limit(struct mem_cgroup *memcg,
  * RES_LIMIT.
  */
 static int mem_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer)
+			    char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	enum res_type type;

@@ -6095,7 +6063,7 @@ static void memcg_event_ptable_queue_proc(struct file *file,
 * Interpretation of args is defined by control file implementation.
 */
 static int memcg_write_event_control(struct cgroup_subsys_state *css,
-				     struct cftype *cft, const char *buffer)
+				     struct cftype *cft, char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	struct mem_cgroup_event *event;
@@ -6183,17 +6151,15 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css,
 	 * automatically removed on cgroup destruction but the removal is
 	 * asynchronous, so take an extra ref on @css.
 	 */
-	rcu_read_lock();
-
+	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
+					&memory_cgrp_subsys);
 	ret = -EINVAL;
-	cfile_css = css_from_dir(cfile.file->f_dentry->d_parent,
-				 &mem_cgroup_subsys);
-	if (cfile_css == css && css_tryget(css))
-		ret = 0;
-
-	rcu_read_unlock();
-	if (ret)
+	if (IS_ERR(cfile_css))
+		goto out_put_cfile;
+	if (cfile_css != css) {
+		css_put(cfile_css);
 		goto out_put_cfile;
+	}

 	ret = event->register_event(memcg, event->eventfd, buffer);
 	if (ret)
@@ -6566,11 +6532,11 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
 		 * unfortunate state in our controller.
 		 */
 		if (parent != root_mem_cgroup)
-			mem_cgroup_subsys.broken_hierarchy = true;
+			memory_cgrp_subsys.broken_hierarchy = true;
 	}
 	mutex_unlock(&memcg_create_mutex);

-	return memcg_init_kmem(memcg, &mem_cgroup_subsys);
+	return memcg_init_kmem(memcg, &memory_cgrp_subsys);
 }

 /*

@@ -7272,9 +7238,7 @@ static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 	mem_cgroup_from_css(root_css)->use_hierarchy = true;
 }

-struct cgroup_subsys mem_cgroup_subsys = {
-	.name = "memory",
-	.subsys_id = mem_cgroup_subsys_id,
+struct cgroup_subsys memory_cgrp_subsys = {
 	.css_alloc = mem_cgroup_css_alloc,
 	.css_online = mem_cgroup_css_online,
 	.css_offline = mem_cgroup_css_offline,

@@ -7300,7 +7264,7 @@ __setup("swapaccount=", enable_swap_account);
 static void __init memsw_file_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, memsw_cgroup_files));
+	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
 }

 static void __init enable_swap_cgroup(void)
...
@@ -145,14 +145,10 @@ static int hwpoison_filter_task(struct page *p)
 		return -EINVAL;

 	css = mem_cgroup_css(mem);
-	/* root_mem_cgroup has NULL dentries */
-	if (!css->cgroup->dentry)
-		return -EINVAL;
-
-	ino = css->cgroup->dentry->d_inode->i_ino;
+	ino = cgroup_ino(css->cgroup);
 	css_put(css);

-	if (ino != hwpoison_filter_memcg)
+	if (!ino || ino != hwpoison_filter_memcg)
 		return -EINVAL;

 	return 0;
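Note: the old code had to special-case cgroups whose ->dentry was NULL (never instantiated in vfs); with kernfs every cgroup owns a kernfs node with a stable inode number. Roughly how the helper reads in this release, as a sketch:

	/* approximate shape of cgroup_ino() here; 0 means no kernfs node */
	static inline ino_t cgroup_ino(struct cgroup *cgrp)
	{
		return cgrp->kn ? cgrp->kn->ino : 0;
	}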
...
@@ -243,7 +243,7 @@ config XPS
 	default y

 config CGROUP_NET_PRIO
-	tristate "Network priority cgroup"
+	bool "Network priority cgroup"
 	depends on CGROUPS
 	---help---
 	  Cgroup subsystem for use in assigning processes to network priorities on
...
@@ -23,7 +23,7 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
 struct cgroup_cls_state *task_cls_state(struct task_struct *p)
 {
-	return css_cls_state(task_css(p, net_cls_subsys_id));
+	return css_cls_state(task_css(p, net_cls_cgrp_id));
 }
 EXPORT_SYMBOL_GPL(task_cls_state);

@@ -73,7 +73,7 @@ static void cgrp_attach(struct cgroup_subsys_state *css,
 	void *v = (void *)(unsigned long)cs->classid;
 	struct task_struct *p;

-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_classid, v);
 		task_unlock(p);

@@ -102,19 +102,10 @@ static struct cftype ss_files[] = {
 	{ }	/* terminate */
 };

-struct cgroup_subsys net_cls_subsys = {
-	.name			= "net_cls",
+struct cgroup_subsys net_cls_cgrp_subsys = {
 	.css_alloc		= cgrp_css_alloc,
 	.css_online		= cgrp_css_online,
 	.css_free		= cgrp_css_free,
 	.attach			= cgrp_attach,
-	.subsys_id		= net_cls_subsys_id,
 	.base_cftypes		= ss_files,
-	.module			= THIS_MODULE,
 };
-
-static int __init init_netclassid_cgroup(void)
-{
-	return cgroup_load_subsys(&net_cls_subsys);
-}
-__initcall(init_netclassid_cgroup);
@@ -186,7 +186,7 @@ static int read_priomap(struct seq_file *sf, void *v)
 }

 static int write_priomap(struct cgroup_subsys_state *css, struct cftype *cft,
-			 const char *buffer)
+			 char *buffer)
 {
 	char devname[IFNAMSIZ + 1];
 	struct net_device *dev;

@@ -224,7 +224,7 @@ static void net_prio_attach(struct cgroup_subsys_state *css,
 	struct task_struct *p;
 	void *v = (void *)(unsigned long)css->cgroup->id;

-	cgroup_taskset_for_each(p, css, tset) {
+	cgroup_taskset_for_each(p, tset) {
 		task_lock(p);
 		iterate_fd(p->files, 0, update_netprio, v);
 		task_unlock(p);

@@ -244,15 +244,12 @@ static struct cftype ss_files[] = {
 	{ }	/* terminate */
 };

-struct cgroup_subsys net_prio_subsys = {
-	.name		= "net_prio",
+struct cgroup_subsys net_prio_cgrp_subsys = {
 	.css_alloc	= cgrp_css_alloc,
 	.css_online	= cgrp_css_online,
 	.css_free	= cgrp_css_free,
 	.attach		= net_prio_attach,
-	.subsys_id	= net_prio_subsys_id,
 	.base_cftypes	= ss_files,
-	.module		= THIS_MODULE,
 };

 static int netprio_device_event(struct notifier_block *unused,
@@ -283,37 +280,9 @@ static struct notifier_block netprio_device_notifier = {
 static int __init init_cgroup_netprio(void)
 {
-	int ret;
-
-	ret = cgroup_load_subsys(&net_prio_subsys);
-	if (ret)
-		goto out;
-
 	register_netdevice_notifier(&netprio_device_notifier);
-
-out:
-	return ret;
-}
-
-static void __exit exit_cgroup_netprio(void)
-{
-	struct netprio_map *old;
-	struct net_device *dev;
-
-	unregister_netdevice_notifier(&netprio_device_notifier);
-
-	cgroup_unload_subsys(&net_prio_subsys);
-
-	rtnl_lock();
-	for_each_netdev(&init_net, dev) {
-		old = rtnl_dereference(dev->priomap);
-		RCU_INIT_POINTER(dev->priomap, NULL);
-		if (old)
-			kfree_rcu(old, rcu);
-	}
-	rtnl_unlock();
+	return 0;
 }

-module_init(init_cgroup_netprio);
-module_exit(exit_cgroup_netprio);
+subsys_initcall(init_cgroup_netprio);
 MODULE_LICENSE("GPL v2");
@@ -103,7 +103,7 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
 }

 static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
-			    const char *buffer)
+			    char *buffer)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
 	unsigned long long val;

@@ -219,7 +219,7 @@ static struct cftype tcp_files[] = {
 static int __init tcp_memcontrol_init(void)
 {
-	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
+	WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files));
 	return 0;
 }
 __initcall(tcp_memcontrol_init);
@@ -58,11 +58,9 @@ static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
 static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
 {
-	return css_to_devcgroup(task_css(task, devices_subsys_id));
+	return css_to_devcgroup(task_css(task, devices_cgrp_id));
 }

-struct cgroup_subsys devices_subsys;
-
 /*
 * called under devcgroup_mutex
 */

@@ -498,7 +496,7 @@ static inline bool has_children(struct dev_cgroup *devcgroup)
 * parent cgroup has the access you're asking for.
 */
 static int devcgroup_update_access(struct dev_cgroup *devcgroup,
-				   int filetype, const char *buffer)
+				   int filetype, char *buffer)
 {
 	const char *b;
 	char temp[12];		/* 11 + 1 characters needed for a u32 */

@@ -654,7 +652,7 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
 }

 static int devcgroup_access_write(struct cgroup_subsys_state *css,
-				  struct cftype *cft, const char *buffer)
+				  struct cftype *cft, char *buffer)
 {
 	int retval;

@@ -684,13 +682,11 @@ static struct cftype dev_cgroup_files[] = {
 	{ }	/* terminate */
 };

-struct cgroup_subsys devices_subsys = {
-	.name = "devices",
+struct cgroup_subsys devices_cgrp_subsys = {
 	.css_alloc = devcgroup_css_alloc,
 	.css_free = devcgroup_css_free,
 	.css_online = devcgroup_online,
 	.css_offline = devcgroup_offline,
-	.subsys_id = devices_subsys_id,
 	.base_cftypes = dev_cgroup_files,
 };
...