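/*
 * Automatic process group scheduling ("autogroup"): each new session
 * created via setsid() is placed in its own task group, so CPU time
 * is shared fairly between sessions rather than individual tasks.
 */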
#ifdef CONFIG_SCHED_AUTOGROUP

#include "sched.h"

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/security.h>
#include <linux/export.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;

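/*
 * Boot-time setup: the init task starts out in the default autogroup,
 * which is backed by the root task group.
 */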
void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}

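/* Called when the backing task group is freed. */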
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

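/*
 * kref release function: the last reference is gone, so tear down the
 * task group backing this autogroup.
 */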
static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_destroy_group(ag->tg);
}

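/* Reference helpers; the final put invokes autogroup_destroy(). */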
static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}

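/*
 * Take a reference to @p's autogroup under siglock.  If the sighand is
 * already gone (task is exiting), fall back to the default autogroup.
 */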
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);

	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);

	return ag;
}

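/*
 * Allocate a new autogroup backed by a fresh task group under the
 * root task group.  On failure, warn (ratelimited) and return an
 * extra reference to the default autogroup rather than an error.
 */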
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;

	tg = sched_create_group(&root_task_group);

	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.  Thereafter, task_group()
	 * returns &root_task_group, so zero bandwidth is required.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;

	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kzalloc()");
	}

	return autogroup_kref_get(&autogroup_default);
}

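/*
 * Only fair-class tasks that currently sit in the root task group are
 * eligible for autogroup placement.
 */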
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (!sysctl_sched_autogroup_enabled)
		return false;

	if (tg != &root_task_group)
		return false;

	if (p->sched_class != &fair_sched_class)
		return false;

	/*
	 * We can only assume the task group can't go away on us if
	 * autogroup_move_group() can see us on ->thread_group list.
	 */
	if (p->flags & PF_EXITING)
		return false;

	return true;
}

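/*
 * Point @p's signal struct at @ag and requeue every thread in the
 * group into the new task group, taking a reference on @ag and
 * dropping the one held on the previous autogroup.
 */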
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));

	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);

	t = p;
	do {
		sched_move_task(t);
	} while_each_thread(p, t);

	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag;

	if (!sysctl_sched_autogroup_enabled)
		return;
	ag = autogroup_create();
	autogroup_move_group(p, ag);
	/* drop extra reference added by autogroup_create() */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock.  Currently has no users */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);

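/* A freshly forked signal struct inherits the forker's autogroup. */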
void sched_autogroup_fork(struct signal_struct *sig)
{
	if (!sysctl_sched_autogroup_enabled)
		return;
	sig->autogroup = autogroup_task_get(current);
}

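/* Drop the signal struct's autogroup reference on exit. */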
void sched_autogroup_exit(struct signal_struct *sig)
{
	if (!sysctl_sched_autogroup_enabled)
		return;
	autogroup_kref_put(sig->autogroup);
}

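/* Booting with "noautogroup" on the command line disables the feature. */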
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;

	return 1;
}

__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_SCHED_DEBUG
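/* Format an autogroup as "/autogroup-<id>" for scheduler debug output. */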
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "/autogroup-%ld", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */

#endif /* CONFIG_SCHED_AUTOGROUP */