/*
 *  kernel/cpuset.c
 *
 *  Processor and Memory placement constraints for sets of tasks.
 *
 *  Copyright (C) 2003 BULL SA.
 *  Copyright (C) 2004-2007 Silicon Graphics, Inc.
 *  Copyright (C) 2006 Google, Inc
 *
 *  Portions derived from Patrick Mochel's sysfs code.
 *  sysfs is Copyright (c) 2001-3 Patrick Mochel
 *
 *  2003-10-10 Written by Simon Derr.
 *  2003-10-22 Updates by Stephen Hemminger.
 *  2004 May-July Rework by Paul Jackson.
 *  2006 Rework by Paul Menage to use generic cgroups
 *  2008 Rework of the scheduler domains and CPU hotplug handling
 *       by Max Krasnyansky
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/mempolicy.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/time64.h>
#include <linux/backing-dev.h>
#include <linux/sort.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/cgroup.h>
#include <linux/wait.h>

DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key);

/* See "Frequency meter" comments, below. */

struct fmeter {
	int cnt;		/* unprocessed events count */
	int val;		/* most recent output value */
	time64_t time;		/* clock (secs) when val computed */
	spinlock_t lock;	/* guards read or write of above */
};

struct cpuset {
	struct cgroup_subsys_state css;

	unsigned long flags;		/* "unsigned long" so bitops work */

	/*
	 * On default hierarchy:
	 *
	 * The user-configured masks can only be changed by writing to
	 * cpuset.cpus and cpuset.mems, and won't be limited by the
	 * parent masks.
	 *
	 * The effective masks are the real masks that apply to the tasks
	 * in the cpuset. They may be changed if the configured masks are
	 * changed or hotplug happens.
	 *
	 * effective_mask == configured_mask & parent's effective_mask,
	 * and if it ends up empty, it will inherit the parent's mask.
	 *
	 * On legacy hierarchy:
	 *
	 * The user-configured masks are always the same as the effective masks.
	 */

	/* user-configured CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t cpus_allowed;
	nodemask_t mems_allowed;

	/* effective CPUs and Memory Nodes allowed to tasks */
	cpumask_var_t effective_cpus;
	nodemask_t effective_mems;

	/*
	 * These are the old Memory Nodes that tasks took on.
	 *
	 * - top_cpuset.old_mems_allowed is initialized to mems_allowed.
	 * - A new cpuset's old_mems_allowed is initialized when some
	 *   task is moved into it.
	 * - old_mems_allowed is used in cpuset_migrate_mm() when we change
	 *   cpuset.mems_allowed and have tasks' nodemask updated, and
	 *   then old_mems_allowed is updated to mems_allowed.
	 */
	nodemask_t old_mems_allowed;

	struct fmeter fmeter;		/* memory_pressure filter */

	/*
	 * Tasks are being attached to this cpuset.  Used to prevent
	 * zeroing cpus/mems_allowed between ->can_attach() and ->attach().
	 */
	int attach_in_progress;

	/* partition number for rebuild_sched_domains() */
	int pn;

	/* for custom sched domain */
	int relax_domain_level;
};
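
/*
 * Worked example of the rule above (illustrative only, not used by the
 * code): on the default hierarchy, a child configured with cpus "2-5"
 * under a parent whose effective_cpus is "0-3" gets effective_cpus "2-3"
 * (configured & parent's effective); if that intersection were empty,
 * the child would simply inherit the parent's effective mask.
 */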

static inline struct cpuset *css_cs(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuset, css) : NULL;
}

/* Retrieve the cpuset for a task */
static inline struct cpuset *task_cs(struct task_struct *task)
{
	return css_cs(task_css(task, cpuset_cgrp_id));
}

static inline struct cpuset *parent_cs(struct cpuset *cs)
{
	return css_cs(cs->css.parent);
}

#ifdef CONFIG_NUMA
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return task->mempolicy;
}
#else
static inline bool task_has_mempolicy(struct task_struct *task)
{
	return false;
}
#endif


/* bits in struct cpuset flags field */
typedef enum {
	CS_ONLINE,
	CS_CPU_EXCLUSIVE,
	CS_MEM_EXCLUSIVE,
	CS_MEM_HARDWALL,
	CS_MEMORY_MIGRATE,
	CS_SCHED_LOAD_BALANCE,
	CS_SPREAD_PAGE,
	CS_SPREAD_SLAB,
} cpuset_flagbits_t;

/* convenient tests for these bits */
static inline bool is_cpuset_online(const struct cpuset *cs)
{
	return test_bit(CS_ONLINE, &cs->flags);
}

static inline int is_cpu_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_CPU_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_exclusive(const struct cpuset *cs)
{
	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
}

static inline int is_mem_hardwall(const struct cpuset *cs)
{
	return test_bit(CS_MEM_HARDWALL, &cs->flags);
}

static inline int is_sched_load_balance(const struct cpuset *cs)
{
	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
}

static inline int is_memory_migrate(const struct cpuset *cs)
{
	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
}

static inline int is_spread_page(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_PAGE, &cs->flags);
}

static inline int is_spread_slab(const struct cpuset *cs)
{
	return test_bit(CS_SPREAD_SLAB, &cs->flags);
}

static struct cpuset top_cpuset = {
	.flags = ((1 << CS_ONLINE) | (1 << CS_CPU_EXCLUSIVE) |
		  (1 << CS_MEM_EXCLUSIVE)),
};

/**
 * cpuset_for_each_child - traverse online children of a cpuset
 * @child_cs: loop cursor pointing to the current child
 * @pos_css: used for iteration
 * @parent_cs: target cpuset to walk children of
 *
 * Walk @child_cs through the online children of @parent_cs.  Must be used
 * with RCU read locked.
 */
#define cpuset_for_each_child(child_cs, pos_css, parent_cs)		\
	css_for_each_child((pos_css), &(parent_cs)->css)		\
		if (is_cpuset_online(((child_cs) = css_cs((pos_css)))))

/**
 * cpuset_for_each_descendant_pre - pre-order walk of a cpuset's descendants
 * @des_cs: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @root_cs: target cpuset to walk the descendants of
 *
 * Walk @des_cs through the online descendants of @root_cs.  Must be used
 * with RCU read locked.  The caller may modify @pos_css by calling
 * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
 * iteration and the first node to be visited.
 */
#define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
		if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
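
/*
 * Usage sketch for the two iterators above (illustrative only): both yield
 * struct cpuset cursors and must run under rcu_read_lock(), e.g.
 *
 *	rcu_read_lock();
 *	cpuset_for_each_child(child, pos_css, parent_cs)
 *		do_something(child);
 *	rcu_read_unlock();
 *
 * where do_something() stands in for whatever per-child work is needed.
 */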

/*
 * There are two global locks guarding cpuset structures - cpuset_mutex and
 * callback_lock. We also require taking task_lock() when dereferencing a
 * task's cpuset pointer. See "The task_lock() exception", at the end of this
 * comment.
 *
 * A task must hold both locks to modify cpusets.  If a task holds
 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
 * is the only task able to also acquire callback_lock and be able to
 * modify cpusets.  It can perform various checks on the cpuset structure
 * first, knowing nothing will change.  It can also allocate memory while
 * just holding cpuset_mutex.  While it is performing these checks, various
 * callback routines can briefly acquire callback_lock to query cpusets.
 * Once it is ready to make the changes, it takes callback_lock, blocking
 * everyone else.
 *
 * Calls to the kernel memory allocator cannot be made while holding
 * callback_lock, as that would risk double tripping on callback_lock
 * from one of the callbacks into the cpuset code from within
 * __alloc_pages().
 *
 * If a task is only holding callback_lock, then it has read-only
 * access to cpusets.
 *
 * Now, the task_struct fields mems_allowed and mempolicy may be changed
 * by other tasks; we use the alloc_lock in the task_struct to protect
 * them.
 *
 * The cpuset_common_file_read() handlers only hold callback_lock across
 * small pieces of code, such as when reading out possibly multi-word
 * cpumasks and nodemasks.
 *
 * Accessing a task's cpuset should be done in accordance with the
 * guidelines for accessing subsystem state in kernel/cgroup.c
 */

static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
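
/*
 * Typical nesting as used throughout this file (illustrative sketch of the
 * rules above, not an additional requirement):
 *
 *	mutex_lock(&cpuset_mutex);
 *	... validate the change, allocate memory, sleeping is allowed ...
 *	spin_lock_irq(&callback_lock);
 *	... publish the new masks/flags ...
 *	spin_unlock_irq(&callback_lock);
 *	mutex_unlock(&cpuset_mutex);
 */
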
static struct workqueue_struct *cpuset_migrate_mm_wq;

/*
 * CPU / memory hotplug is handled asynchronously.
 */
static void cpuset_hotplug_workfn(struct work_struct *work);
static DECLARE_WORK(cpuset_hotplug_work, cpuset_hotplug_workfn);

static DECLARE_WAIT_QUEUE_HEAD(cpuset_attach_wq);

/*
 * This is ugly, but preserves the userspace API for existing cpuset
 * users. If someone tries to mount the "cpuset" filesystem, we
 * silently switch it to mount "cgroup" instead
 */
static struct dentry *cpuset_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name, void *data)
{
	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
	struct dentry *ret = ERR_PTR(-ENODEV);
	if (cgroup_fs) {
		char mountopts[] =
			"cpuset,noprefix,"
			"release_agent=/sbin/cpuset_release_agent";
		ret = cgroup_fs->mount(cgroup_fs, flags,
					   unused_dev_name, mountopts);
		put_filesystem(cgroup_fs);
	}
	return ret;
}

static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
	.mount = cpuset_mount,
};

/*
 * Return in pmask the portion of a cpuset's cpus_allowed that
 * are online.  If none are online, walk up the cpuset hierarchy
 * until we find one that does have some online cpus.  The top
 * cpuset always has some cpus online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of cpu_online_mask.
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
{
	while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
		cs = parent_cs(cs);
	cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
}

/*
 * Return in *pmask the portion of a cpuset's mems_allowed that
 * are online, with memory.  If none are online with memory, walk
 * up the cpuset hierarchy until we find one that does have some
 * online mems.  The top cpuset always has some mems online.
 *
 * One way or another, we guarantee to return some non-empty subset
 * of node_states[N_MEMORY].
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
{
	while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
		cs = parent_cs(cs);
	nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
}

/*
 * update task's spread flag if cpuset's page/slab spread flag is set
 *
 * Call with callback_lock or cpuset_mutex held.
 */
static void cpuset_update_task_spread_flag(struct cpuset *cs,
					struct task_struct *tsk)
{
	if (is_spread_page(cs))
		task_set_spread_page(tsk);
	else
		task_clear_spread_page(tsk);

	if (is_spread_slab(cs))
		task_set_spread_slab(tsk);
	else
		task_clear_spread_slab(tsk);
}

/*
 * is_cpuset_subset(p, q) - Is cpuset p a subset of cpuset q?
 *
 * One cpuset is a subset of another if all its allowed CPUs and
 * Memory Nodes are a subset of the other, and its exclusive flags
 * are only set if the other's are set.  Call holding cpuset_mutex.
 */

static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
{
	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
		nodes_subset(p->mems_allowed, q->mems_allowed) &&
		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
		is_mem_exclusive(p) <= is_mem_exclusive(q);
}

/**
 * alloc_trial_cpuset - allocate a trial cpuset
 * @cs: the cpuset that the trial cpuset duplicates
 */
static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
{
	struct cpuset *trial;

	trial = kmemdup(cs, sizeof(*cs), GFP_KERNEL);
	if (!trial)
		return NULL;

	if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
		goto free_cpus;

	cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
	cpumask_copy(trial->effective_cpus, cs->effective_cpus);
	return trial;

free_cpus:
	free_cpumask_var(trial->cpus_allowed);
free_cs:
	kfree(trial);
	return NULL;
}

/**
 * free_trial_cpuset - free the trial cpuset
 * @trial: the trial cpuset to be freed
 */
static void free_trial_cpuset(struct cpuset *trial)
{
	free_cpumask_var(trial->effective_cpus);
	free_cpumask_var(trial->cpus_allowed);
	kfree(trial);
}

/*
 * validate_change() - Used to validate that any proposed cpuset change
 *		       follows the structural rules for cpusets.
 *
 * If we replaced the flag and mask values of the current cpuset
 * (cur) with those values in the trial cpuset (trial), would
 * our various subset and exclusive rules still be valid?  Presumes
 * cpuset_mutex held.
 *
 * 'cur' is the address of an actual, in-use cpuset.  Operations
 * such as list traversal that depend on the actual address of the
 * cpuset in the list must use cur below, not trial.
 *
 * 'trial' is the address of bulk structure copy of cur, with
 * perhaps one or more of the fields cpus_allowed, mems_allowed,
 * or flags changed to new, trial values.
 *
 * Return 0 if valid, -errno if not.
 */

static int validate_change(struct cpuset *cur, struct cpuset *trial)
{
	struct cgroup_subsys_state *css;
	struct cpuset *c, *par;
	int ret;

	rcu_read_lock();

	/* Each of our child cpusets must be a subset of us */
	ret = -EBUSY;
	cpuset_for_each_child(c, css, cur)
		if (!is_cpuset_subset(c, trial))
			goto out;

	/* Remaining checks don't apply to root cpuset */
	ret = 0;
	if (cur == &top_cpuset)
		goto out;

	par = parent_cs(cur);

	/* On legacy hierarchy, we must be a subset of our parent cpuset. */
	ret = -EACCES;
	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
	    !is_cpuset_subset(trial, par))
		goto out;

	/*
	 * If either I or some sibling (!= me) is exclusive, we can't
	 * overlap
	 */
	ret = -EINVAL;
	cpuset_for_each_child(c, css, par) {
		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
		    c != cur &&
		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
			goto out;
		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
		    c != cur &&
		    nodes_intersects(trial->mems_allowed, c->mems_allowed))
			goto out;
	}

	/*
	 * Cpusets with tasks - existing or newly being attached - can't
	 * be changed to have empty cpus_allowed or mems_allowed.
	 */
	ret = -ENOSPC;
	if ((cgroup_is_populated(cur->css.cgroup) || cur->attach_in_progress)) {
		if (!cpumask_empty(cur->cpus_allowed) &&
		    cpumask_empty(trial->cpus_allowed))
			goto out;
		if (!nodes_empty(cur->mems_allowed) &&
		    nodes_empty(trial->mems_allowed))
			goto out;
	}

	/*
	 * We can't shrink if we won't have enough room for SCHED_DEADLINE
	 * tasks.
	 */
	ret = -EBUSY;
	if (is_cpu_exclusive(cur) &&
	    !cpuset_cpumask_can_shrink(cur->cpus_allowed,
				       trial->cpus_allowed))
		goto out;

	ret = 0;
out:
	rcu_read_unlock();
	return ret;
}
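
/*
 * Example of a rejected change (illustrative): if a sibling cpuset is
 * marked cpu_exclusive with cpus "0-1", a trial value whose cpus_allowed
 * intersects "0-1" fails the sibling-overlap check above and
 * validate_change() returns -EINVAL.
 */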

528
#ifdef CONFIG_SMP
P
Paul Jackson 已提交
529
/*
530
 * Helper routine for generate_sched_domains().
531
 * Do cpusets a, b have overlapping effective cpus_allowed masks?
P
Paul Jackson 已提交
532 533 534
 */
static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
{
535
	return cpumask_intersects(a->effective_cpus, b->effective_cpus);
P
Paul Jackson 已提交
536 537
}

538 539 540 541 542 543 544 545
static void
update_domain_attr(struct sched_domain_attr *dattr, struct cpuset *c)
{
	if (dattr->relax_domain_level < c->relax_domain_level)
		dattr->relax_domain_level = c->relax_domain_level;
	return;
}

546 547
static void update_domain_attr_tree(struct sched_domain_attr *dattr,
				    struct cpuset *root_cs)
548
{
549
	struct cpuset *cp;
550
	struct cgroup_subsys_state *pos_css;
551

552
	rcu_read_lock();
553
	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
554 555
		/* skip the whole subtree if @cp doesn't have any CPU */
		if (cpumask_empty(cp->cpus_allowed)) {
556
			pos_css = css_rightmost_descendant(pos_css);
557
			continue;
558
		}
559 560 561 562

		if (is_sched_load_balance(cp))
			update_domain_attr(dattr, cp);
	}
563
	rcu_read_unlock();
564 565
}

P
Paul Jackson 已提交
566
/*
 * generate_sched_domains()
 *
 * This function builds a partial partition of the system's CPUs.
 * A 'partial partition' is a set of non-overlapping subsets whose
 * union is a subset of that set.
 * The output of this function needs to be passed to kernel/sched/core.c
 * partition_sched_domains() routine, which will rebuild the scheduler's
 * load balancing domains (sched domains) as specified by that partial
 * partition.
P
Paul Jackson 已提交
576
 *
L
Li Zefan 已提交
577
 * See "What is sched_load_balance" in Documentation/cgroups/cpusets.txt
P
Paul Jackson 已提交
578 579 580 581 582 583 584
 * for a background explanation of this.
 *
 * Does not return errors, on the theory that the callers of this
 * routine would rather not worry about failures to rebuild sched
 * domains when operating in the severe memory shortage situations
 * that could cause allocation failures below.
 *
585
 * Must be called with cpuset_mutex held.
P
Paul Jackson 已提交
586 587
 *
 * The three key local variables below are:
588
 *    q  - a linked-list queue of cpuset pointers, used to implement a
P
Paul Jackson 已提交
589 590 591 592 593 594 595 596 597 598 599 600
 *	   top-down scan of all cpusets.  This scan loads a pointer
 *	   to each cpuset marked is_sched_load_balance into the
 *	   array 'csa'.  For our purposes, rebuilding the schedulers
 *	   sched domains, we can ignore !is_sched_load_balance cpusets.
 *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
 *	   that need to be load balanced, for convenient iterative
 *	   access by the subsequent code that finds the best partition,
 *	   i.e the set of domains (subsets) of CPUs such that the
 *	   cpus_allowed of every cpuset marked is_sched_load_balance
 *	   is a subset of one of these domains, while there are as
 *	   many such domains as possible, each as small as possible.
 * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
601
 *	   the kernel/sched/core.c routine partition_sched_domains() in a
P
Paul Jackson 已提交
602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619
 *	   convenient format, that can be easily compared to the prior
 *	   value to determine what partition elements (sched domains)
 *	   were changed (added or removed.)
 *
 * Finding the best partition (set of domains):
 *	The triple nested loops below over i, j, k scan over the
 *	load balanced cpusets (using the array of cpuset pointers in
 *	csa[]) looking for pairs of cpusets that have overlapping
 *	cpus_allowed, but which don't have the same 'pn' partition
 *	number and gives them in the same partition number.  It keeps
 *	looping on the 'restart' label until it can no longer find
 *	any such pairs.
 *
 *	The union of the cpus_allowed masks from the set of
 *	all cpusets having the same 'pn' value then form the one
 *	element of the partition (one sched domain) to be passed to
 *	partition_sched_domains().
 */
620
static int generate_sched_domains(cpumask_var_t **domains,
621
			struct sched_domain_attr **attributes)
P
Paul Jackson 已提交
622 623 624 625 626
{
	struct cpuset *cp;	/* scans q */
	struct cpuset **csa;	/* array of all cpuset ptrs */
	int csn;		/* how many cpuset ptrs in csa so far */
	int i, j, k;		/* indices for partition finding loops */
627
	cpumask_var_t *doms;	/* resulting partition; i.e. sched domains */
628
	cpumask_var_t non_isolated_cpus;  /* load balanced CPUs */
629
	struct sched_domain_attr *dattr;  /* attributes for custom domains */
630
	int ndoms = 0;		/* number of sched domains in result */
631
	int nslot;		/* next empty doms[] struct cpumask slot */
632
	struct cgroup_subsys_state *pos_css;
P
Paul Jackson 已提交
633 634

	doms = NULL;
635
	dattr = NULL;
636
	csa = NULL;
P
Paul Jackson 已提交
637

638 639 640 641
	if (!alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL))
		goto done;
	cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);

P
Paul Jackson 已提交
642 643
	/* Special case for the 99% of systems with one, full, sched domain */
	if (is_sched_load_balance(&top_cpuset)) {
644 645
		ndoms = 1;
		doms = alloc_sched_domains(ndoms);
P
Paul Jackson 已提交
646
		if (!doms)
647 648
			goto done;

649 650 651
		dattr = kmalloc(sizeof(struct sched_domain_attr), GFP_KERNEL);
		if (dattr) {
			*dattr = SD_ATTR_INIT;
652
			update_domain_attr_tree(dattr, &top_cpuset);
653
		}
654 655
		cpumask_and(doms[0], top_cpuset.effective_cpus,
				     non_isolated_cpus);
656 657

		goto done;
P
Paul Jackson 已提交
658 659
	}

660
	csa = kmalloc(nr_cpusets() * sizeof(cp), GFP_KERNEL);
P
Paul Jackson 已提交
661 662 663 664
	if (!csa)
		goto done;
	csn = 0;

665
	rcu_read_lock();
666
	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
667 668
		if (cp == &top_cpuset)
			continue;
669
		/*
670 671 672 673 674 675
		 * Continue traversing beyond @cp iff @cp has some CPUs and
		 * isn't load balancing.  The former is obvious.  The
		 * latter: All child cpusets contain a subset of the
		 * parent's cpus, so just skip them, and then we call
		 * update_domain_attr_tree() to calc relax_domain_level of
		 * the corresponding sched domain.
676
		 */
677
		if (!cpumask_empty(cp->cpus_allowed) &&
678 679
		    !(is_sched_load_balance(cp) &&
		      cpumask_intersects(cp->cpus_allowed, non_isolated_cpus)))
680
			continue;
681

682 683 684 685
		if (is_sched_load_balance(cp))
			csa[csn++] = cp;

		/* skip @cp's subtree */
686
		pos_css = css_rightmost_descendant(pos_css);
687 688
	}
	rcu_read_unlock();
P
Paul Jackson 已提交
689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716

	for (i = 0; i < csn; i++)
		csa[i]->pn = i;
	ndoms = csn;

restart:
	/* Find the best partition (set of sched domains) */
	for (i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
		int apn = a->pn;

		for (j = 0; j < csn; j++) {
			struct cpuset *b = csa[j];
			int bpn = b->pn;

			if (apn != bpn && cpusets_overlap(a, b)) {
				for (k = 0; k < csn; k++) {
					struct cpuset *c = csa[k];

					if (c->pn == bpn)
						c->pn = apn;
				}
				ndoms--;	/* one less element */
				goto restart;
			}
		}
	}

717 718 719 720
	/*
	 * Now we know how many domains to create.
	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
	 */
721
	doms = alloc_sched_domains(ndoms);
722
	if (!doms)
723 724 725 726 727 728
		goto done;

	/*
	 * The rest of the code, including the scheduler, can deal with
	 * dattr==NULL case. No need to abort if alloc fails.
	 */
729
	dattr = kmalloc(ndoms * sizeof(struct sched_domain_attr), GFP_KERNEL);
P
Paul Jackson 已提交
730 731 732

	for (nslot = 0, i = 0; i < csn; i++) {
		struct cpuset *a = csa[i];
733
		struct cpumask *dp;
P
Paul Jackson 已提交
734 735
		int apn = a->pn;

736 737 738 739 740
		if (apn < 0) {
			/* Skip completed partitions */
			continue;
		}

741
		dp = doms[nslot];
742 743 744 745

		if (nslot == ndoms) {
			static int warnings = 10;
			if (warnings) {
746 747
				pr_warn("rebuild_sched_domains confused: nslot %d, ndoms %d, csn %d, i %d, apn %d\n",
					nslot, ndoms, csn, i, apn);
748
				warnings--;
P
Paul Jackson 已提交
749
			}
750 751
			continue;
		}
P
Paul Jackson 已提交
752

753
		cpumask_clear(dp);
754 755 756 757 758 759
		if (dattr)
			*(dattr + nslot) = SD_ATTR_INIT;
		for (j = i; j < csn; j++) {
			struct cpuset *b = csa[j];

			if (apn == b->pn) {
760
				cpumask_or(dp, dp, b->effective_cpus);
761
				cpumask_and(dp, dp, non_isolated_cpus);
762 763 764 765 766
				if (dattr)
					update_domain_attr_tree(dattr + nslot, b);

				/* Done with this partition */
				b->pn = -1;
P
Paul Jackson 已提交
767 768
			}
		}
769
		nslot++;
P
Paul Jackson 已提交
770 771 772
	}
	BUG_ON(nslot != ndoms);

773
done:
774
	free_cpumask_var(non_isolated_cpus);
775 776
	kfree(csa);

777 778 779 780 781 782 783
	/*
	 * Fallback to the default domain if kmalloc() failed.
	 * See comments in partition_sched_domains().
	 */
	if (doms == NULL)
		ndoms = 1;

784 785 786 787 788 789 790 791
	*domains    = doms;
	*attributes = dattr;
	return ndoms;
}
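
/*
 * Worked example (illustrative): with load balancing turned off in the top
 * cpuset and two child cpusets that both have sched_load_balance set, one
 * on cpus "0-3" and one on cpus "4-7", the masks never overlap, so the
 * partition-finding loop leaves them in separate partitions and this
 * function returns ndoms == 2 with doms[] holding "0-3" and "4-7".
 */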

/*
 * Rebuild scheduler domains.
 *
792 793 794 795 796
 * If the flag 'sched_load_balance' of any cpuset with non-empty
 * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
 * which has that flag enabled, or if any cpuset with a non-empty
 * 'cpus' is removed, then call this routine to rebuild the
 * scheduler's dynamic sched domains.
797
 *
798
 * Call with cpuset_mutex held.  Takes get_online_cpus().
799
 */
800
static void rebuild_sched_domains_locked(void)
801 802
{
	struct sched_domain_attr *attr;
803
	cpumask_var_t *doms;
804 805
	int ndoms;

806
	lockdep_assert_held(&cpuset_mutex);
807
	get_online_cpus();
808

	/*
	 * We have raced with CPU hotplug. Don't do anything to avoid
	 * passing doms with offlined cpu to partition_sched_domains().
	 * Anyway, the hotplug work item will rebuild sched domains.
	 */
	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
815 816
		goto out;

817 818 819 820 821
	/* Generate domain masks and attrs */
	ndoms = generate_sched_domains(&doms, &attr);

	/* Have scheduler rebuild the domains */
	partition_sched_domains(ndoms, doms, attr);
822
out:
823
	put_online_cpus();
824
}
825
#else /* !CONFIG_SMP */
826
static void rebuild_sched_domains_locked(void)
827 828 829
{
}
#endif /* CONFIG_SMP */
P
Paul Jackson 已提交
830

831 832
void rebuild_sched_domains(void)
{
833
	mutex_lock(&cpuset_mutex);
834
	rebuild_sched_domains_locked();
835
	mutex_unlock(&cpuset_mutex);
P
Paul Jackson 已提交
836 837
}

838 839 840 841
/**
 * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
 *
842 843 844
 * Iterate through each task of @cs updating its cpus_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
845
 */
846
static void update_tasks_cpumask(struct cpuset *cs)
847
{
848 849 850 851 852
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, &it);
	while ((task = css_task_iter_next(&it)))
853
		set_cpus_allowed_ptr(task, cs->effective_cpus);
854
	css_task_iter_end(&it);
855 856
}

/*
 * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_cpus: temp variable for calculating new effective_cpus
 *
 * When the configured cpumask is changed, the effective cpumasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_cpus will be the same as cpus_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
{
	struct cpuset *cp;
872
	struct cgroup_subsys_state *pos_css;
873
	bool need_rebuild_sched_domains = false;
874 875

	rcu_read_lock();
876 877 878 879 880
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);

881 882 883 884
		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some CPUs.
		 */
885 886
		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
		    cpumask_empty(new_cpus))
887 888
			cpumask_copy(new_cpus, parent->effective_cpus);

889 890 891 892
		/* Skip the whole subtree if the cpumask remains the same. */
		if (cpumask_equal(new_cpus, cp->effective_cpus)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
893
		}
894

895
		if (!css_tryget_online(&cp->css))
896 897 898
			continue;
		rcu_read_unlock();

899
		spin_lock_irq(&callback_lock);
900
		cpumask_copy(cp->effective_cpus, new_cpus);
901
		spin_unlock_irq(&callback_lock);
902

903
		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
904 905
			!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));

906
		update_tasks_cpumask(cp);
907

908 909 910 911 912 913 914 915
		/*
		 * If the effective cpumask of any non-empty cpuset is changed,
		 * we need to rebuild sched domains.
		 */
		if (!cpumask_empty(cp->cpus_allowed) &&
		    is_sched_load_balance(cp))
			need_rebuild_sched_domains = true;

916 917 918 919
		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
920 921 922

	if (need_rebuild_sched_domains)
		rebuild_sched_domains_locked();
923 924
}

C
Cliff Wickman 已提交
925 926 927
/**
 * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it
 * @cs: the cpuset to consider
928
 * @trialcs: trial cpuset
C
Cliff Wickman 已提交
929 930
 * @buf: buffer of cpu numbers written to this cpuset
 */
931 932
static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
			  const char *buf)
L
Linus Torvalds 已提交
933
{
C
Cliff Wickman 已提交
934
	int retval;
L
Linus Torvalds 已提交
935

936
	/* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
937 938 939
	if (cs == &top_cpuset)
		return -EACCES;

940
	/*
941
	 * An empty cpus_allowed is ok only if the cpuset has no tasks.
942 943 944
	 * Since cpulist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have cpus.
945
	 */
946
	if (!*buf) {
947
		cpumask_clear(trialcs->cpus_allowed);
948
	} else {
949
		retval = cpulist_parse(buf, trialcs->cpus_allowed);
950 951
		if (retval < 0)
			return retval;
952

953 954
		if (!cpumask_subset(trialcs->cpus_allowed,
				    top_cpuset.cpus_allowed))
955
			return -EINVAL;
956
	}
P
Paul Jackson 已提交
957

P
Paul Menage 已提交
958
	/* Nothing to do if the cpus didn't change */
959
	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
P
Paul Menage 已提交
960
		return 0;
C
Cliff Wickman 已提交
961

962 963 964 965
	retval = validate_change(cs, trialcs);
	if (retval < 0)
		return retval;

966
	spin_lock_irq(&callback_lock);
967
	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
968
	spin_unlock_irq(&callback_lock);
P
Paul Jackson 已提交
969

970 971
	/* use trialcs->cpus_allowed as a temp variable */
	update_cpumasks_hier(cs, trialcs->cpus_allowed);
972
	return 0;
L
Linus Torvalds 已提交
973 974
}
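
/*
 * Userspace view (illustrative; the mount point below is just the common
 * legacy cpuset hierarchy location):
 *
 *	# echo 0-3 > /sys/fs/cgroup/cpuset/mygroup/cpuset.cpus
 *
 * ends up here via cpuset_write_resmask(): the list is parsed by
 * cpulist_parse(), checked by validate_change(), and then propagated to
 * descendants by update_cpumasks_hier().
 */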

975
/*
976 977 978 979 980
 * Migrate memory region from one set of nodes to another.  This is
 * performed asynchronously as it can be called from process migration path
 * holding locks involved in process management.  All mm migrations are
 * performed in the queued order and can be waited for by flushing
 * cpuset_migrate_mm_wq.
981 982
 */

983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000
struct cpuset_migrate_mm_work {
	struct work_struct	work;
	struct mm_struct	*mm;
	nodemask_t		from;
	nodemask_t		to;
};

static void cpuset_migrate_mm_workfn(struct work_struct *work)
{
	struct cpuset_migrate_mm_work *mwork =
		container_of(work, struct cpuset_migrate_mm_work, work);

	/* on a wq worker, no need to worry about %current's mems_allowed */
	do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
	mmput(mwork->mm);
	kfree(mwork);
}

1001 1002 1003
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
							const nodemask_t *to)
{
1004
	struct cpuset_migrate_mm_work *mwork;
1005

1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016
	mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
	if (mwork) {
		mwork->mm = mm;
		mwork->from = *from;
		mwork->to = *to;
		INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
		queue_work(cpuset_migrate_mm_wq, &mwork->work);
	} else {
		mmput(mm);
	}
}
1017

1018
static void cpuset_post_attach(void)
1019 1020
{
	flush_workqueue(cpuset_migrate_mm_wq);
1021 1022
}

1023
/*
1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034
 * cpuset_change_task_nodemask - change task's mems_allowed and mempolicy
 * @tsk: the task to change
 * @newmems: new nodes that the task will be set
 *
 * In order to avoid seeing no nodes if the old and new nodes are disjoint,
 * we structure updates as setting all new allowed nodes, then clearing newly
 * disallowed ones.
 */
static void cpuset_change_task_nodemask(struct task_struct *tsk,
					nodemask_t *newmems)
{
1035
	bool need_loop;
1036

1037
	task_lock(tsk);
1038 1039
	/*
	 * Determine if a loop is necessary if another thread is doing
1040
	 * read_mems_allowed_begin().  If at least one node remains unchanged and
1041 1042 1043 1044 1045
	 * tsk does not have a mempolicy, then an empty nodemask will not be
	 * possible when mems_allowed is larger than a word.
	 */
	need_loop = task_has_mempolicy(tsk) ||
			!nodes_intersects(*newmems, tsk->mems_allowed);
1046

1047 1048
	if (need_loop) {
		local_irq_disable();
1049
		write_seqcount_begin(&tsk->mems_allowed_seq);
1050
	}
1051

1052 1053
	nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
1054 1055

	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
1056
	tsk->mems_allowed = *newmems;
1057

1058
	if (need_loop) {
1059
		write_seqcount_end(&tsk->mems_allowed_seq);
1060 1061
		local_irq_enable();
	}
1062

1063
	task_unlock(tsk);
1064 1065
}

1066 1067
static void *cpuset_being_rebound;

1068 1069 1070 1071
/**
 * update_tasks_nodemask - Update the nodemasks of tasks in the cpuset.
 * @cs: the cpuset in which each task's mems_allowed mask needs to be changed
 *
1072 1073 1074
 * Iterate through each task of @cs updating its mems_allowed to the
 * effective cpuset's.  As this function is called with cpuset_mutex held,
 * cpuset membership stays stable.
1075
 */
1076
static void update_tasks_nodemask(struct cpuset *cs)
L
Linus Torvalds 已提交
1077
{
1078
	static nodemask_t newmems;	/* protected by cpuset_mutex */
1079 1080
	struct css_task_iter it;
	struct task_struct *task;
1081

1082
	cpuset_being_rebound = cs;		/* causes mpol_dup() rebind */
1083

1084
	guarantee_online_mems(cs, &newmems);
1085

1086
	/*
1087 1088 1089 1090
	 * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
	 * take while holding tasklist_lock.  Forks can happen - the
	 * mpol_dup() cpuset_being_rebound check will catch such forks,
	 * and rebind their vma mempolicies too.  Because we still hold
1091
	 * the global cpuset_mutex, we know that no other rebind effort
1092
	 * will be contending for the global variable cpuset_being_rebound.
1093
	 * It's ok if we rebind the same mm twice; mpol_rebind_mm()
1094
	 * is idempotent.  Also migrate pages in each mm to new nodes.
1095
	 */
1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111
	css_task_iter_start(&cs->css, &it);
	while ((task = css_task_iter_next(&it))) {
		struct mm_struct *mm;
		bool migrate;

		cpuset_change_task_nodemask(task, &newmems);

		mm = get_task_mm(task);
		if (!mm)
			continue;

		migrate = is_memory_migrate(cs);

		mpol_rebind_mm(mm, &cs->mems_allowed);
		if (migrate)
			cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
1112 1113
		else
			mmput(mm);
1114 1115
	}
	css_task_iter_end(&it);
1116

1117 1118 1119 1120 1121 1122
	/*
	 * All the tasks' nodemasks have been updated, update
	 * cs->old_mems_allowed.
	 */
	cs->old_mems_allowed = newmems;

1123
	/* We're done rebinding vmas to this cpuset's new mems_allowed. */
1124
	cpuset_being_rebound = NULL;
L
Linus Torvalds 已提交
1125 1126
}

/*
 * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
 * @cs: the cpuset to consider
 * @new_mems: a temp variable for calculating new effective_mems
 *
 * When the configured nodemask is changed, the effective nodemasks of this
 * cpuset and all its descendants need to be updated.
 *
 * On legacy hierarchy, effective_mems will be the same as mems_allowed.
 *
 * Called with cpuset_mutex held
 */
static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
{
	struct cpuset *cp;
1142
	struct cgroup_subsys_state *pos_css;
1143 1144

	rcu_read_lock();
1145 1146 1147 1148 1149
	cpuset_for_each_descendant_pre(cp, pos_css, cs) {
		struct cpuset *parent = parent_cs(cp);

		nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);

1150 1151 1152 1153
		/*
		 * If it becomes empty, inherit the effective mask of the
		 * parent, which is guaranteed to have some MEMs.
		 */
1154 1155
		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
		    nodes_empty(*new_mems))
1156 1157
			*new_mems = parent->effective_mems;

1158 1159 1160 1161
		/* Skip the whole subtree if the nodemask remains the same. */
		if (nodes_equal(*new_mems, cp->effective_mems)) {
			pos_css = css_rightmost_descendant(pos_css);
			continue;
1162
		}
1163

1164
		if (!css_tryget_online(&cp->css))
1165 1166 1167
			continue;
		rcu_read_unlock();

1168
		spin_lock_irq(&callback_lock);
1169
		cp->effective_mems = *new_mems;
1170
		spin_unlock_irq(&callback_lock);
1171

1172
		WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1173
			!nodes_equal(cp->mems_allowed, cp->effective_mems));
1174

1175
		update_tasks_nodemask(cp);
1176 1177 1178 1179 1180 1181 1182

		rcu_read_lock();
		css_put(&cp->css);
	}
	rcu_read_unlock();
}

1183 1184 1185
/*
 * Handle user request to change the 'mems' memory placement
 * of a cpuset.  Needs to validate the request, update the
1186 1187 1188 1189
 * cpusets mems_allowed, and for each task in the cpuset,
 * update mems_allowed and rebind task's mempolicy and any vma
 * mempolicies and if the cpuset is marked 'memory_migrate',
 * migrate the tasks pages to the new memory.
1190
 *
1191
 * Call with cpuset_mutex held. May take callback_lock during call.
1192 1193 1194 1195
 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
 * their mempolicies to the cpusets new mems_allowed.
 */
1196 1197
static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
			   const char *buf)
1198 1199 1200 1201
{
	int retval;

	/*
	 * top_cpuset.mems_allowed tracks node_states[N_MEMORY];
	 * it's read-only
	 */
1205 1206 1207 1208
	if (cs == &top_cpuset) {
		retval = -EACCES;
		goto done;
	}
1209 1210 1211 1212 1213 1214 1215 1216

	/*
	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
	 * Since nodelist_parse() fails on an empty mask, we special case
	 * that parsing.  The validate_change() call ensures that cpusets
	 * with tasks have memory.
	 */
	if (!*buf) {
1217
		nodes_clear(trialcs->mems_allowed);
1218
	} else {
1219
		retval = nodelist_parse(buf, trialcs->mems_allowed);
1220 1221 1222
		if (retval < 0)
			goto done;

1223
		if (!nodes_subset(trialcs->mems_allowed,
1224 1225
				  top_cpuset.mems_allowed)) {
			retval = -EINVAL;
1226 1227
			goto done;
		}
1228
	}
1229 1230

	if (nodes_equal(cs->mems_allowed, trialcs->mems_allowed)) {
1231 1232 1233
		retval = 0;		/* Too easy - nothing to do */
		goto done;
	}
1234
	retval = validate_change(cs, trialcs);
1235 1236 1237
	if (retval < 0)
		goto done;

1238
	spin_lock_irq(&callback_lock);
1239
	cs->mems_allowed = trialcs->mems_allowed;
1240
	spin_unlock_irq(&callback_lock);
1241

1242
	/* use trialcs->mems_allowed as a temp variable */
1243
	update_nodemasks_hier(cs, &trialcs->mems_allowed);
1244 1245 1246 1247
done:
	return retval;
}

1248 1249
int current_cpuset_is_being_rebound(void)
{
1250 1251 1252 1253 1254 1255 1256
	int ret;

	rcu_read_lock();
	ret = task_cs(current) == cpuset_being_rebound;
	rcu_read_unlock();

	return ret;
1257 1258
}

1259
static int update_relax_domain_level(struct cpuset *cs, s64 val)
1260
{
1261
#ifdef CONFIG_SMP
1262
	if (val < -1 || val >= sched_domain_level_max)
1263
		return -EINVAL;
1264
#endif
1265 1266 1267

	if (val != cs->relax_domain_level) {
		cs->relax_domain_level = val;
1268 1269
		if (!cpumask_empty(cs->cpus_allowed) &&
		    is_sched_load_balance(cs))
1270
			rebuild_sched_domains_locked();
1271 1272 1273 1274 1275
	}

	return 0;
}
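
/*
 * Value semantics, summarized here from Documentation/cgroups/cpusets.txt
 * for convenience: -1 requests the system default, 0 disables the idle
 * balancing search, and increasing values widen the search (SMT siblings,
 * cores in a package, CPUs in a node, and so on) up to
 * sched_domain_level_max.
 */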

1276
/**
1277 1278 1279
 * update_tasks_flags - update the spread flags of tasks in the cpuset.
 * @cs: the cpuset in which each task's spread flags needs to be changed
 *
1280 1281 1282
 * Iterate through each task of @cs updating its spread flags.  As this
 * function is called with cpuset_mutex held, cpuset membership stays
 * stable.
1283
 */
1284
static void update_tasks_flags(struct cpuset *cs)
1285
{
1286 1287 1288 1289 1290 1291 1292
	struct css_task_iter it;
	struct task_struct *task;

	css_task_iter_start(&cs->css, &it);
	while ((task = css_task_iter_next(&it)))
		cpuset_update_task_spread_flag(cs, task);
	css_task_iter_end(&it);
1293 1294
}

L
Linus Torvalds 已提交
1295 1296
/*
 * update_flag - read a 0 or a 1 in a file and update associated flag
1297 1298 1299
 * bit:		the bit to update (see cpuset_flagbits_t)
 * cs:		the cpuset to update
 * turning_on: 	whether the flag is being set or cleared
1300
 *
1301
 * Call with cpuset_mutex held.
L
Linus Torvalds 已提交
1302 1303
 */

1304 1305
static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
		       int turning_on)
L
Linus Torvalds 已提交
1306
{
1307
	struct cpuset *trialcs;
R
Rakib Mullick 已提交
1308
	int balance_flag_changed;
1309 1310
	int spread_flag_changed;
	int err;
L
Linus Torvalds 已提交
1311

1312 1313 1314 1315
	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs)
		return -ENOMEM;

L
Linus Torvalds 已提交
1316
	if (turning_on)
1317
		set_bit(bit, &trialcs->flags);
L
Linus Torvalds 已提交
1318
	else
1319
		clear_bit(bit, &trialcs->flags);
L
Linus Torvalds 已提交
1320

1321
	err = validate_change(cs, trialcs);
1322
	if (err < 0)
1323
		goto out;
P
Paul Jackson 已提交
1324 1325

	balance_flag_changed = (is_sched_load_balance(cs) !=
1326
				is_sched_load_balance(trialcs));
P
Paul Jackson 已提交
1327

1328 1329 1330
	spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
			|| (is_spread_page(cs) != is_spread_page(trialcs)));

1331
	spin_lock_irq(&callback_lock);
1332
	cs->flags = trialcs->flags;
1333
	spin_unlock_irq(&callback_lock);
1334

1335
	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1336
		rebuild_sched_domains_locked();
P
Paul Jackson 已提交
1337

1338
	if (spread_flag_changed)
1339
		update_tasks_flags(cs);
1340 1341 1342
out:
	free_trial_cpuset(trialcs);
	return err;
L
Linus Torvalds 已提交
1343 1344
}

/*
 * Frequency meter - How fast is some event occurring?
 *
 * These routines manage a digitally filtered, constant time based,
 * event frequency meter.  There are four routines:
 *   fmeter_init() - initialize a frequency meter.
 *   fmeter_markevent() - called each time the event happens.
 *   fmeter_getrate() - returns the recent rate of such events.
 *   fmeter_update() - internal routine used to update fmeter.
 *
 * A common data structure is passed to each of these routines,
 * which is used to keep track of the state required to manage the
 * frequency meter and its digital filter.
 *
 * The filter works on the number of events marked per unit time.
 * The filter is single-pole low-pass recursive (IIR).  The time unit
 * is 1 second.  Arithmetic is done using 32-bit integers scaled to
 * simulate 3 decimal digits of precision (multiplied by 1000).
 *
 * With an FM_COEF of 933, and a time base of 1 second, the filter
 * has a half-life of 10 seconds, meaning that if the events quit
 * happening, then the rate returned from the fmeter_getrate()
 * will be cut in half each 10 seconds, until it converges to zero.
 *
 * It is not worth doing a real infinitely recursive filter.  If more
 * than FM_MAXTICKS ticks have elapsed since the last filter event,
 * just compute FM_MAXTICKS ticks worth, by which point the level
 * will be stable.
 *
 * Limit the count of unprocessed events to FM_MAXCNT, so as to avoid
 * arithmetic overflow in the fmeter_update() routine.
 *
 * Given the simple 32 bit integer arithmetic used, this meter works
 * best for reporting rates between one per millisecond (msec) and
 * one per 32 (approx) seconds.  At constant rates faster than one
 * per msec it maxes out at values just under 1,000,000.  At constant
 * rates between one per msec, and one per second it will stabilize
 * to a value N*1000, where N is the rate of events per second.
 * At constant rates between one per second and one per 32 seconds,
 * it will be choppy, moving up on the seconds that have an event,
 * and then decaying until the next event.  At rates slower than
 * about one in 32 seconds, it decays all the way back to zero between
 * each event.
 */

#define FM_COEF 933		/* coefficient for half-life of 10 secs */
#define FM_MAXTICKS ((u32)99)   /* useless computing more ticks than this */
#define FM_MAXCNT 1000000	/* limit cnt to avoid overflow */
#define FM_SCALE 1000		/* faux fixed point scale */

/* Initialize a frequency meter */
static void fmeter_init(struct fmeter *fmp)
{
	fmp->cnt = 0;
	fmp->val = 0;
	fmp->time = 0;
	spin_lock_init(&fmp->lock);
}

/* Internal meter update - process cnt events and update value */
static void fmeter_update(struct fmeter *fmp)
{
	time64_t now;
	u32 ticks;

	now = ktime_get_seconds();
	ticks = now - fmp->time;

	if (ticks == 0)
		return;

	ticks = min(FM_MAXTICKS, ticks);
	while (ticks-- > 0)
		fmp->val = (FM_COEF * fmp->val) / FM_SCALE;
	fmp->time = now;

	fmp->val += ((FM_SCALE - FM_COEF) * fmp->cnt) / FM_SCALE;
	fmp->cnt = 0;
}

/* Process any previous ticks, then bump cnt by one (times scale). */
static void fmeter_markevent(struct fmeter *fmp)
{
	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	fmp->cnt = min(FM_MAXCNT, fmp->cnt + FM_SCALE);
	spin_unlock(&fmp->lock);
}

/* Process any previous ticks, then return current value. */
static int fmeter_getrate(struct fmeter *fmp)
{
	int val;

	spin_lock(&fmp->lock);
	fmeter_update(fmp);
	val = fmp->val;
	spin_unlock(&fmp->lock);
	return val;
}
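
/*
 * Worked example (illustrative): a burst that leaves fmp->val at 1000
 * decays by FM_COEF/FM_SCALE == 933/1000 per one-second tick with no new
 * events, so after ten idle seconds fmeter_getrate() reports roughly
 * 1000 * 0.933^10 ~= 500, matching the documented 10 second half-life.
 */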

1446 1447
static struct cpuset *cpuset_attach_old_cs;

1448
/* Called by cgroups to determine if a cpuset is usable; cpuset_mutex held */
1449
static int cpuset_can_attach(struct cgroup_taskset *tset)
1450
{
1451 1452
	struct cgroup_subsys_state *css;
	struct cpuset *cs;
1453 1454
	struct task_struct *task;
	int ret;
L
Linus Torvalds 已提交
1455

1456
	/* used later by cpuset_attach() */
1457 1458
	cpuset_attach_old_cs = task_cs(cgroup_taskset_first(tset, &css));
	cs = css_cs(css);
1459

1460 1461
	mutex_lock(&cpuset_mutex);

1462
	/* allow moving tasks into an empty cpuset if on default hierarchy */
1463
	ret = -ENOSPC;
1464
	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
1465
	    (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
1466
		goto out_unlock;
1467

1468
	cgroup_taskset_for_each(task, css, tset) {
1469 1470
		ret = task_can_attach(task, cs->cpus_allowed);
		if (ret)
1471 1472 1473 1474
			goto out_unlock;
		ret = security_task_setscheduler(task);
		if (ret)
			goto out_unlock;
1475
	}
1476

1477 1478 1479 1480 1481
	/*
	 * Mark attach is in progress.  This makes validate_change() fail
	 * changes which zero cpus/mems_allowed.
	 */
	cs->attach_in_progress++;
1482 1483 1484 1485
	ret = 0;
out_unlock:
	mutex_unlock(&cpuset_mutex);
	return ret;
1486
}
1487

1488
static void cpuset_cancel_attach(struct cgroup_taskset *tset)
1489
{
1490 1491 1492 1493 1494 1495
	struct cgroup_subsys_state *css;
	struct cpuset *cs;

	cgroup_taskset_first(tset, &css);
	cs = css_cs(css);

1496
	mutex_lock(&cpuset_mutex);
1497
	css_cs(css)->attach_in_progress--;
1498
	mutex_unlock(&cpuset_mutex);
1499
}
L
Linus Torvalds 已提交
1500

1501
/*
1502
 * Protected by cpuset_mutex.  cpus_attach is used only by cpuset_attach()
1503 1504 1505 1506 1507
 * but we can't allocate it dynamically there.  Define it global and
 * allocate from cpuset_init().
 */
static cpumask_var_t cpus_attach;

1508
static void cpuset_attach(struct cgroup_taskset *tset)
1509
{
1510
	/* static buf protected by cpuset_mutex */
1511
	static nodemask_t cpuset_attach_nodemask_to;
1512
	struct task_struct *task;
1513
	struct task_struct *leader;
1514 1515
	struct cgroup_subsys_state *css;
	struct cpuset *cs;
1516
	struct cpuset *oldcs = cpuset_attach_old_cs;
1517

1518 1519 1520
	cgroup_taskset_first(tset, &css);
	cs = css_cs(css);

1521 1522
	mutex_lock(&cpuset_mutex);

1523 1524 1525 1526
	/* prepare for attach */
	if (cs == &top_cpuset)
		cpumask_copy(cpus_attach, cpu_possible_mask);
	else
1527
		guarantee_online_cpus(cs, cpus_attach);
1528

1529
	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
1530

1531
	cgroup_taskset_for_each(task, css, tset) {
1532 1533 1534 1535 1536 1537 1538 1539 1540
		/*
		 * can_attach beforehand should guarantee that this doesn't
		 * fail.  TODO: have a better way to handle failure here
		 */
		WARN_ON_ONCE(set_cpus_allowed_ptr(task, cpus_attach));

		cpuset_change_task_nodemask(task, &cpuset_attach_nodemask_to);
		cpuset_update_task_spread_flag(cs, task);
	}
1541

1542
	/*
1543 1544
	 * Change mm for all threadgroup leaders. This is expensive and may
	 * sleep and should be moved outside migration path proper.
1545
	 */
1546
	cpuset_attach_nodemask_to = cs->effective_mems;
1547
	cgroup_taskset_for_each_leader(leader, css, tset) {
1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560
		struct mm_struct *mm = get_task_mm(leader);

		if (mm) {
			mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);

			/*
			 * old_mems_allowed is the same with mems_allowed
			 * here, except if this task is being moved
			 * automatically due to hotplug.  In that case
			 * @mems_allowed has been updated and is empty, so
			 * @old_mems_allowed is the right nodesets that we
			 * migrate mm from.
			 */
1561
			if (is_memory_migrate(cs))
1562 1563
				cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
						  &cpuset_attach_nodemask_to);
1564 1565
			else
				mmput(mm);
1566
		}
1567
	}
1568

1569
	cs->old_mems_allowed = cpuset_attach_nodemask_to;
1570

1571
	cs->attach_in_progress--;
1572 1573
	if (!cs->attach_in_progress)
		wake_up(&cpuset_attach_wq);
1574 1575

	mutex_unlock(&cpuset_mutex);
L
Linus Torvalds 已提交
1576 1577 1578 1579 1580
}

/* The various types of files and directories in a cpuset file system */

typedef enum {
1581
	FILE_MEMORY_MIGRATE,
L
Linus Torvalds 已提交
1582 1583
	FILE_CPULIST,
	FILE_MEMLIST,
1584 1585
	FILE_EFFECTIVE_CPULIST,
	FILE_EFFECTIVE_MEMLIST,
L
Linus Torvalds 已提交
1586 1587
	FILE_CPU_EXCLUSIVE,
	FILE_MEM_EXCLUSIVE,
1588
	FILE_MEM_HARDWALL,
P
Paul Jackson 已提交
1589
	FILE_SCHED_LOAD_BALANCE,
1590
	FILE_SCHED_RELAX_DOMAIN_LEVEL,
1591 1592
	FILE_MEMORY_PRESSURE_ENABLED,
	FILE_MEMORY_PRESSURE,
1593 1594
	FILE_SPREAD_PAGE,
	FILE_SPREAD_SLAB,
L
Linus Torvalds 已提交
1595 1596
} cpuset_filetype_t;

1597 1598
static int cpuset_write_u64(struct cgroup_subsys_state *css, struct cftype *cft,
			    u64 val)
1599
{
1600
	struct cpuset *cs = css_cs(css);
1601
	cpuset_filetype_t type = cft->private;
1602
	int retval = 0;
1603

1604
	mutex_lock(&cpuset_mutex);
1605 1606
	if (!is_cpuset_online(cs)) {
		retval = -ENODEV;
1607
		goto out_unlock;
1608
	}
1609 1610

	switch (type) {
L
Linus Torvalds 已提交
1611
	case FILE_CPU_EXCLUSIVE:
1612
		retval = update_flag(CS_CPU_EXCLUSIVE, cs, val);
L
Linus Torvalds 已提交
1613 1614
		break;
	case FILE_MEM_EXCLUSIVE:
1615
		retval = update_flag(CS_MEM_EXCLUSIVE, cs, val);
L
Linus Torvalds 已提交
1616
		break;
1617 1618 1619
	case FILE_MEM_HARDWALL:
		retval = update_flag(CS_MEM_HARDWALL, cs, val);
		break;
P
Paul Jackson 已提交
1620
	case FILE_SCHED_LOAD_BALANCE:
1621
		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val);
1622
		break;
1623
	case FILE_MEMORY_MIGRATE:
1624
		retval = update_flag(CS_MEMORY_MIGRATE, cs, val);
1625
		break;
1626
	case FILE_MEMORY_PRESSURE_ENABLED:
1627
		cpuset_memory_pressure_enabled = !!val;
1628
		break;
1629
	case FILE_SPREAD_PAGE:
1630
		retval = update_flag(CS_SPREAD_PAGE, cs, val);
1631 1632
		break;
	case FILE_SPREAD_SLAB:
1633
		retval = update_flag(CS_SPREAD_SLAB, cs, val);
1634
		break;
L
Linus Torvalds 已提交
1635 1636
	default:
		retval = -EINVAL;
1637
		break;
L
Linus Torvalds 已提交
1638
	}
1639 1640
out_unlock:
	mutex_unlock(&cpuset_mutex);
L
Linus Torvalds 已提交
1641 1642 1643
	return retval;
}

static int cpuset_write_s64(struct cgroup_subsys_state *css, struct cftype *cft,
			    s64 val)
{
	struct cpuset *cs = css_cs(css);
	cpuset_filetype_t type = cft->private;
	int retval = -ENODEV;

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))
		goto out_unlock;

	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		retval = update_relax_domain_level(cs, val);
		break;
	default:
		retval = -EINVAL;
		break;
	}
out_unlock:
	mutex_unlock(&cpuset_mutex);
	return retval;
}

/*
 * Common handling for a write to a "cpus" or "mems" file.
 */
static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct cpuset *cs = css_cs(of_css(of));
	struct cpuset *trialcs;
	int retval = -ENODEV;

	buf = strstrip(buf);

	/*
	 * CPU or memory hotunplug may leave @cs w/o any execution
	 * resources, in which case the hotplug code asynchronously updates
	 * configuration and transfers all tasks to the nearest ancestor
	 * which can execute.
	 *
	 * As writes to "cpus" or "mems" may restore @cs's execution
	 * resources, wait for the previously scheduled operations before
	 * proceeding, so that we don't end up repeatedly removing tasks
	 * added after execution capability is restored.
	 *
	 * cpuset_hotplug_work calls back into cgroup core via
	 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
	 * operation like this one can lead to a deadlock through kernfs
	 * active_ref protection.  Let's break the protection.  Losing the
	 * protection is okay as we check whether @cs is online after
	 * grabbing cpuset_mutex anyway.  This only happens on the legacy
	 * hierarchies.
	 */
	css_get(&cs->css);
	kernfs_break_active_protection(of->kn);
	flush_work(&cpuset_hotplug_work);

	mutex_lock(&cpuset_mutex);
	if (!is_cpuset_online(cs))
		goto out_unlock;

	trialcs = alloc_trial_cpuset(cs);
	if (!trialcs) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	switch (of_cft(of)->private) {
	case FILE_CPULIST:
		retval = update_cpumask(cs, trialcs, buf);
		break;
	case FILE_MEMLIST:
		retval = update_nodemask(cs, trialcs, buf);
		break;
	default:
		retval = -EINVAL;
		break;
	}

	free_trial_cpuset(trialcs);
out_unlock:
	mutex_unlock(&cpuset_mutex);
	kernfs_unbreak_active_protection(of->kn);
	css_put(&cs->css);
	flush_workqueue(cpuset_migrate_mm_wq);
	return retval ?: nbytes;
}

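/*
 * Illustrative sketch only (never compiled): how a userspace helper might
 * drive the "cpus" write path above on a legacy-hierarchy mount.  The
 * mount point and group name below are assumptions about the admin's
 * setup, not something defined by this file.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int example_set_cpus(const char *group, const char *cpulist)
{
	char path[256];
	ssize_t ret;
	int fd;

	/* e.g. group = "mygroup", cpulist = "0-3,8" */
	snprintf(path, sizeof(path), "/sys/fs/cgroup/cpuset/%s/cpuset.cpus",
		 group);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	/* the kernel side lands in cpuset_write_resmask()/update_cpumask() */
	ret = write(fd, cpulist, strlen(cpulist));
	close(fd);
	return ret < 0 ? -1 : 0;
}
#endif
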
/*
 * These ASCII lists should be read in a single call, by using a user
 * buffer large enough to hold the entire map.  If read in smaller
 * chunks, there is no guarantee of atomicity.  Since the display format
 * used (a list of ranges of sequential numbers) is variable length,
 * and since these maps can change value dynamically, one could read
 * gibberish by doing partial reads while a list was changing.
 */
static int cpuset_common_seq_show(struct seq_file *sf, void *v)
{
	struct cpuset *cs = css_cs(seq_css(sf));
	cpuset_filetype_t type = seq_cft(sf)->private;
	int ret = 0;

	spin_lock_irq(&callback_lock);

	switch (type) {
	case FILE_CPULIST:
		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
		break;
	case FILE_MEMLIST:
		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
		break;
	case FILE_EFFECTIVE_CPULIST:
		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->effective_cpus));
		break;
	case FILE_EFFECTIVE_MEMLIST:
		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->effective_mems));
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock_irq(&callback_lock);
	return ret;
}

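/*
 * For example, a cpuset confined to CPUs 0-3 and 8 (an assumed layout)
 * reads back from "cpus" as the single line "0-3,8\n", the same list
 * syntax that cpuset_write_resmask() accepts on the write side.
 */
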
static u64 cpuset_read_u64(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct cpuset *cs = css_cs(css);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_CPU_EXCLUSIVE:
		return is_cpu_exclusive(cs);
	case FILE_MEM_EXCLUSIVE:
		return is_mem_exclusive(cs);
	case FILE_MEM_HARDWALL:
		return is_mem_hardwall(cs);
	case FILE_SCHED_LOAD_BALANCE:
		return is_sched_load_balance(cs);
	case FILE_MEMORY_MIGRATE:
		return is_memory_migrate(cs);
	case FILE_MEMORY_PRESSURE_ENABLED:
		return cpuset_memory_pressure_enabled;
	case FILE_MEMORY_PRESSURE:
		return fmeter_getrate(&cs->fmeter);
	case FILE_SPREAD_PAGE:
		return is_spread_page(cs);
	case FILE_SPREAD_SLAB:
		return is_spread_slab(cs);
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}

static s64 cpuset_read_s64(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct cpuset *cs = css_cs(css);
	cpuset_filetype_t type = cft->private;
	switch (type) {
	case FILE_SCHED_RELAX_DOMAIN_LEVEL:
		return cs->relax_domain_level;
	default:
		BUG();
	}

	/* Unreachable but makes gcc happy */
	return 0;
}

/*
 * for the common functions, 'private' gives the type of file
 */

static struct cftype files[] = {
	{
		.name = "cpus",
		.seq_show = cpuset_common_seq_show,
		.write = cpuset_write_resmask,
		.max_write_len = (100U + 6 * NR_CPUS),
		.private = FILE_CPULIST,
	},

	{
		.name = "mems",
		.seq_show = cpuset_common_seq_show,
		.write = cpuset_write_resmask,
		.max_write_len = (100U + 6 * MAX_NUMNODES),
		.private = FILE_MEMLIST,
	},

	{
		.name = "effective_cpus",
		.seq_show = cpuset_common_seq_show,
		.private = FILE_EFFECTIVE_CPULIST,
	},

	{
		.name = "effective_mems",
		.seq_show = cpuset_common_seq_show,
		.private = FILE_EFFECTIVE_MEMLIST,
	},

	{
		.name = "cpu_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_CPU_EXCLUSIVE,
	},

	{
		.name = "mem_exclusive",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_EXCLUSIVE,
	},

	{
		.name = "mem_hardwall",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEM_HARDWALL,
	},

	{
		.name = "sched_load_balance",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SCHED_LOAD_BALANCE,
	},

	{
		.name = "sched_relax_domain_level",
		.read_s64 = cpuset_read_s64,
		.write_s64 = cpuset_write_s64,
		.private = FILE_SCHED_RELAX_DOMAIN_LEVEL,
	},

	{
		.name = "memory_migrate",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_MIGRATE,
	},

	{
		.name = "memory_pressure",
		.read_u64 = cpuset_read_u64,
		.private = FILE_MEMORY_PRESSURE,
	},

	{
		.name = "memory_spread_page",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_PAGE,
	},

	{
		.name = "memory_spread_slab",
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_SPREAD_SLAB,
	},

	{
		.name = "memory_pressure_enabled",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_u64 = cpuset_read_u64,
		.write_u64 = cpuset_write_u64,
		.private = FILE_MEMORY_PRESSURE_ENABLED,
	},

	{ }	/* terminate */
};

/*
 *	cpuset_css_alloc - allocate a cpuset css
 *	parent_css:	css of the parent cpuset, or NULL for the top cpuset
 */

static struct cgroup_subsys_state *
cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cpuset *cs;

	if (!parent_css)
		return &top_cpuset.css;

	cs = kzalloc(sizeof(*cs), GFP_KERNEL);
	if (!cs)
		return ERR_PTR(-ENOMEM);
	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
		goto free_cs;
	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
		goto free_cpus;

	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
	cpumask_clear(cs->cpus_allowed);
	nodes_clear(cs->mems_allowed);
	cpumask_clear(cs->effective_cpus);
	nodes_clear(cs->effective_mems);
	fmeter_init(&cs->fmeter);
	cs->relax_domain_level = -1;

	return &cs->css;

free_cpus:
	free_cpumask_var(cs->cpus_allowed);
free_cs:
	kfree(cs);
	return ERR_PTR(-ENOMEM);
}

static int cpuset_css_online(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);
	struct cpuset *parent = parent_cs(cs);
	struct cpuset *tmp_cs;
	struct cgroup_subsys_state *pos_css;

	if (!parent)
		return 0;

	mutex_lock(&cpuset_mutex);

	set_bit(CS_ONLINE, &cs->flags);
	if (is_spread_page(parent))
		set_bit(CS_SPREAD_PAGE, &cs->flags);
	if (is_spread_slab(parent))
		set_bit(CS_SPREAD_SLAB, &cs->flags);

	cpuset_inc();

	spin_lock_irq(&callback_lock);
	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
		cpumask_copy(cs->effective_cpus, parent->effective_cpus);
		cs->effective_mems = parent->effective_mems;
	}
	spin_unlock_irq(&callback_lock);

	if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
		goto out_unlock;

	/*
	 * Clone @parent's configuration if CGRP_CPUSET_CLONE_CHILDREN is
	 * set.  This flag handling is implemented in cgroup core for
	 * historical reasons - the flag may be specified during mount.
	 *
	 * Currently, if any sibling cpusets have exclusive cpus or mem, we
	 * refuse to clone the configuration - thereby refusing the task to
	 * be entered, and as a result refusing the sys_unshare() or
	 * clone() which initiated it.  If this becomes a problem for some
	 * users who wish to allow that scenario, then this could be
	 * changed to grant parent->cpus_allowed-sibling_cpus_exclusive
	 * (and likewise for mems) to the new cgroup.
	 */
	rcu_read_lock();
	cpuset_for_each_child(tmp_cs, pos_css, parent) {
		if (is_mem_exclusive(tmp_cs) || is_cpu_exclusive(tmp_cs)) {
			rcu_read_unlock();
			goto out_unlock;
		}
	}
	rcu_read_unlock();

	spin_lock_irq(&callback_lock);
	cs->mems_allowed = parent->mems_allowed;
	cs->effective_mems = parent->mems_allowed;
	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
	spin_unlock_irq(&callback_lock);
out_unlock:
	mutex_unlock(&cpuset_mutex);
	return 0;
}

/*
 * If the cpuset being removed has its flag 'sched_load_balance'
 * enabled, then simulate turning sched_load_balance off, which
 * will call rebuild_sched_domains_locked().
 */

static void cpuset_css_offline(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	mutex_lock(&cpuset_mutex);

	if (is_sched_load_balance(cs))
		update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);

	cpuset_dec();
	clear_bit(CS_ONLINE, &cs->flags);

	mutex_unlock(&cpuset_mutex);
}

static void cpuset_css_free(struct cgroup_subsys_state *css)
{
	struct cpuset *cs = css_cs(css);

	free_cpumask_var(cs->effective_cpus);
	free_cpumask_var(cs->cpus_allowed);
	kfree(cs);
}

static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
	mutex_lock(&cpuset_mutex);
	spin_lock_irq(&callback_lock);

	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
		cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
		top_cpuset.mems_allowed = node_possible_map;
	} else {
		cpumask_copy(top_cpuset.cpus_allowed,
			     top_cpuset.effective_cpus);
		top_cpuset.mems_allowed = top_cpuset.effective_mems;
	}

	spin_unlock_irq(&callback_lock);
	mutex_unlock(&cpuset_mutex);
}

struct cgroup_subsys cpuset_cgrp_subsys = {
	.css_alloc	= cpuset_css_alloc,
	.css_online	= cpuset_css_online,
	.css_offline	= cpuset_css_offline,
	.css_free	= cpuset_css_free,
	.can_attach	= cpuset_can_attach,
	.cancel_attach	= cpuset_cancel_attach,
	.attach		= cpuset_attach,
	.post_attach	= cpuset_post_attach,
	.bind		= cpuset_bind,
	.legacy_cftypes	= files,
	.early_init	= true,
};

/**
 * cpuset_init - initialize cpusets at system boot
 *
 * Description: Initialize top_cpuset and the cpuset internal file system.
 **/

int __init cpuset_init(void)
{
	int err = 0;

	if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
		BUG();
	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
		BUG();

	cpumask_setall(top_cpuset.cpus_allowed);
	nodes_setall(top_cpuset.mems_allowed);
	cpumask_setall(top_cpuset.effective_cpus);
	nodes_setall(top_cpuset.effective_mems);

	fmeter_init(&top_cpuset.fmeter);
	set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
	top_cpuset.relax_domain_level = -1;

	err = register_filesystem(&cpuset_fs_type);
	if (err < 0)
		return err;

	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
		BUG();

	return 0;
}

/*
 * If CPU and/or memory hotplug handlers, below, unplug any CPUs
 * or memory nodes, we need to walk over the cpuset hierarchy,
 * removing that CPU or node from all cpusets.  If this removes the
 * last CPU or node from a cpuset, then move the tasks in the empty
 * cpuset to its next-highest non-empty parent.
 */
static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
{
	struct cpuset *parent;

	/*
	 * Find its next-highest non-empty parent (the top cpuset
	 * has online cpus, so it can't be empty).
	 */
	parent = parent_cs(cs);
	while (cpumask_empty(parent->cpus_allowed) ||
			nodes_empty(parent->mems_allowed))
		parent = parent_cs(parent);

	if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
		pr_err("cpuset: failed to transfer tasks out of empty cpuset ");
		pr_cont_cgroup_name(cs->css.cgroup);
		pr_cont("\n");
	}
}

static void
hotplug_update_tasks_legacy(struct cpuset *cs,
			    struct cpumask *new_cpus, nodemask_t *new_mems,
			    bool cpus_updated, bool mems_updated)
{
	bool is_empty;

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->cpus_allowed, new_cpus);
	cpumask_copy(cs->effective_cpus, new_cpus);
	cs->mems_allowed = *new_mems;
	cs->effective_mems = *new_mems;
	spin_unlock_irq(&callback_lock);

	/*
	 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
	 * as the tasks will be migrated to an ancestor.
	 */
	if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
		update_tasks_cpumask(cs);
	if (mems_updated && !nodes_empty(cs->mems_allowed))
		update_tasks_nodemask(cs);

	is_empty = cpumask_empty(cs->cpus_allowed) ||
		   nodes_empty(cs->mems_allowed);

	mutex_unlock(&cpuset_mutex);

	/*
	 * Move tasks to the nearest ancestor with execution resources.
	 * This is a full cgroup operation which will also call back into
	 * cpuset, so it should be done outside any lock.
	 */
	if (is_empty)
		remove_tasks_in_empty_cpuset(cs);

	mutex_lock(&cpuset_mutex);
}

static void
hotplug_update_tasks(struct cpuset *cs,
		     struct cpumask *new_cpus, nodemask_t *new_mems,
		     bool cpus_updated, bool mems_updated)
{
	if (cpumask_empty(new_cpus))
		cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
	if (nodes_empty(*new_mems))
		*new_mems = parent_cs(cs)->effective_mems;

	spin_lock_irq(&callback_lock);
	cpumask_copy(cs->effective_cpus, new_cpus);
	cs->effective_mems = *new_mems;
	spin_unlock_irq(&callback_lock);

	if (cpus_updated)
		update_tasks_cpumask(cs);
	if (mems_updated)
		update_tasks_nodemask(cs);
}

/**
 * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
 * @cs: cpuset of interest
 *
 * Compare @cs's cpu and mem masks against top_cpuset and if some have gone
 * offline, update @cs accordingly.  If @cs ends up with no CPU or memory,
 * all its tasks are moved to the nearest ancestor with both resources.
 */
static void cpuset_hotplug_update_tasks(struct cpuset *cs)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated;
	bool mems_updated;
retry:
	wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);

	mutex_lock(&cpuset_mutex);

	/*
	 * We have raced with task attaching. We wait until attaching
	 * is finished, so we won't attach a task to an empty cpuset.
	 */
	if (cs->attach_in_progress) {
		mutex_unlock(&cpuset_mutex);
		goto retry;
	}

	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);

	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
	mems_updated = !nodes_equal(new_mems, cs->effective_mems);

	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
		hotplug_update_tasks(cs, &new_cpus, &new_mems,
				     cpus_updated, mems_updated);
	else
		hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
					    cpus_updated, mems_updated);

	mutex_unlock(&cpuset_mutex);
}

/**
 * cpuset_hotplug_workfn - handle CPU/memory hotunplug for a cpuset
 *
 * This function is called after either CPU or memory configuration has
 * changed and updates cpuset accordingly.  The top_cpuset is always
 * synchronized to cpu_active_mask and N_MEMORY, which is necessary in
 * order to make cpusets transparent (of no effect) on systems that are
 * actively using CPU hotplug but making no active use of cpusets.
 *
 * Non-root cpusets are only affected by offlining.  If any CPUs or memory
 * nodes have been taken down, cpuset_hotplug_update_tasks() is invoked on
 * all descendants.
 *
 * Note that CPU offlining during suspend is ignored.  We don't modify
 * cpusets across suspend/resume cycles at all.
 */
static void cpuset_hotplug_workfn(struct work_struct *work)
{
	static cpumask_t new_cpus;
	static nodemask_t new_mems;
	bool cpus_updated, mems_updated;
	bool on_dfl = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);

	mutex_lock(&cpuset_mutex);

	/* fetch the available cpus/mems and find out which changed how */
	cpumask_copy(&new_cpus, cpu_active_mask);
	new_mems = node_states[N_MEMORY];

	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);

	/* synchronize cpus_allowed to cpu_active_mask */
	if (cpus_updated) {
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
		cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
		spin_unlock_irq(&callback_lock);
		/* we don't mess with cpumasks of tasks in top_cpuset */
	}

	/* synchronize mems_allowed to N_MEMORY */
	if (mems_updated) {
		spin_lock_irq(&callback_lock);
		if (!on_dfl)
			top_cpuset.mems_allowed = new_mems;
		top_cpuset.effective_mems = new_mems;
		spin_unlock_irq(&callback_lock);
		update_tasks_nodemask(&top_cpuset);
	}

	mutex_unlock(&cpuset_mutex);

	/* if cpus or mems changed, we need to propagate to descendants */
	if (cpus_updated || mems_updated) {
		struct cpuset *cs;
		struct cgroup_subsys_state *pos_css;

		rcu_read_lock();
		cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
			if (cs == &top_cpuset || !css_tryget_online(&cs->css))
				continue;
			rcu_read_unlock();

			cpuset_hotplug_update_tasks(cs);

			rcu_read_lock();
			css_put(&cs->css);
		}
		rcu_read_unlock();
	}

	/* rebuild sched domains if cpus_allowed has changed */
	if (cpus_updated)
		rebuild_sched_domains();
}

void cpuset_update_active_cpus(bool cpu_online)
{
	/*
	 * We're inside cpu hotplug critical region which usually nests
	 * inside cgroup synchronization.  Bounce actual hotplug processing
	 * to a work item to avoid reverse locking order.
	 *
	 * We still need to do partition_sched_domains() synchronously;
	 * otherwise, the scheduler will get confused and put tasks on the
	 * dead CPU.  Fall back to the default single domain.
	 * cpuset_hotplug_workfn() will rebuild it as necessary.
	 */
	partition_sched_domains(1, NULL, NULL);
	schedule_work(&cpuset_hotplug_work);
}

/*
 * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY].
 * Call this routine anytime after node_states[N_MEMORY] changes.
 * See cpuset_update_active_cpus() for CPU hotplug handling.
 */
static int cpuset_track_online_nodes(struct notifier_block *self,
				unsigned long action, void *arg)
{
	schedule_work(&cpuset_hotplug_work);
	return NOTIFY_OK;
}

static struct notifier_block cpuset_track_online_nodes_nb = {
	.notifier_call = cpuset_track_online_nodes,
	.priority = 10,		/* ??! */
};

/**
 * cpuset_init_smp - initialize cpus_allowed
 *
 * Description: Finish top cpuset after cpu, node maps are initialized
 */
void __init cpuset_init_smp(void)
{
	cpumask_copy(top_cpuset.cpus_allowed, cpu_active_mask);
	top_cpuset.mems_allowed = node_states[N_MEMORY];
	top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;

	cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
	top_cpuset.effective_mems = node_states[N_MEMORY];

	register_hotmemory_notifier(&cpuset_track_online_nodes_nb);

	cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
	BUG_ON(!cpuset_migrate_mm_wq);
}

/**
 * cpuset_cpus_allowed - return cpus_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed.
 * @pmask: pointer to struct cpumask variable to receive cpus_allowed set.
 *
 * Description: Returns the cpumask_var_t cpus_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of cpu_online_mask, even if this means going outside the
 * task's cpuset.
 **/

void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
{
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_cpus(task_cs(tsk), pmask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);
}
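
/*
 * Illustrative sketch only (never compiled): the typical caller pattern,
 * loosely modelled on sched_setaffinity(), which clamps a requested
 * affinity mask to what the task's cpuset allows.  The function name and
 * the trimmed error handling are made up for the example.
 */
#if 0
static int example_clamp_affinity(struct task_struct *p,
				  const struct cpumask *requested,
				  struct cpumask *new_mask)
{
	cpumask_var_t cpus_allowed;

	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL))
		return -ENOMEM;

	cpuset_cpus_allowed(p, cpus_allowed);	/* never empty, see above */
	cpumask_and(new_mask, requested, cpus_allowed);
	free_cpumask_var(cpus_allowed);

	return cpumask_empty(new_mask) ? -EINVAL : 0;
}
#endif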

void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
{
	rcu_read_lock();
	do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
	rcu_read_unlock();

	/*
	 * We own tsk->cpus_allowed, nobody can change it under us.
	 *
	 * But we used cs && cs->cpus_allowed lockless and thus can
	 * race with cgroup_attach_task() or update_cpumask() and get
	 * the wrong tsk->cpus_allowed. However, both cases imply the
	 * subsequent cpuset_change_cpumask()->set_cpus_allowed_ptr()
	 * which takes task_rq_lock().
	 *
	 * If we are called after it dropped the lock we must see all
	 * changes in tsk_cs()->cpus_allowed. Otherwise we can temporarily
	 * set any mask even if it is not right from task_cs() pov,
	 * the pending set_cpus_allowed_ptr() will fix things.
	 *
	 * select_fallback_rq() will fix things up and set cpu_possible_mask
	 * if required.
	 */
}

void __init cpuset_init_current_mems_allowed(void)
{
	nodes_setall(current->mems_allowed);
}

/**
 * cpuset_mems_allowed - return mems_allowed mask from a task's cpuset.
 * @tsk: pointer to task_struct from which to obtain cpuset->mems_allowed.
 *
 * Description: Returns the nodemask_t mems_allowed of the cpuset
 * attached to the specified @tsk.  Guaranteed to return some non-empty
 * subset of node_states[N_MEMORY], even if this means going outside the
 * task's cpuset.
 **/

nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
{
	nodemask_t mask;
	unsigned long flags;

	spin_lock_irqsave(&callback_lock, flags);
	rcu_read_lock();
	guarantee_online_mems(task_cs(tsk), &mask);
	rcu_read_unlock();
	spin_unlock_irqrestore(&callback_lock, flags);

	return mask;
}

/**
 * cpuset_nodemask_valid_mems_allowed - check nodemask vs. current mems_allowed
 * @nodemask: the nodemask to be checked
 *
 * Are any of the nodes in the nodemask allowed in current->mems_allowed?
 */
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return nodes_intersects(*nodemask, current->mems_allowed);
}

/*
 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
 * mem_hardwall ancestor to the specified cpuset.  Call holding
 * callback_lock.  If no ancestor is mem_exclusive or mem_hardwall
 * (an unusual configuration), then returns the root cpuset.
 */
static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
{
	while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && parent_cs(cs))
		cs = parent_cs(cs);
	return cs;
}

/**
 * cpuset_node_allowed - Can we allocate on a memory node?
 * @node: is this an allowed node?
 * @gfp_mask: memory allocation flags
 *
 * If we're in interrupt, yes, we can always allocate.  If @node is set in
 * current's mems_allowed, yes.  If it's not a __GFP_HARDWALL request and this
 * node is set in the nearest hardwalled cpuset ancestor to current's cpuset,
 * yes.  If current has access to memory reserves due to TIF_MEMDIE, yes.
 * Otherwise, no.
 *
 * GFP_USER allocations are marked with the __GFP_HARDWALL bit,
 * and do not allow allocations outside the current task's cpuset
 * unless the task has been OOM killed and so is marked TIF_MEMDIE.
 * GFP_KERNEL allocations are not so marked, so can escape to the
 * nearest enclosing hardwalled ancestor cpuset.
 *
 * Scanning up parent cpusets requires callback_lock.  The
 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
 * current task's mems_allowed came up empty on the first pass over
 * the zonelist.  So only GFP_KERNEL allocations, if all nodes in the
 * cpuset are short of memory, might require taking the callback_lock.
 *
 * The first call here from mm/page_alloc:get_page_from_freelist()
 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
 * so no allocation on a node outside the cpuset is allowed (unless
 * in interrupt, of course).
 *
 * The second pass through get_page_from_freelist() doesn't even call
 * here for GFP_ATOMIC calls.  For those calls, the __alloc_pages()
 * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set
 * in alloc_flags.  That logic and the checks below have the combined
 * effect that:
 *	in_interrupt - any node ok (current task context irrelevant)
 *	GFP_ATOMIC   - any node ok
 *	TIF_MEMDIE   - any node ok
 *	GFP_KERNEL   - any node in enclosing hardwalled cpuset ok
 *	GFP_USER     - only nodes in current task's mems allowed ok.
 */
bool __cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	struct cpuset *cs;		/* current cpuset ancestors */
	int allowed;			/* is allocation in zone z allowed? */
	unsigned long flags;

	if (in_interrupt())
		return true;
	if (node_isset(node, current->mems_allowed))
		return true;
	/*
	 * Allow tasks that have access to memory reserves because they have
	 * been OOM killed to get memory anywhere.
	 */
	if (unlikely(test_thread_flag(TIF_MEMDIE)))
		return true;
	if (gfp_mask & __GFP_HARDWALL)	/* If hardwall request, stop here */
		return false;

	if (current->flags & PF_EXITING) /* Let dying task have memory */
		return true;

	/* Not hardwall and node outside mems_allowed: scan up cpusets */
	spin_lock_irqsave(&callback_lock, flags);

	rcu_read_lock();
	cs = nearest_hardwall_ancestor(task_cs(current));
	allowed = node_isset(node, cs->mems_allowed);
	rcu_read_unlock();

	spin_unlock_irqrestore(&callback_lock, flags);
	return allowed;
}
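
/*
 * Illustrative sketch only (never compiled): how an allocator-style
 * caller could use the check above to pick the first usable node for an
 * allocation.  Real callers go through the cpuset_node_allowed() and
 * cpuset_zone_allowed() wrappers in <linux/cpuset.h>.
 */
#if 0
static int example_first_allowed_node(gfp_t gfp_mask)
{
	int nid;

	for_each_node_state(nid, N_MEMORY)
		if (__cpuset_node_allowed(nid, gfp_mask))
			return nid;

	return NUMA_NO_NODE;	/* no allowed node found */
}
#endif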

/**
 * cpuset_mem_spread_node() - On which node to begin search for a file page
 * cpuset_slab_spread_node() - On which node to begin search for a slab page
 *
 * If a task is marked PF_SPREAD_PAGE or PF_SPREAD_SLAB (as for
 * tasks in a cpuset with is_spread_page or is_spread_slab set),
 * and if the memory allocation used cpuset_mem_spread_node()
 * to determine on which node to start looking, as it will for
 * certain page cache or slab cache pages such as used for file
 * system buffers and inode caches, then instead of starting on the
 * local node to look for a free page, rather spread the starting
 * node around the task's mems_allowed nodes.
 *
 * We don't have to worry about the returned node being offline
 * because "it can't happen", and even if it did, it would be ok.
 *
 * The routines calling guarantee_online_mems() are careful to
 * only set nodes in task->mems_allowed that are online.  So it
 * should not be possible for the following code to return an
 * offline node.  But if it did, that would be ok, as this routine
 * is not returning the node where the allocation must be, only
 * the node where the search should start.  The zonelist passed to
 * __alloc_pages() will include all nodes.  If the slab allocator
 * is passed an offline node, it will fall back to the local node.
 * See kmem_cache_alloc_node().
 */

static int cpuset_spread_node(int *rotor)
{
	return *rotor = next_node_in(*rotor, current->mems_allowed);
}

int cpuset_mem_spread_node(void)
{
	if (current->cpuset_mem_spread_rotor == NUMA_NO_NODE)
		current->cpuset_mem_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_mem_spread_rotor);
}

int cpuset_slab_spread_node(void)
{
	if (current->cpuset_slab_spread_rotor == NUMA_NO_NODE)
		current->cpuset_slab_spread_rotor =
			node_random(&current->mems_allowed);

	return cpuset_spread_node(&current->cpuset_slab_spread_rotor);
}

EXPORT_SYMBOL_GPL(cpuset_mem_spread_node);
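
/*
 * Illustrative sketch only (never compiled): roughly how the page cache
 * uses the spread rotor, in the spirit of __page_cache_alloc().  The
 * function name is made up; the retry-on-mems-change loop of the real
 * caller is omitted for brevity.
 */
#if 0
static struct page *example_spread_page_alloc(gfp_t gfp)
{
	int nid = numa_node_id();

	/* PF_SPREAD_PAGE is set for tasks in a cpuset with memory_spread_page */
	if (cpuset_do_page_mem_spread())
		nid = cpuset_mem_spread_node();

	return __alloc_pages_node(nid, gfp, 0);
}
#endif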

/**
 * cpuset_mems_allowed_intersects - Does @tsk1's mems_allowed intersect @tsk2's?
 * @tsk1: pointer to task_struct of some task.
 * @tsk2: pointer to task_struct of some other task.
 *
 * Description: Return true if @tsk1's mems_allowed intersects the
 * mems_allowed of @tsk2.  Used by the OOM killer to determine if
 * one of the task's memory usage might impact the memory available
 * to the other.
 **/

int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
				   const struct task_struct *tsk2)
{
	return nodes_intersects(tsk1->mems_allowed, tsk2->mems_allowed);
}

/**
 * cpuset_print_current_mems_allowed - prints current's cpuset and mems_allowed
 *
 * Description: Prints current's name, cpuset name, and cached copy of its
 * mems_allowed to the kernel log.
 */
void cpuset_print_current_mems_allowed(void)
{
	struct cgroup *cgrp;

	rcu_read_lock();

	cgrp = task_cs(current)->css.cgroup;
	pr_info("%s cpuset=", current->comm);
	pr_cont_cgroup_name(cgrp);
	pr_cont(" mems_allowed=%*pbl\n",
		nodemask_pr_args(&current->mems_allowed));

	rcu_read_unlock();
}

/*
 * Collection of memory_pressure is suppressed unless
 * this flag is enabled by writing "1" to the special
 * cpuset file 'memory_pressure_enabled' in the root cpuset.
 */

int cpuset_memory_pressure_enabled __read_mostly;

/**
 * cpuset_memory_pressure_bump - keep stats of per-cpuset reclaims.
 *
 * Keep a running average of the rate of synchronous (direct)
 * page reclaim efforts initiated by tasks in each cpuset.
 *
 * This represents the rate at which some task in the cpuset
 * ran low on memory on all nodes it was allowed to use, and
 * had to enter the kernel's page reclaim code in an effort to
 * create more free memory by tossing clean pages or swapping
 * or writing dirty pages.
 *
 * Display to user space in the per-cpuset read-only file
 * "memory_pressure".  Value displayed is an integer
 * representing the recent rate of entry into the synchronous
 * (direct) page reclaim by any task attached to the cpuset.
 **/

void __cpuset_memory_pressure_bump(void)
{
	rcu_read_lock();
	fmeter_markevent(&task_cs(current)->fmeter);
	rcu_read_unlock();
}

#ifdef CONFIG_PROC_PID_CPUSET
/*
 * proc_cpuset_show()
 *  - Print task's cpuset path into seq_file.
 *  - Used for /proc/<pid>/cpuset.
 *  - No need to task_lock(tsk) on this tsk->cpuset reference, as it
 *    doesn't really matter if tsk->cpuset changes after we read it,
 *    and we take cpuset_mutex, keeping cpuset_attach() from changing it
 *    anyway.
 */
int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk)
{
	char *buf;
	struct cgroup_subsys_state *css;
	int retval;

	retval = -ENOMEM;
	buf = kmalloc(PATH_MAX, GFP_KERNEL);
	if (!buf)
		goto out;

	css = task_get_css(tsk, cpuset_cgrp_id);
	retval = cgroup_path_ns(css->cgroup, buf, PATH_MAX,
				current->nsproxy->cgroup_ns);
	css_put(css);
	if (retval >= PATH_MAX)
		retval = -ENAMETOOLONG;
	if (retval < 0)
		goto out_free;
	seq_puts(m, buf);
	seq_putc(m, '\n');
	retval = 0;
out_free:
	kfree(buf);
out:
	return retval;
}
#endif /* CONFIG_PROC_PID_CPUSET */

/* Display task mems_allowed in /proc/<pid>/status file. */
void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
{
	seq_printf(m, "Mems_allowed:\t%*pb\n",
		   nodemask_pr_args(&task->mems_allowed));
	seq_printf(m, "Mems_allowed_list:\t%*pbl\n",
		   nodemask_pr_args(&task->mems_allowed));
}