/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */
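/*
 * A sketch of the hazard this ordering avoids (illustrative only, not real
 * kernel code): suppose retry() has already been patched for the
 * disabled -> enabled transition but begin() has not, and a caller loops
 * with local irqs disabled:
 *
 *	seq = read_mems_allowed_begin();	// not yet patched: returns 0
 *	...
 *	while (read_mems_allowed_retry(seq));	// already patched: compares the
 *						// live seqcount against 0 and
 *						// can spin forever with irqs off
 */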
extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed, such as during page allocation. mems_allowed can be updated
 * in parallel, and depending on the new value an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
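
/*
 * Typical pairing of the two (a minimal sketch; try_alloc_from_mems() is a
 * hypothetical stand-in for a real caller such as the page allocator):
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_alloc_from_mems();
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */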

static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */