#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS

extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
	return static_key_false(&cpusets_enabled_key);
}

static inline int nr_cpusets(void)
{
	/* jump label reference count + the top-level cpuset */
	return static_key_count(&cpusets_enabled_key) + 1;
}
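/*
 * A worked example of the count above, assuming cpuset_inc()/cpuset_dec()
 * below are invoked once per non-root cpuset created/removed: with two
 * cpusets below the root, static_key_count() reads 2 and nr_cpusets()
 * reports 3, i.e. the two children plus the top-level cpuset.
 */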

static inline void cpuset_inc(void)
{
	static_key_slow_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_key_slow_dec(&cpusets_enabled_key);
}

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(bool cpu_online);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return nr_cpusets() <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}
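/*
 * A sketch of a typical caller pattern (not part of this header; the exact
 * allocator flags involved are omitted): zone iteration in the page
 * allocator can test cpusets_enabled() first, so the softwall/hardwall
 * check is skipped entirely when cpusets are not in use, and a disallowed
 * zone is simply passed over:
 *
 *	if (cpusets_enabled() &&
 *	    !cpuset_zone_allowed_softwall(zone, gfp_mask))
 *		continue;
 */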

extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *, void *);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated
 * in parallel and, depending on the new value, an operation can fail,
 * potentially causing process failure. A retry loop with
 * read_mems_allowed_begin and read_mems_allowed_retry prevents these
 * artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
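/*
 * A minimal usage sketch of the retry loop described above (not part of
 * this header; try_to_allocate() is a stand-in for whatever work consults
 * mems_allowed):
 *
 *	struct page *page;
 *	unsigned int cpuset_mems_cookie;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = try_to_allocate(gfp_mask);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 */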

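/*
 * Write side of mems_allowed_seq: the update is serialized by task_lock()
 * and published under the seqcount so that readers retry rather than use a
 * torn nodemask. Interrupts are disabled across the write section,
 * presumably so that a read_mems_allowed_begin() loop entered from
 * interrupt context on this CPU cannot spin on a sequence count that is
 * held odd here.
 */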
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}

#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(bool cpu_online)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */