/*
 * arch/ia64/kernel/domain.c
 * Architecture specific sched-domains builder.
 *
 * Copyright (C) 2004 Jesse Barnes
 * Copyright (C) 2004 Silicon Graphics, Inc.
 */

#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/nodemask.h>

#define SD_NODES_PER_DOMAIN 16

#ifdef CONFIG_NUMA
/**
 * find_next_best_node - find the next node to include in a sched_domain
 * @node: node whose sched_domain we're building
 * @used_nodes: nodes already in the sched_domain
 *
 * Find the next node to include in a given scheduling domain.  Simply
 * finds the closest node not already in the @used_nodes map.
 *
 * Should use nodemask_t.
 */
static int find_next_best_node(int node, unsigned long *used_nodes)
{
	int i, n, val, min_val, best_node = 0;

	min_val = INT_MAX;

	for (i = 0; i < MAX_NUMNODES; i++) {
		/* Start at @node */
		n = (node + i) % MAX_NUMNODES;

		if (!nr_cpus_node(n))
			continue;

		/* Skip already used nodes */
		if (test_bit(n, used_nodes))
			continue;

		/* Simple min distance search */
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	set_bit(best_node, used_nodes);
	return best_node;
}
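
/*
 * Illustrative sketch (not part of the kernel build): the search above,
 * extracted into a standalone program with a made-up three-node distance
 * table.  Repeated calls hand back nodes in increasing distance from
 * @node, which is exactly how sched_domain_node_span() below consumes it.
 */
#if 0
#include <limits.h>
#include <stdio.h>

#define TOY_NUMNODES 3

/* Hypothetical symmetric distance table (diagonal = local access). */
static const int toy_distance[TOY_NUMNODES][TOY_NUMNODES] = {
	{ 10, 20, 40 },
	{ 20, 10, 40 },
	{ 40, 40, 10 },
};

static int toy_next_best_node(int node, unsigned char *used)
{
	int i, n, val, min_val = INT_MAX, best = 0;

	for (i = 0; i < TOY_NUMNODES; i++) {
		n = (node + i) % TOY_NUMNODES;	/* start at @node */
		if (used[n])
			continue;
		val = toy_distance[node][n];	/* simple min distance search */
		if (val < min_val) {
			min_val = val;
			best = n;
		}
	}
	used[best] = 1;
	return best;
}

int main(void)
{
	unsigned char used[TOY_NUMNODES] = { 0 };
	int i;

	for (i = 0; i < TOY_NUMNODES; i++)
		printf("%d ", toy_next_best_node(0, used));
	printf("\n");	/* prints "0 1 2": node 0 first, then by distance */
	return 0;
}
#endif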

/**
 * sched_domain_node_span - get a cpumask for a node's sched_domain
 * @node: node whose cpumask we're constructing
 *
 * Given a node, construct a good cpumask for its sched_domain to span.  It
 * should be one that prevents unnecessary balancing, but also spreads tasks
 * out optimally.
 */
static cpumask_t sched_domain_node_span(int node)
{
	int i;
	cpumask_t span, nodemask;
	DECLARE_BITMAP(used_nodes, MAX_NUMNODES);

	cpus_clear(span);
	bitmap_zero(used_nodes, MAX_NUMNODES);

	nodemask = node_to_cpumask(node);
	cpus_or(span, span, nodemask);
	set_bit(node, used_nodes);

	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
		int next_node = find_next_best_node(node, used_nodes);
		nodemask = node_to_cpumask(next_node);
		cpus_or(span, span, nodemask);
	}

	return span;
}
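
/*
 * For example, on a hypothetical 4-node box the loop above keeps pulling
 * in the nearest unused node until none are left, so every node's span
 * ends up covering all cpus.  On a hypothetical 64-node box it stops
 * after SD_NODES_PER_DOMAIN (16) nodes, keeping node-level balancing
 * local to a node and its 15 nearest neighbours.
 */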
#endif

/*
 * At the moment, CONFIG_SCHED_SMT is never defined, but leave it in so we
 * can switch it on easily if needed.
 */
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
static struct sched_group sched_group_cpus[NR_CPUS];
static int cpu_to_cpu_group(int cpu)
{
	return cpu;
}
#endif

static DEFINE_PER_CPU(struct sched_domain, phys_domains);
static struct sched_group sched_group_phys[NR_CPUS];
static int cpu_to_phys_group(int cpu)
{
#ifdef CONFIG_SCHED_SMT
	return first_cpu(cpu_sibling_map[cpu]);
#else
	return cpu;
#endif
}
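
/*
 * E.g., assuming two-way SMT where cpu_sibling_map[2] == cpu_sibling_map[3]
 * == { 2, 3 }: cpus 2 and 3 both map to physical group 2, collapsing each
 * pair of hardware threads onto a single group.
 */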

#ifdef CONFIG_NUMA
/*
 * init_sched_build_groups() can't handle what we want to do with node
 * groups, so roll our own: each node gets its own, dynamically
 * allocated, list of groups.
 */
static DEFINE_PER_CPU(struct sched_domain, node_domains);
static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];

static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
static struct sched_group *sched_group_allnodes_bycpu[NR_CPUS];

static int cpu_to_allnodes_group(int cpu)
{
	return cpu_to_node(cpu);
}
#endif
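
/*
 * The per-node group lists built below are circular, singly linked rings:
 * the last group's ->next points back at the head.  A minimal sketch of
 * walking such a ring (toy types, not the kernel's struct sched_group):
 */
#if 0
#include <stdio.h>

struct toy_group {
	struct toy_group *next;
	int id;
};

static void toy_walk(struct toy_group *head)
{
	struct toy_group *g = head;

	do {			/* visits every group exactly once */
		printf("group %d\n", g->id);
		g = g->next;
	} while (g != head);
}

int main(void)
{
	struct toy_group a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };

	a.next = &b;
	b.next = &c;
	c.next = &a;		/* close the ring */
	toy_walk(&a);		/* prints groups 0, 1, 2 */
	return 0;
}
#endif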

/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus
 */
void build_sched_domains(const cpumask_t *cpu_map)
{
	int i;
#ifdef CONFIG_NUMA
	struct sched_group **sched_group_nodes = NULL;
	struct sched_group *sched_group_allnodes = NULL;

	/*
	 * Allocate the per-node list of sched groups
	 */
	sched_group_nodes = kmalloc(sizeof(struct sched_group*)*MAX_NUMNODES,
					   GFP_ATOMIC);
	if (!sched_group_nodes) {
		printk(KERN_WARNING "Can not alloc sched group node list\n");
		return;
	}
	sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif

	/*
	 * Set up domains for cpus specified by the cpu_map.
	 */
	for_each_cpu_mask(i, *cpu_map) {
		int group;
		struct sched_domain *sd = NULL, *p;
		cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));

		cpus_and(nodemask, nodemask, *cpu_map);

#ifdef CONFIG_NUMA
		if (cpus_weight(*cpu_map)
				> SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
			if (!sched_group_allnodes) {
				sched_group_allnodes
					= kmalloc(sizeof(struct sched_group)
							* MAX_NUMNODES,
						  GFP_KERNEL);
				if (!sched_group_allnodes) {
					printk(KERN_WARNING
					"Can not alloc allnodes sched group\n");
					break;
				}
				sched_group_allnodes_bycpu[i]
						= sched_group_allnodes;
			}
			sd = &per_cpu(allnodes_domains, i);
			*sd = SD_ALLNODES_INIT;
			sd->span = *cpu_map;
			group = cpu_to_allnodes_group(i);
			sd->groups = &sched_group_allnodes[group];
			p = sd;
		} else
			p = NULL;

		sd = &per_cpu(node_domains, i);
		*sd = SD_NODE_INIT;
		sd->span = sched_domain_node_span(cpu_to_node(i));
		sd->parent = p;
		cpus_and(sd->span, sd->span, *cpu_map);
#endif

		p = sd;
		sd = &per_cpu(phys_domains, i);
		group = cpu_to_phys_group(i);
		*sd = SD_CPU_INIT;
		sd->span = nodemask;
		sd->parent = p;
		sd->groups = &sched_group_phys[group];

#ifdef CONFIG_SCHED_SMT
		p = sd;
		sd = &per_cpu(cpu_domains, i);
		group = cpu_to_cpu_group(i);
		*sd = SD_SIBLING_INIT;
		sd->span = cpu_sibling_map[i];
		cpus_and(sd->span, sd->span, *cpu_map);
		sd->parent = p;
		sd->groups = &sched_group_cpus[group];
#endif
	}
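	/*
	 * Each cpu now owns a parent-linked chain of domains, widest to
	 * narrowest: allnodes (only on large systems) -> node -> phys ->
	 * SMT (if configured).  cpu_attach_domain() at the end of this
	 * function hooks each cpu to the narrowest level.
	 */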

#ifdef CONFIG_SCHED_SMT
	/* Set up CPU (sibling) groups */
	for_each_cpu_mask(i, *cpu_map) {
		cpumask_t this_sibling_map = cpu_sibling_map[i];
		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
		if (i != first_cpu(this_sibling_map))
			continue;

		init_sched_build_groups(sched_group_cpus, this_sibling_map,
						&cpu_to_cpu_group);
	}
#endif

	/* Set up physical groups */
	for (i = 0; i < MAX_NUMNODES; i++) {
		cpumask_t nodemask = node_to_cpumask(i);

		cpus_and(nodemask, nodemask, *cpu_map);
		if (cpus_empty(nodemask))
			continue;

		init_sched_build_groups(sched_group_phys, nodemask,
						&cpu_to_phys_group);
	}

#ifdef CONFIG_NUMA
	if (sched_group_allnodes)
		init_sched_build_groups(sched_group_allnodes, *cpu_map,
					&cpu_to_allnodes_group);

	for (i = 0; i < MAX_NUMNODES; i++) {
		/* Set up node groups */
		struct sched_group *sg, *prev;
		cpumask_t nodemask = node_to_cpumask(i);
		cpumask_t domainspan;
		cpumask_t covered = CPU_MASK_NONE;
		int j;

		cpus_and(nodemask, nodemask, *cpu_map);
		if (cpus_empty(nodemask)) {
			sched_group_nodes[i] = NULL;
			continue;
		}

		domainspan = sched_domain_node_span(i);
		cpus_and(domainspan, domainspan, *cpu_map);

		sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
		sched_group_nodes[i] = sg;
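		/*
		 * Note the ordering: even if the allocation failed, we
		 * still walk the node's cpus so each node-level domain
		 * sees the NULL group and has balancing switched off;
		 * the failure itself is only reported afterwards.
		 */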
		for_each_cpu_mask(j, nodemask) {
			struct sched_domain *sd;
			sd = &per_cpu(node_domains, j);
			sd->groups = sg;
			if (sd->groups == NULL) {
				/* Turn off balancing if we have no groups */
				sd->flags = 0;
			}
		}
		if (!sg) {
			printk(KERN_WARNING
			"Can not alloc domain group for node %d\n", i);
			continue;
		}
		sg->cpu_power = 0;
		sg->cpumask = nodemask;
		cpus_or(covered, covered, nodemask);
		prev = sg;

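		/*
		 * Give this node's domain one extra group per additional
		 * node in its span: walk node numbers upward from i with
		 * wrap-around, skipping nodes whose cpus are already
		 * covered, until all of domainspan is accounted for.
		 */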
		for (j = 0; j < MAX_NUMNODES; j++) {
			cpumask_t tmp, notcovered;
			int n = (i + j) % MAX_NUMNODES;

			cpus_complement(notcovered, covered);
			cpus_and(tmp, notcovered, *cpu_map);
			cpus_and(tmp, tmp, domainspan);
			if (cpus_empty(tmp))
				break;

			nodemask = node_to_cpumask(n);
			cpus_and(tmp, tmp, nodemask);
			if (cpus_empty(tmp))
				continue;

			sg = kmalloc(sizeof(struct sched_group), GFP_KERNEL);
			if (!sg) {
				printk(KERN_WARNING
				"Can not alloc domain group for node %d\n", n);
				break;
			}
			sg->cpu_power = 0;
			sg->cpumask = tmp;
			cpus_or(covered, covered, tmp);
			prev->next = sg;
			prev = sg;
		}
		prev->next = sched_group_nodes[i];
	}
#endif

	/* Calculate CPU power for physical packages and nodes */
	for_each_cpu_mask(i, *cpu_map) {
		int power;
		struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
		sd = &per_cpu(cpu_domains, i);
		power = SCHED_LOAD_SCALE;
		sd->groups->cpu_power = power;
#endif

		sd = &per_cpu(phys_domains, i);
		power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
				(cpus_weight(sd->groups->cpumask)-1) / 10;
		sd->groups->cpu_power = power;
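		/*
		 * Worked example, assuming SCHED_LOAD_SCALE == 128 (the
		 * actual value lives in linux/sched.h): a group spanning
		 * 4 cpus gets 128 + 128 * (4 - 1) / 10 = 166, i.e. each
		 * cpu beyond the first adds only 10% of a full cpu's
		 * scale to the group's capacity.
		 */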

#ifdef CONFIG_NUMA
		sd = &per_cpu(allnodes_domains, i);
		if (sd->groups) {
			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
				(cpus_weight(sd->groups->cpumask)-1) / 10;
			sd->groups->cpu_power = power;
		}
#endif
	}

#ifdef CONFIG_NUMA
	for (i = 0; i < MAX_NUMNODES; i++) {
		struct sched_group *sg = sched_group_nodes[i];
		int j;

		if (sg == NULL)
			continue;
next_sg:
		for_each_cpu_mask(j, sg->cpumask) {
			struct sched_domain *sd;
			int power;

			sd = &per_cpu(phys_domains, j);
			if (j != first_cpu(sd->groups->cpumask)) {
				/*
				 * Only add "power" once for each
				 * physical package.
				 */
				continue;
			}
			power = SCHED_LOAD_SCALE + SCHED_LOAD_SCALE *
				(cpus_weight(sd->groups->cpumask)-1) / 10;

			sg->cpu_power += power;
		}
		sg = sg->next;
		if (sg != sched_group_nodes[i])
			goto next_sg;
	}
#endif

	/* Attach the domains */
	for_each_cpu_mask(i, *cpu_map) {
		struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
		sd = &per_cpu(cpu_domains, i);
#else
		sd = &per_cpu(phys_domains, i);
#endif
		cpu_attach_domain(sd, i);
	}
}
/*
 * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
 */
void arch_init_sched_domains(const cpumask_t *cpu_map)
{
	cpumask_t cpu_default_map;

	/*
	 * Setup mask for cpus without special case scheduling requirements.
	 * For now this just excludes isolated cpus, but could be used to
	 * exclude other special cases in the future.
	 */
	cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);

	build_sched_domains(&cpu_default_map);
}
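
/*
 * E.g., booting a 4-cpu box with isolcpus=3 puts cpu 3 in
 * cpu_isolated_map, so cpu_default_map becomes cpus 0-2 and cpu 3 is
 * left out of every sched domain built above.
 */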

void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
#ifdef CONFIG_NUMA
	int i;
	int cpu;

	for_each_cpu_mask(cpu, *cpu_map) {
		struct sched_group *sched_group_allnodes
			= sched_group_allnodes_bycpu[cpu];
		struct sched_group **sched_group_nodes
			= sched_group_nodes_bycpu[cpu];

		if (sched_group_allnodes) {
			kfree(sched_group_allnodes);
			sched_group_allnodes_bycpu[cpu] = NULL;
		}

		if (!sched_group_nodes)
			continue;

		for (i = 0; i < MAX_NUMNODES; i++) {
			cpumask_t nodemask = node_to_cpumask(i);
			struct sched_group *oldsg, *sg = sched_group_nodes[i];

			cpus_and(nodemask, nodemask, *cpu_map);
			if (cpus_empty(nodemask))
				continue;

			if (sg == NULL)
				continue;
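			/*
			 * Free the ring starting one past the head, so
			 * the pass below ends by freeing the head itself
			 * (oldsg == sched_group_nodes[i]).
			 */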
			sg = sg->next;
next_sg:
			oldsg = sg;
			sg = sg->next;
			kfree(oldsg);
			if (oldsg != sched_group_nodes[i])
				goto next_sg;
		}
		kfree(sched_group_nodes);
		sched_group_nodes_bycpu[cpu] = NULL;
	}
#endif
}