// SPDX-License-Identifier: GPL-2.0
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/sched/isolation.h>

#include "base.h"

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	/* ACPI style match is the only one that may succeed. */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
			unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;
	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}
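
/*
 * The callbacks below implement CPU online/offline as seen from sysfs:
 * writing "1" or "0" to /sys/devices/system/cpu/cpuN/online makes the driver
 * core call cpu_subsys_online()/cpu_subsys_offline() (via device_online()
 * and device_offline()), which in turn call cpu_up()/cpu_down().
 * For example:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 */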

static int cpu_subsys_online(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = dev->id;
	int from_nid, to_nid;
	int ret;

	from_nid = cpu_to_node(cpuid);
	if (from_nid == NUMA_NO_NODE)
		return -ENODEV;

	ret = cpu_up(cpuid);
	/*
	 * When memory is hot-added to a memoryless node and a CPU on that
	 * node is then brought up, the CPU's node number may change
	 * internally.
	 */
	to_nid = cpu_to_node(cpuid);
	if (from_nid != to_nid)
		change_cpu_under_node(cpu, from_nid, to_nid);

	return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
	return cpu_down(dev->id);
}

void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
}
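
/*
 * The optional "probe" and "release" files below are write-only controls at
 * the top of the cpu subsystem directory; the strings written to
 * /sys/devices/system/cpu/probe and /sys/devices/system/cpu/release are
 * passed straight to the architecture's arch_cpu_probe() and
 * arch_cpu_release() hooks, so the argument format is entirely arch-defined
 * and the files only exist when CONFIG_ARCH_CPU_PROBE_RELEASE is selected.
 */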

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_probe(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_release(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
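
/*
 * Registering this subsystem (from cpu_dev_init() below) creates
 * /sys/devices/system/cpu, and every struct cpu registered through
 * register_cpu() appears underneath it as cpuN.
 */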

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * We might be reading another CPU's data, depending on where the
	 * reading thread happens to be scheduled. But the per-CPU crash note
	 * memory is allocated once at boot and never changes thereafter, so
	 * the read is safe without locking.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t rc;

	rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
	return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);
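
/*
 * "crash_notes" exposes the physical address of this CPU's crash note
 * buffer and "crash_notes_size" its size; kexec/kdump user space reads
 * them to locate the per-CPU ELF notes when preparing a crash kernel.
 */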

static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

	return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
};
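
/*
 * These entries back /sys/devices/system/cpu/online, .../possible and
 * .../present, each printed as a CPU list (e.g. "0-3,5") by
 * cpumap_print_to_pagebuf().
 */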

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
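
/*
 * kernel_max reports the highest CPU index this kernel build can support
 * (NR_CPUS - 1), independent of how many CPUs are actually possible or
 * present at run time.
 */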

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%u-%d",
				      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
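
/*
 * The resulting "offline" file shows possible-but-not-online CPUs, optionally
 * followed by the range of CPU numbers above nr_cpu_ids that the platform
 * advertised but this kernel cannot use (e.g. "2,4-7,64-127").
 */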

static ssize_t print_cpus_isolated(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t isolated;

	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(isolated, cpu_possible_mask,
		       housekeeping_cpumask(HK_FLAG_DOMAIN));
	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));

	free_cpumask_var(isolated);

	return n;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);
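
/*
 * "isolated" lists the CPUs excluded from the general scheduler domains,
 * i.e. possible CPUs outside the HK_FLAG_DOMAIN housekeeping mask
 * (typically those passed via the isolcpus= command line option).
 */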

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;

	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));

	return n;
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif
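
/*
 * "nohz_full" mirrors tick_nohz_full_mask, i.e. the CPUs selected with the
 * nohz_full= boot parameter for full dynticks operation.
 */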

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t n;
	u32 i;

	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
		    CPU_FEATURE_TYPEVAL);

	for (i = 0; i < MAX_CPU_FEATURES; i++)
		if (cpu_have_feature(i)) {
			if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
				WARN(1, "CPU features overflow page\n");
				break;
			}
			n += sprintf(&buf[n], ",%04X", i);
		}
	buf[n++] = '\n';
	return n;
}

static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		print_cpu_modalias(NULL, NULL, buf);
		add_uevent_var(env, "MODALIAS=%s", buf);
		kfree(buf);
	}
	return 0;
}
#endif
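
/*
 * The modalias string above has the form "cpu:type:<arch type>:feature:,XXXX".
 * Emitting it both as a sysfs attribute (dev_attr_modalias below) and in the
 * uevent lets udev autoload modules that carry a matching
 * MODULE_DEVICE_TABLE(cpu, ...) entry when the corresponding CPU feature is
 * present; some optimized crypto drivers are loaded this way, for instance.
 */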

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	cpu->dev.offline_disabled = !cpu->hotpluggable;
	cpu->dev.offline = !cpu_online(num);
	cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	cpu->dev.bus->uevent = cpu_uevent;
#endif
	cpu->dev.groups = common_cpu_attr_groups;
	if (cpu->hotpluggable)
		cpu->dev.groups = hotplugable_cpu_attr_groups;
	error = device_register(&cpu->dev);
	if (error) {
		put_device(&cpu->dev);
		return error;
	}

	per_cpu(cpu_sys_devices, num) = &cpu->dev;
	register_cpu_under_node(num, cpu_to_node(num));
	dev_pm_qos_expose_latency_limit(&cpu->dev,
					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);

	return 0;
}
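
/*
 * Architectures that do not use CONFIG_GENERIC_CPU_DEVICES call
 * register_cpu() from their own topology setup code, roughly along these
 * lines (an illustrative sketch only; the array and helper named here are
 * made up, not taken from any particular architecture):
 *
 *	static struct cpu arch_cpu_devices[NR_CPUS];
 *
 *	for_each_present_cpu(i) {
 *		arch_cpu_devices[i].hotpluggable = arch_cpu_can_hotplug(i);
 *		register_cpu(&arch_cpu_devices[i], i);
 *	}
 */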

struct device *get_cpu_device(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

static void device_create_release(struct device *dev)
{
	kfree(dev);
}

static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
		    const struct attribute_group **groups,
		    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);
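
/*
 * cpu_device_create() lets other subsystems hang their own per-CPU child
 * devices (with their own attribute groups) underneath a CPU device.
 * A minimal, illustrative call (names made up for the example):
 *
 *	struct device *parent = get_cpu_device(cpu);
 *	struct device *dev = cpu_device_create(parent, my_data, my_groups,
 *					       "my_node%u", cpu);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */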

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
	&dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};
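
/*
 * Everything in cpu_root_attrs appears directly in /sys/devices/system/cpu/
 * (kernel_max, offline, isolated, ...); the group array is handed to
 * subsys_system_register() in cpu_dev_init() below.
 */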

bool cpu_is_hotpluggable(unsigned cpu)
{
	struct device *dev = get_cpu_device(cpu);
	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif
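
/*
 * On architectures that select CONFIG_GENERIC_CPU_DEVICES this registers a
 * plain, non-hotpluggable struct cpu for every possible CPU at boot; other
 * architectures call register_cpu() from their own code and this function
 * compiles down to an empty stub.
 */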

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
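
/*
 * Each cpu_show_*() below is a __weak default that simply reports
 * "Not affected"; architectures with real state to report (mitigations,
 * vulnerable, etc.) override the corresponding function. The files appear
 * under /sys/devices/system/cpu/vulnerabilities/.
 */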

ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	&dev_attr_l1tf.attr,
	NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name  = "vulnerabilities",
	.attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
			       &cpu_root_vulnerabilities_group))
		pr_err("Unable to register CPU vulnerabilities\n");
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
	cpu_register_vulnerabilities();
}
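
/*
 * cpu_dev_init() is called early from driver_init() during boot, so the
 * /sys/devices/system/cpu hierarchy exists before architecture code starts
 * registering individual CPU devices with register_cpu().
 */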