/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

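/*
 * Translate the firmware status of a bus-scope command (ARS cap / start /
 * status, clear error) into a Linux errno; zero means the payload in @buf
 * is valid.
 */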
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

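/*
 * The _LSR and _LSW label methods report a status of 3 when the label
 * storage area is locked.
 */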
#define ACPI_LABELS_LOCKED 3

static int xlat_nvdimm_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		/*
		 * In the _LSI, _LSR, _LSW case the locked status is
		 * communicated via the read/write commands
		 */
		if (nfit_mem->has_lsi)
			break;

		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_GET_CONFIG_DATA:
		if (nfit_mem->has_lsr && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	case ND_CMD_SET_CONFIG_DATA:
		if (nfit_mem->has_lsw && status == ACPI_LABELS_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(nvdimm, buf, cmd, status);
}

/* convert _LS{I,R} packages to the buffer object acpi_nfit_ctl expects */
static union acpi_object *pkg_to_buf(union acpi_object *pkg)
{
	int i;
	void *dst;
	size_t size = 0;
	union acpi_object *buf = NULL;

	if (pkg->type != ACPI_TYPE_PACKAGE) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				pkg->type);
		goto err;
	}

	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER)
			size += 4;
		else if (obj->type == ACPI_TYPE_BUFFER)
			size += obj->buffer.length;
		else {
			WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
					obj->type);
			goto err;
		}
	}

	buf = ACPI_ALLOCATE(sizeof(*buf) + size);
	if (!buf)
		goto err;

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = size;
	buf->buffer.pointer = dst;
	for (i = 0; i < pkg->package.count; i++) {
		union acpi_object *obj = &pkg->package.elements[i];

		if (obj->type == ACPI_TYPE_INTEGER) {
			memcpy(dst, &obj->integer.value, 4);
			dst += 4;
		} else if (obj->type == ACPI_TYPE_BUFFER) {
			memcpy(dst, obj->buffer.pointer, obj->buffer.length);
			dst += obj->buffer.length;
		}
	}
err:
	ACPI_FREE(pkg);
	return buf;
}

static union acpi_object *int_to_buf(union acpi_object *integer)
{
	union acpi_object *buf = ACPI_ALLOCATE(sizeof(*buf) + 4);
	void *dst = NULL;

	if (!buf)
		goto err;

	if (integer->type != ACPI_TYPE_INTEGER) {
		WARN_ONCE(1, "BIOS bug, unexpected element type: %d\n",
				integer->type);
		goto err;
	}

	dst = buf + 1;
	buf->type = ACPI_TYPE_BUFFER;
	buf->buffer.length = 4;
	buf->buffer.pointer = dst;
	memcpy(dst, &integer->integer.value, 4);
err:
	ACPI_FREE(integer);
	return buf;
}

static union acpi_object *acpi_label_write(acpi_handle handle, u32 offset,
		u32 len, void *data)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 3,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
			[2] = {
				.buffer.type = ACPI_TYPE_BUFFER,
				.buffer.pointer = data,
				.buffer.length = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSW", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return int_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_read(acpi_handle handle, u32 offset,
		u32 len)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_object_list input = {
		.count = 2,
		.pointer = (union acpi_object []) {
			[0] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = offset,
			},
			[1] = {
				.integer.type = ACPI_TYPE_INTEGER,
				.integer.value = len,
			},
		},
	};

	rc = acpi_evaluate_object(handle, "_LSR", &input, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

static union acpi_object *acpi_label_info(acpi_handle handle)
{
	acpi_status rc;
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };

	rc = acpi_evaluate_object(handle, "_LSI", NULL, &buf);
	if (ACPI_FAILURE(rc))
		return NULL;
	return pkg_to_buf(buf.pointer);
}

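/*
 * Single entry point for bus- and dimm-scope commands.  Label commands are
 * steered to the _LSI/_LSR/_LSW methods when the DIMM advertises them,
 * everything else is marshalled into a _DSM call and the returned buffer
 * is unpacked field by field.
 */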
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;

		for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
			if (call_pkg->nd_reserved2[i])
				return -EINVAL;
	}

	if (nvdimm) {
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
			__func__, dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	/*
	 * Call the BIOS, prefer the named methods over _DSM if available.
	 * Guard on nvdimm: for bus-scope commands nfit_mem is NULL, so the
	 * label-method checks must not be dereferenced.
	 */
	if (nvdimm && cmd == ND_CMD_GET_CONFIG_SIZE && nfit_mem->has_lsi)
		out_obj = acpi_label_info(handle);
	else if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && nfit_mem->has_lsr) {
		struct nd_cmd_get_config_data_hdr *p = buf;

		out_obj = acpi_label_read(handle, p->in_offset, p->in_length);
	} else if (nvdimm && cmd == ND_CMD_SET_CONFIG_DATA && nfit_mem->has_lsw) {
		struct nd_cmd_set_config_hdr *p = buf;

		out_obj = acpi_label_write(handle, p->in_offset, p->in_length,
				p->in_buf);
	} else
		out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);

	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

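/*
 * Dispatch one NFIT sub-table to its add_* helper.  Returns the address of
 * the next sub-table, NULL at the end of the buffer, or an ERR_PTR() on
 * failure.
 */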
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

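/*
 * Wire up the optional BLK plumbing for a DIMM: the block-data-window (BDW)
 * entry that matches its control region, the SPA-BDW range that maps it,
 * and the interleave table used by that window.
 */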
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

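/*
 * Create or extend an nfit_mem object for every memory-device entry mapped
 * into @spa, or for the unmapped DIMMs when @spa is NULL.
 */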
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

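/*
 * Probe the ACPI companion of a DIMM: register for notifications, pick a
 * _DSM command family, build the DSM mask, and detect the optional
 * _LSI/_LSR/_LSW label methods.
 */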
static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	union acpi_object *obj;
	unsigned long dsm_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	obj = acpi_label_info(adev_dimm->handle);
	if (obj) {
		ACPI_FREE(obj);
		nfit_mem->has_lsi = 1;
		dev_dbg(dev, "%s: has _LSI\n", dev_name(&adev_dimm->dev));
	}

	obj = acpi_label_read(adev_dimm->handle, 0, 0);
	if (obj) {
		ACPI_FREE(obj);
		nfit_mem->has_lsr = 1;
		dev_dbg(dev, "%s: has _LSR\n", dev_name(&adev_dimm->dev));
	}

	obj = acpi_label_write(adev_dimm->handle, 0, 0, NULL);
	if (obj) {
		ACPI_FREE(obj);
		nfit_mem->has_lsw = 1;
		dev_dbg(dev, "%s: has _LSW\n", dev_name(&adev_dimm->dev));
	}

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

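/*
 * Register an nvdimm device for every nfit_mem that was discovered, then
 * hook up sysfs-based health-event notification once registration of the
 * whole set has completed.
 */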
1724 1725 1726
static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
1727 1728
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;
1729 1730

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
1731
		struct acpi_nfit_flush_address *flush;
1732
		unsigned long flags = 0, cmd_mask;
1733
		struct nfit_memdev *nfit_memdev;
1734
		u32 device_handle;
1735
		u16 mem_flags;
1736 1737 1738 1739

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
V
Vishal Verma 已提交
1740
			dimm_count++;
1741 1742 1743 1744
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
1745
			set_bit(NDD_ALIASING, &flags);
1746

1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757
		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		if (nfit_mem->has_lsi)
			set_bit(ND_CMD_GET_CONFIG_SIZE, &cmd_mask);
		if (nfit_mem->has_lsr)
			set_bit(ND_CMD_GET_CONFIG_DATA, &cmd_mask);
		if (nfit_mem->has_lsw)
			set_bit(ND_CMD_SET_CONFIG_DATA, &cmd_mask);

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
		  mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
 */
enum nfit_aux_cmds {
        NFIT_CMD_TRANSLATE_SPA = 5,
        NFIT_CMD_ARS_INJECT_SET = 7,
        NFIT_CMD_ARS_INJECT_CLEAR = 8,
        NFIT_CMD_ARS_INJECT_GET = 9,
};

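/*
 * Detect which bus-scope _DSM functions (ARS and error-injection
 * related) the platform implements and record them in cmd_mask /
 * bus_dsm_mask, on top of any force-enabled commands in acpi_desc.
 */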
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	unsigned long dsm_mask;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);

	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->bus_dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

static ssize_t ecc_unit_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
}
static DEVICE_ATTR_RO(ecc_unit_size);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	&dev_attr_ecc_unit_size.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16 vendor_id;
		u16 manufacturing_date;
		u8  manufacturing_location;
		u8  reserved[31];
	} mapping[0];
};

static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
	return sizeof(struct nfit_set_info2)
		+ num_mappings * sizeof(struct nfit_set_info_map2);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

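/*
 * Build the interleave-set cookies used to validate namespace labels:
 * cookie1 for the v1.1 label layout, cookie2 for the v1.2 layout that
 * also hashes vendor / manufacturing data, and altcookie for v1.1 sets
 * created with the legacy (memcmp-based) sort order.
 */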
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	ndr_desc->nd_set = nd_set;
	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}

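/*
 * Translate a flat block-window offset into the interleaved system
 * physical address space described by the interleave table:
 * base_offset + line_offset + table_skip_count * table_size +
 * sub_line_offset.
 */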
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

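/*
 * Program a block control window command: bits 0-47 carry the DPA in
 * cache-line units, bits 48-55 the transfer length in cache lines, and
 * bit 56 the write flag (see the BCW_* encoding below).
 */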
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
                        nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

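/*
 * Start an Address Range Scrub of the range backing @nfit_spa; the
 * scrub type follows the spa type (persistent vs. volatile) and the
 * firmware status is returned via cmd_rc.
 */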
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	ars_start.flags = acpi_desc->ars_start_flags;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	ars_start.flags = acpi_desc->ars_start_flags;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_status *ars_status)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;
	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0, rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			mapping->size = nfit_mem->bdw->capacity;
			mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}

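/*
 * Register an nd_region for this spa range: build a mapping for every
 * memdev that references the range, seed the interleave-set cookies,
 * claim the range in the iomem resource tree for PMEM, and create the
 * pmem / volatile region (BLK regions are created per-mapping in
 * acpi_nfit_init_mapping()).
 */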
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}

static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}

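/*
 * Phase-2 scrub helper: issue a directed ARS for a registered range
 * flagged ars_required, poll for results while tolerating -EBUSY and
 * result overflow, then notify the region so its badblocks are
 * revalidated.
 */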
static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1. If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}
	acpi_desc->init_complete = 1;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	acpi_desc->ars_start_flags = 0;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	acpi_desc->ars_start_flags = 0;
	if (!acpi_desc->cancel)
		queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}

static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}

static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;
	int rc;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* bounce the init_mutex to make init_complete valid */
	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel || acpi_desc->init_complete) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	mutex_unlock(&acpi_desc->init_mutex);

	rc = wait_for_completion_interruptible(&flush.cmp);
	cancel_work_sync(&flush.work);
	return rc;
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}

int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	acpi_desc->ars_start_flags = flags;
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	acpi_desc->cancel = 1;
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				 __func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}

static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
			0 : ND_ARS_RETURN_PREV_DATA;

	acpi_nfit_ars_rescan(acpi_desc, flags);
}

void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "%s: event: 0x%x\n", __func__, event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;
}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");