/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/list_sort.h>
#include <linux/libnvdimm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ndctl.h>
#include <linux/sysfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include <asm/cacheflush.h>
#include "nfit.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static bool force_enable_dimms;
module_param(force_enable_dimms, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(force_enable_dimms, "Ignore _STA (ACPI DIMM device) status");

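/*
 * Like force_enable_dimms above, the knobs below are ordinary module
 * parameters; assuming the driver is built as the "nfit" module they can
 * be set on the kernel command line, e.g. "nfit.scrub_timeout=<seconds>".
 */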
static unsigned int scrub_timeout = NFIT_ARS_TIMEOUT;
module_param(scrub_timeout, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_timeout, "Initial scrub timeout in seconds");

/* after three payloads of overflow, it's dead jim */
static unsigned int scrub_overflow_abort = 3;
module_param(scrub_overflow_abort, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scrub_overflow_abort,
		"Number of times we overflow ARS results before abort");

static bool disable_vendor_specific;
module_param(disable_vendor_specific, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vendor_specific,
		"Limit commands to the publicly specified set");

static unsigned long override_dsm_mask;
module_param(override_dsm_mask, ulong, S_IRUGO);
MODULE_PARM_DESC(override_dsm_mask, "Bitmask of allowed NVDIMM DSM functions");

static int default_dsm_family = -1;
module_param(default_dsm_family, int, S_IRUGO);
MODULE_PARM_DESC(default_dsm_family,
		"Try this DSM type first when identifying NVDIMM family");

LIST_HEAD(acpi_descs);
DEFINE_MUTEX(acpi_desc_lock);

static struct workqueue_struct *nfit_wq;

struct nfit_table_prev {
	struct list_head spas;
	struct list_head memdevs;
	struct list_head dcrs;
	struct list_head bdws;
	struct list_head idts;
	struct list_head flushes;
};

static guid_t nfit_uuid[NFIT_UUID_MAX];

const guid_t *to_nfit_uuid(enum nfit_uuids id)
{
	return &nfit_uuid[id];
}
EXPORT_SYMBOL(to_nfit_uuid);

static struct acpi_nfit_desc *to_acpi_nfit_desc(
		struct nvdimm_bus_descriptor *nd_desc)
{
	return container_of(nd_desc, struct acpi_nfit_desc, nd_desc);
}

static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	/*
	 * If provider == 'ACPI.NFIT' we can assume 'dev' is a struct
	 * acpi_device.
	 */
	if (!nd_desc->provider_name
			|| strcmp(nd_desc->provider_name, "ACPI.NFIT") != 0)
		return NULL;

	return to_acpi_device(acpi_desc->dev);
}

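/*
 * The xlat_*_status() helpers below decode the 32-bit firmware status word
 * returned by a _DSM: the low 16 bits carry the command status and the
 * upper 16 bits carry command-specific extended status.  The result is
 * translated into a normal errno that acpi_nfit_ctl() hands back through
 * its cmd_rc argument.
 */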
static int xlat_bus_status(void *buf, unsigned int cmd, u32 status)
{
	struct nd_cmd_clear_error *clear_err;
	struct nd_cmd_ars_status *ars_status;
	u16 flags;

	switch (cmd) {
	case ND_CMD_ARS_CAP:
		if ((status & 0xffff) == NFIT_ARS_CAP_NONE)
			return -ENOTTY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;

		/* No supported scan types for this range */
		flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE;
		if ((status >> 16 & flags) == 0)
			return -ENOTTY;
		return 0;
	case ND_CMD_ARS_START:
		/* ARS is in progress */
		if ((status & 0xffff) == NFIT_ARS_START_BUSY)
			return -EBUSY;

		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		return 0;
	case ND_CMD_ARS_STATUS:
		ars_status = buf;
		/* Command failed */
		if (status & 0xffff)
			return -EIO;
		/* Check extended status (Upper two bytes) */
		if (status == NFIT_ARS_STATUS_DONE)
			return 0;

		/* ARS is in progress */
		if (status == NFIT_ARS_STATUS_BUSY)
			return -EBUSY;

		/* No ARS performed for the current boot */
		if (status == NFIT_ARS_STATUS_NONE)
			return -EAGAIN;

		/*
		 * ARS interrupted, either we overflowed or some other
		 * agent wants the scan to stop.  If we didn't overflow
		 * then just continue with the returned results.
		 */
		if (status == NFIT_ARS_STATUS_INTR) {
			if (ars_status->out_length >= 40 && (ars_status->flags
						& NFIT_ARS_F_OVERFLOW))
				return -ENOSPC;
			return 0;
		}

		/* Unknown status */
		if (status >> 16)
			return -EIO;
		return 0;
	case ND_CMD_CLEAR_ERROR:
		clear_err = buf;
		if (status & 0xffff)
			return -EIO;
		if (!clear_err->cleared)
			return -EIO;
		if (clear_err->length > clear_err->cleared)
			return clear_err->cleared;
		return 0;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_nvdimm_status(void *buf, unsigned int cmd, u32 status)
{
	switch (cmd) {
	case ND_CMD_GET_CONFIG_SIZE:
		if (status >> 16 & ND_CONFIG_LOCKED)
			return -EACCES;
		break;
	default:
		break;
	}

	/* all other non-zero status results in an error */
	if (status)
		return -EIO;
	return 0;
}

static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
		u32 status)
{
	if (!nvdimm)
		return xlat_bus_status(buf, cmd, status);
	return xlat_nvdimm_status(buf, cmd, status);
}

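/*
 * acpi_nfit_ctl() is the single entry point for bus- and dimm-scope
 * commands: it validates the command against the advertised cmd/dsm masks,
 * wraps the caller's buffer in an ACPI package for acpi_evaluate_dsm(),
 * copies the returned buffer back out, and reports the translated firmware
 * status via @cmd_rc.  ND_CMD_CALL passes a raw nd_cmd_pkg payload through
 * unmodified.
 */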
int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
		unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	union acpi_object in_obj, in_buf, *out_obj;
	const struct nd_cmd_desc *desc = NULL;
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_pkg *call_pkg = NULL;
	const char *cmd_name, *dimm_name;
	unsigned long cmd_mask, dsm_mask;
	u32 offset, fw_status = 0;
	acpi_handle handle;
	unsigned int func;
	const guid_t *guid;
	int rc, i;

	func = cmd;
	if (cmd == ND_CMD_CALL) {
		call_pkg = buf;
		func = call_pkg->nd_command;
	}

	if (nvdimm) {
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_device *adev = nfit_mem->adev;

		if (!adev)
			return -ENOTTY;
		if (call_pkg && nfit_mem->family != call_pkg->nd_family)
			return -ENOTTY;

		dimm_name = nvdimm_name(nvdimm);
		cmd_name = nvdimm_cmd_name(cmd);
		cmd_mask = nvdimm_cmd_mask(nvdimm);
		dsm_mask = nfit_mem->dsm_mask;
		desc = nd_cmd_dimm_desc(cmd);
		guid = to_nfit_uuid(nfit_mem->family);
		handle = adev->handle;
	} else {
		struct acpi_device *adev = to_acpi_dev(acpi_desc);

		cmd_name = nvdimm_bus_cmd_name(cmd);
		cmd_mask = nd_desc->cmd_mask;
		dsm_mask = cmd_mask;
		if (cmd == ND_CMD_CALL)
			dsm_mask = nd_desc->bus_dsm_mask;
		desc = nd_cmd_bus_desc(cmd);
		guid = to_nfit_uuid(NFIT_DEV_BUS);
		handle = adev->handle;
		dimm_name = "bus";
	}

	if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
		return -ENOTTY;

	if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
		return -ENOTTY;

	in_obj.type = ACPI_TYPE_PACKAGE;
	in_obj.package.count = 1;
	in_obj.package.elements = &in_buf;
	in_buf.type = ACPI_TYPE_BUFFER;
	in_buf.buffer.pointer = buf;
	in_buf.buffer.length = 0;

	/* libnvdimm has already validated the input envelope */
	for (i = 0; i < desc->in_num; i++)
		in_buf.buffer.length += nd_cmd_in_size(nvdimm, cmd, desc,
				i, buf);

	if (call_pkg) {
		/* skip over package wrapper */
		in_buf.buffer.pointer = (void *) &call_pkg->nd_payload;
		in_buf.buffer.length = call_pkg->nd_size_in;
	}

	dev_dbg(dev, "%s:%s cmd: %d: func: %d input length: %d\n",
			__func__, dimm_name, cmd, func, in_buf.buffer.length);
	print_hex_dump_debug("nvdimm in  ", DUMP_PREFIX_OFFSET, 4, 4,
			in_buf.buffer.pointer,
			min_t(u32, 256, in_buf.buffer.length), true);

	out_obj = acpi_evaluate_dsm(handle, guid, 1, func, &in_obj);
	if (!out_obj) {
		dev_dbg(dev, "%s:%s _DSM failed cmd: %s\n", __func__, dimm_name,
				cmd_name);
		return -EINVAL;
	}

	if (call_pkg) {
		call_pkg->nd_fw_size = out_obj->buffer.length;
		memcpy(call_pkg->nd_payload + call_pkg->nd_size_in,
			out_obj->buffer.pointer,
			min(call_pkg->nd_fw_size, call_pkg->nd_size_out));

		ACPI_FREE(out_obj);
		/*
		 * Need to support FW function w/o known size in advance.
		 * Caller can determine required size based upon nd_fw_size.
		 * If we return an error (like elsewhere) then caller wouldn't
		 * be able to rely upon data returned to make calculation.
		 */
		return 0;
	}

	if (out_obj->package.type != ACPI_TYPE_BUFFER) {
		dev_dbg(dev, "%s:%s unexpected output object type cmd: %s type: %d\n",
				__func__, dimm_name, cmd_name, out_obj->type);
		rc = -EINVAL;
		goto out;
	}

	dev_dbg(dev, "%s:%s cmd: %s output length: %d\n", __func__, dimm_name,
			cmd_name, out_obj->buffer.length);
	print_hex_dump_debug(cmd_name, DUMP_PREFIX_OFFSET, 4, 4,
			out_obj->buffer.pointer,
			min_t(u32, 128, out_obj->buffer.length), true);

	for (i = 0, offset = 0; i < desc->out_num; i++) {
		u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i, buf,
				(u32 *) out_obj->buffer.pointer,
				out_obj->buffer.length - offset);

		if (offset + out_size > out_obj->buffer.length) {
			dev_dbg(dev, "%s:%s output object underflow cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			break;
		}

		if (in_buf.buffer.length + offset + out_size > buf_len) {
			dev_dbg(dev, "%s:%s output overrun cmd: %s field: %d\n",
					__func__, dimm_name, cmd_name, i);
			rc = -ENXIO;
			goto out;
		}
		memcpy(buf + in_buf.buffer.length + offset,
				out_obj->buffer.pointer + offset, out_size);
		offset += out_size;
	}

	/*
	 * Set fw_status for all the commands with a known format to be
	 * later interpreted by xlat_status().
	 */
	if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR)
			|| (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR)))
		fw_status = *(u32 *) out_obj->buffer.pointer;

	if (offset + in_buf.buffer.length < buf_len) {
		if (i >= 1) {
			/*
			 * status valid, return the number of bytes left
			 * unfilled in the output buffer
			 */
			rc = buf_len - offset - in_buf.buffer.length;
			if (cmd_rc)
				*cmd_rc = xlat_status(nvdimm, buf, cmd,
						fw_status);
		} else {
			dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n",
					__func__, dimm_name, cmd_name, buf_len,
					offset);
			rc = -ENXIO;
		}
	} else {
		rc = 0;
		if (cmd_rc)
			*cmd_rc = xlat_status(nvdimm, buf, cmd, fw_status);
	}

 out:
	ACPI_FREE(out_obj);

	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_ctl);

static const char *spa_type_name(u16 type)
{
	static const char *to_name[] = {
		[NFIT_SPA_VOLATILE] = "volatile",
		[NFIT_SPA_PM] = "pmem",
		[NFIT_SPA_DCR] = "dimm-control-region",
		[NFIT_SPA_BDW] = "block-data-window",
		[NFIT_SPA_VDISK] = "volatile-disk",
		[NFIT_SPA_VCD] = "volatile-cd",
		[NFIT_SPA_PDISK] = "persistent-disk",
		[NFIT_SPA_PCD] = "persistent-cd",

	};

	if (type > NFIT_SPA_PCD)
		return "unknown";

	return to_name[type];
}

int nfit_spa_type(struct acpi_nfit_system_address *spa)
{
	int i;

	for (i = 0; i < NFIT_UUID_MAX; i++)
		if (guid_equal(to_nfit_uuid(i), (guid_t *)&spa->range_guid))
			return i;
	return -1;
}

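/*
 * Each add_* helper below validates the sub-table length, reuses a
 * matching entry from the previous enumeration (@prev) when the table is
 * unchanged, and otherwise appends a devm-allocated copy to the
 * corresponding acpi_desc list.
 */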
static bool add_spa(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (spa->header.length != sizeof(*spa))
		return false;

	list_for_each_entry(nfit_spa, &prev->spas, list) {
		if (memcmp(nfit_spa->spa, spa, sizeof(*spa)) == 0) {
			list_move_tail(&nfit_spa->list, &acpi_desc->spas);
			return true;
		}
	}

	nfit_spa = devm_kzalloc(dev, sizeof(*nfit_spa) + sizeof(*spa),
			GFP_KERNEL);
	if (!nfit_spa)
		return false;
	INIT_LIST_HEAD(&nfit_spa->list);
	memcpy(nfit_spa->spa, spa, sizeof(*spa));
	list_add_tail(&nfit_spa->list, &acpi_desc->spas);
	dev_dbg(dev, "%s: spa index: %d type: %s\n", __func__,
			spa->range_index,
			spa_type_name(nfit_spa_type(spa)));
	return true;
}

static bool add_memdev(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_memory_map *memdev)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_memdev *nfit_memdev;

	if (memdev->header.length != sizeof(*memdev))
		return false;

	list_for_each_entry(nfit_memdev, &prev->memdevs, list)
		if (memcmp(nfit_memdev->memdev, memdev, sizeof(*memdev)) == 0) {
			list_move_tail(&nfit_memdev->list, &acpi_desc->memdevs);
			return true;
		}

	nfit_memdev = devm_kzalloc(dev, sizeof(*nfit_memdev) + sizeof(*memdev),
			GFP_KERNEL);
	if (!nfit_memdev)
		return false;
	INIT_LIST_HEAD(&nfit_memdev->list);
	memcpy(nfit_memdev->memdev, memdev, sizeof(*memdev));
	list_add_tail(&nfit_memdev->list, &acpi_desc->memdevs);
	dev_dbg(dev, "%s: memdev handle: %#x spa: %d dcr: %d flags: %#x\n",
			__func__, memdev->device_handle, memdev->range_index,
			memdev->region_index, memdev->flags);
	return true;
}

/*
 * An implementation may provide a truncated control region if no block windows
 * are defined.
 */
static size_t sizeof_dcr(struct acpi_nfit_control_region *dcr)
{
	if (dcr->header.length < offsetof(struct acpi_nfit_control_region,
				window_size))
		return 0;
	if (dcr->windows)
		return sizeof(*dcr);
	return offsetof(struct acpi_nfit_control_region, window_size);
}

static bool add_dcr(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_control_region *dcr)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_dcr *nfit_dcr;

	if (!sizeof_dcr(dcr))
		return false;

	list_for_each_entry(nfit_dcr, &prev->dcrs, list)
		if (memcmp(nfit_dcr->dcr, dcr, sizeof_dcr(dcr)) == 0) {
			list_move_tail(&nfit_dcr->list, &acpi_desc->dcrs);
			return true;
		}

	nfit_dcr = devm_kzalloc(dev, sizeof(*nfit_dcr) + sizeof(*dcr),
			GFP_KERNEL);
	if (!nfit_dcr)
		return false;
	INIT_LIST_HEAD(&nfit_dcr->list);
	memcpy(nfit_dcr->dcr, dcr, sizeof_dcr(dcr));
	list_add_tail(&nfit_dcr->list, &acpi_desc->dcrs);
	dev_dbg(dev, "%s: dcr index: %d windows: %d\n", __func__,
			dcr->region_index, dcr->windows);
	return true;
}

static bool add_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_data_region *bdw)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_bdw *nfit_bdw;

	if (bdw->header.length != sizeof(*bdw))
		return false;
	list_for_each_entry(nfit_bdw, &prev->bdws, list)
		if (memcmp(nfit_bdw->bdw, bdw, sizeof(*bdw)) == 0) {
			list_move_tail(&nfit_bdw->list, &acpi_desc->bdws);
			return true;
		}

	nfit_bdw = devm_kzalloc(dev, sizeof(*nfit_bdw) + sizeof(*bdw),
			GFP_KERNEL);
	if (!nfit_bdw)
		return false;
	INIT_LIST_HEAD(&nfit_bdw->list);
	memcpy(nfit_bdw->bdw, bdw, sizeof(*bdw));
	list_add_tail(&nfit_bdw->list, &acpi_desc->bdws);
	dev_dbg(dev, "%s: bdw dcr: %d windows: %d\n", __func__,
			bdw->region_index, bdw->windows);
	return true;
}

static size_t sizeof_idt(struct acpi_nfit_interleave *idt)
{
	if (idt->header.length < sizeof(*idt))
		return 0;
	return sizeof(*idt) + sizeof(u32) * (idt->line_count - 1);
}

static bool add_idt(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_interleave *idt)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_idt *nfit_idt;

	if (!sizeof_idt(idt))
		return false;

	list_for_each_entry(nfit_idt, &prev->idts, list) {
		if (sizeof_idt(nfit_idt->idt) != sizeof_idt(idt))
			continue;

		if (memcmp(nfit_idt->idt, idt, sizeof_idt(idt)) == 0) {
			list_move_tail(&nfit_idt->list, &acpi_desc->idts);
			return true;
		}
	}

	nfit_idt = devm_kzalloc(dev, sizeof(*nfit_idt) + sizeof_idt(idt),
			GFP_KERNEL);
	if (!nfit_idt)
		return false;
	INIT_LIST_HEAD(&nfit_idt->list);
	memcpy(nfit_idt->idt, idt, sizeof_idt(idt));
	list_add_tail(&nfit_idt->list, &acpi_desc->idts);
	dev_dbg(dev, "%s: idt index: %d num_lines: %d\n", __func__,
			idt->interleave_index, idt->line_count);
	return true;
}

static size_t sizeof_flush(struct acpi_nfit_flush_address *flush)
{
	if (flush->header.length < sizeof(*flush))
		return 0;
	return sizeof(*flush) + sizeof(u64) * (flush->hint_count - 1);
}

static bool add_flush(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev,
		struct acpi_nfit_flush_address *flush)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_flush *nfit_flush;

	if (!sizeof_flush(flush))
		return false;

	list_for_each_entry(nfit_flush, &prev->flushes, list) {
		if (sizeof_flush(nfit_flush->flush) != sizeof_flush(flush))
			continue;

		if (memcmp(nfit_flush->flush, flush,
					sizeof_flush(flush)) == 0) {
			list_move_tail(&nfit_flush->list, &acpi_desc->flushes);
			return true;
		}
	}

	nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush)
			+ sizeof_flush(flush), GFP_KERNEL);
	if (!nfit_flush)
		return false;
	INIT_LIST_HEAD(&nfit_flush->list);
	memcpy(nfit_flush->flush, flush, sizeof_flush(flush));
	list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
	dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
			flush->device_handle, flush->hint_count);
	return true;
}

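/*
 * add_table() dispatches one NFIT sub-table to the helpers above and
 * returns a pointer just past it (hdr->length bytes later), NULL at the
 * end of the buffer, or ERR_PTR(-ENOMEM) on allocation failure.
 */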
static void *add_table(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev, void *table, const void *end)
{
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_header *hdr;
	void *err = ERR_PTR(-ENOMEM);

	if (table >= end)
		return NULL;

	hdr = table;
	if (!hdr->length) {
		dev_warn(dev, "found a zero length table '%d' parsing nfit\n",
			hdr->type);
		return NULL;
	}

	switch (hdr->type) {
	case ACPI_NFIT_TYPE_SYSTEM_ADDRESS:
		if (!add_spa(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_MEMORY_MAP:
		if (!add_memdev(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_CONTROL_REGION:
		if (!add_dcr(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_DATA_REGION:
		if (!add_bdw(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_INTERLEAVE:
		if (!add_idt(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
		if (!add_flush(acpi_desc, prev, table))
			return err;
		break;
	case ACPI_NFIT_TYPE_SMBIOS:
		dev_dbg(dev, "%s: smbios\n", __func__);
		break;
	default:
		dev_err(dev, "unknown table '%d' parsing nfit\n", hdr->type);
		break;
	}

	return table + hdr->length;
}

static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem)
{
	u32 device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
	u16 dcr = nfit_mem->dcr->region_index;
	struct nfit_spa *nfit_spa;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		u16 range_index = nfit_spa->spa->range_index;
		int type = nfit_spa_type(nfit_spa->spa);
		struct nfit_memdev *nfit_memdev;

		if (type != NFIT_SPA_BDW)
			continue;

		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			if (nfit_memdev->memdev->range_index != range_index)
				continue;
			if (nfit_memdev->memdev->device_handle != device_handle)
				continue;
			if (nfit_memdev->memdev->region_index != dcr)
				continue;

			nfit_mem->spa_bdw = nfit_spa->spa;
			return;
		}
	}

	dev_dbg(acpi_desc->dev, "SPA-BDW not found for SPA-DCR %d\n",
			nfit_mem->spa_dcr->range_index);
	nfit_mem->bdw = NULL;
}

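/*
 * For a SPA-DCR range, look up the optional block-data-window resources
 * (the BDW, its memdev, and its interleave table) that reference this
 * dimm's control region index.
 */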
static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
	u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
	struct nfit_memdev *nfit_memdev;
	struct nfit_bdw *nfit_bdw;
	struct nfit_idt *nfit_idt;
	u16 idt_idx, range_index;

	list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
		if (nfit_bdw->bdw->region_index != dcr)
			continue;
		nfit_mem->bdw = nfit_bdw->bdw;
		break;
	}

	if (!nfit_mem->bdw)
		return;

	nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);

	if (!nfit_mem->spa_bdw)
		return;

	range_index = nfit_mem->spa_bdw->range_index;
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		if (nfit_memdev->memdev->range_index != range_index ||
				nfit_memdev->memdev->region_index != dcr)
			continue;
		nfit_mem->memdev_bdw = nfit_memdev->memdev;
		idt_idx = nfit_memdev->memdev->interleave_index;
		list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
			if (nfit_idt->idt->interleave_index != idt_idx)
				continue;
			nfit_mem->idt_bdw = nfit_idt->idt;
			break;
		}
		break;
	}
}

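/*
 * __nfit_mem_init() builds (or extends) one struct nfit_mem per dimm
 * device handle, linking together the SPA, MEMDEV, DCR, BDW, IDT and
 * flush-hint tables that describe that dimm.
 */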
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes, when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kzalloc(acpi_desc->dev,
					flush->hint_count
					* sizeof(struct resource), GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges, record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}

static int nfit_mem_cmp(void *priv, struct list_head *_a, struct list_head *_b)
{
	struct nfit_mem *a = container_of(_a, typeof(*a), list);
	struct nfit_mem *b = container_of(_b, typeof(*b), list);
	u32 handleA, handleB;

	handleA = __to_nfit_memdev(a)->device_handle;
	handleB = __to_nfit_memdev(b)->device_handle;
	if (handleA < handleB)
		return -1;
	else if (handleA > handleB)
		return 1;
	return 0;
}

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/*
	 * For each SPA-DCR or SPA-PMEM address range find its
	 * corresponding MEMDEV(s).  From each MEMDEV find the
	 * corresponding DCR.  Then, if we're operating on a SPA-DCR,
	 * try to find a SPA-BDW and a corresponding BDW that references
	 * the DCR.  Throw it all into an nfit_mem object.  Note, that
	 * BDWs are optional.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/*
	 * If a DIMM has failed to be mapped into SPA there will be no
	 * SPA entries above. Find and register all the unmapped DIMMs
	 * for reporting and recovery purposes.
	 */
	rc = __nfit_mem_init(acpi_desc, NULL);
	if (rc)
		return rc;

	list_sort(NULL, &acpi_desc->dimms, nfit_mem_cmp);

	return 0;
}

static ssize_t bus_dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);

	return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask);
}
static struct device_attribute dev_attr_bus_dsm_mask =
		__ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL);

static ssize_t revision_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->acpi_header.revision);
}
static DEVICE_ATTR_RO(revision);

static ssize_t hw_error_scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

	return sprintf(buf, "%d\n", acpi_desc->scrub_mode);
}

/*
 * The 'hw_error_scrub' attribute can have the following values written to it:
 * '0': Switch to the default mode where an exception will only insert
 *      the address of the memory error into the poison and badblocks lists.
 * '1': Enable a full scrub to happen if an exception for a memory error is
 *      received.
 */
static ssize_t hw_error_scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		switch (val) {
		case HW_ERROR_SCRUB_ON:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_ON;
			break;
		case HW_ERROR_SCRUB_OFF:
			acpi_desc->scrub_mode = HW_ERROR_SCRUB_OFF;
			break;
		default:
			rc = -EINVAL;
			break;
		}
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(hw_error_scrub);

/*
 * This shows the number of full Address Range Scrubs that have been
 * completed since driver load time. Userspace can wait on this using
 * select/poll etc. A '+' at the end indicates an ARS is in progress
 */
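/*
 * Example, assuming the typical "ndbus0" device name: reading
 * /sys/bus/nd/devices/ndbus0/nfit/scrub reports the completion count, and
 * writing '1' to the same file requests a new scrub via
 * acpi_nfit_ars_rescan().
 */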
static ssize_t scrub_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc = -ENXIO;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
				(work_busy(&acpi_desc->work)) ? "+\n" : "\n");
	}
	device_unlock(dev);
	return rc;
}

static ssize_t scrub_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct nvdimm_bus_descriptor *nd_desc;
	ssize_t rc;
	long val;

	rc = kstrtol(buf, 0, &val);
	if (rc)
		return rc;
	if (val != 1)
		return -EINVAL;

	device_lock(dev);
	nd_desc = dev_get_drvdata(dev);
	if (nd_desc) {
		struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);

		rc = acpi_nfit_ars_rescan(acpi_desc, 0);
	}
	device_unlock(dev);
	if (rc)
		return rc;
	return size;
}
static DEVICE_ATTR_RW(scrub);

static bool ars_supported(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	const unsigned long mask = 1 << ND_CMD_ARS_CAP | 1 << ND_CMD_ARS_START
		| 1 << ND_CMD_ARS_STATUS;

	return (nd_desc->cmd_mask & mask) == mask;
}

static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus))
		return 0;
	return a->mode;
}

static struct attribute *acpi_nfit_attributes[] = {
	&dev_attr_revision.attr,
	&dev_attr_scrub.attr,
	&dev_attr_hw_error_scrub.attr,
	&dev_attr_bus_dsm_mask.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_attributes,
	.is_visible = nfit_visible,
};

static const struct attribute_group *acpi_nfit_attribute_groups[] = {
	&nvdimm_bus_attribute_group,
	&acpi_nfit_attribute_group,
	NULL,
};

static struct acpi_nfit_memory_map *to_nfit_memdev(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return __to_nfit_memdev(nfit_mem);
}

static struct acpi_nfit_control_region *to_nfit_dcr(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	return nfit_mem->dcr;
}

static ssize_t handle_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->device_handle);
}
static DEVICE_ATTR_RO(handle);

static ssize_t phys_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_memory_map *memdev = to_nfit_memdev(dev);

	return sprintf(buf, "%#x\n", memdev->physical_id);
}
static DEVICE_ATTR_RO(phys_id);

static ssize_t vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->vendor_id));
}
static DEVICE_ATTR_RO(vendor);

static ssize_t rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->revision_id));
}
static DEVICE_ATTR_RO(rev_id);

static ssize_t device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->device_id));
}
static DEVICE_ATTR_RO(device);

static ssize_t subsystem_vendor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_vendor_id));
}
static DEVICE_ATTR_RO(subsystem_vendor);

static ssize_t subsystem_rev_id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n",
			be16_to_cpu(dcr->subsystem_revision_id));
}
static DEVICE_ATTR_RO(subsystem_rev_id);

static ssize_t subsystem_device_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", be16_to_cpu(dcr->subsystem_device_id));
}
static DEVICE_ATTR_RO(subsystem_device);

static int num_nvdimm_formats(struct nvdimm *nvdimm)
{
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
	int formats = 0;

	if (nfit_mem->memdev_pmem)
		formats++;
	if (nfit_mem->memdev_bdw)
		formats++;
	return formats;
}

static ssize_t format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%04x\n", le16_to_cpu(dcr->code));
}
static DEVICE_ATTR_RO(format);

static ssize_t format1_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 handle;
	ssize_t rc = -ENXIO;
	struct nfit_mem *nfit_mem;
	struct nfit_memdev *nfit_memdev;
	struct acpi_nfit_desc *acpi_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	nfit_mem = nvdimm_provider_data(nvdimm);
	acpi_desc = nfit_mem->acpi_desc;
	handle = to_nfit_memdev(dev)->device_handle;

	/* assumes DIMMs have at most 2 published interface codes */
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nfit_dcr *nfit_dcr;

		if (memdev->device_handle != handle)
			continue;

		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != memdev->region_index)
				continue;
			if (nfit_dcr->dcr->code == dcr->code)
				continue;
			rc = sprintf(buf, "0x%04x\n",
					le16_to_cpu(nfit_dcr->dcr->code));
			break;
		}
		if (rc != -ENXIO)
			break;
	}
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
static DEVICE_ATTR_RO(format1);

static ssize_t formats_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", num_nvdimm_formats(nvdimm));
}
static DEVICE_ATTR_RO(formats);

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	return sprintf(buf, "0x%08x\n", be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(serial);

static ssize_t family_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%d\n", nfit_mem->family);
}
static DEVICE_ATTR_RO(family);

static ssize_t dsm_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);

	if (nfit_mem->family < 0)
		return -ENXIO;
	return sprintf(buf, "%#lx\n", nfit_mem->dsm_mask);
}
static DEVICE_ATTR_RO(dsm_mask);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u16 flags = to_nfit_memdev(dev)->flags;

	return sprintf(buf, "%s%s%s%s%s%s%s\n",
		flags & ACPI_NFIT_MEM_SAVE_FAILED ? "save_fail " : "",
		flags & ACPI_NFIT_MEM_RESTORE_FAILED ? "restore_fail " : "",
		flags & ACPI_NFIT_MEM_FLUSH_FAILED ? "flush_fail " : "",
		flags & ACPI_NFIT_MEM_NOT_ARMED ? "not_armed " : "",
		flags & ACPI_NFIT_MEM_HEALTH_OBSERVED ? "smart_event " : "",
		flags & ACPI_NFIT_MEM_MAP_FAILED ? "map_fail " : "",
		flags & ACPI_NFIT_MEM_HEALTH_ENABLED ? "smart_notify " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t id_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct acpi_nfit_control_region *dcr = to_nfit_dcr(dev);

	if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID)
		return sprintf(buf, "%04x-%02x-%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				dcr->manufacturing_location,
				be16_to_cpu(dcr->manufacturing_date),
				be32_to_cpu(dcr->serial_number));
	else
		return sprintf(buf, "%04x-%08x\n",
				be16_to_cpu(dcr->vendor_id),
				be32_to_cpu(dcr->serial_number));
}
static DEVICE_ATTR_RO(id);

static struct attribute *acpi_nfit_dimm_attributes[] = {
	&dev_attr_handle.attr,
	&dev_attr_phys_id.attr,
	&dev_attr_vendor.attr,
	&dev_attr_device.attr,
	&dev_attr_rev_id.attr,
	&dev_attr_subsystem_vendor.attr,
	&dev_attr_subsystem_device.attr,
	&dev_attr_subsystem_rev_id.attr,
	&dev_attr_format.attr,
	&dev_attr_formats.attr,
	&dev_attr_format1.attr,
	&dev_attr_serial.attr,
	&dev_attr_flags.attr,
	&dev_attr_id.attr,
	&dev_attr_family.attr,
	&dev_attr_dsm_mask.attr,
	NULL,
};

static umode_t acpi_nfit_dimm_attr_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!to_nfit_dcr(dev)) {
		/* Without a dcr only the memdev attributes can be surfaced */
		if (a == &dev_attr_handle.attr || a == &dev_attr_phys_id.attr
				|| a == &dev_attr_flags.attr
				|| a == &dev_attr_family.attr
				|| a == &dev_attr_dsm_mask.attr)
			return a->mode;
		return 0;
	}

	if (a == &dev_attr_format1.attr && num_nvdimm_formats(nvdimm) <= 1)
		return 0;
	return a->mode;
}

static const struct attribute_group acpi_nfit_dimm_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_dimm_attributes,
	.is_visible = acpi_nfit_dimm_attr_visible,
};

static const struct attribute_group *acpi_nfit_dimm_attribute_groups[] = {
	&nvdimm_attribute_group,
	&nd_device_attribute_group,
	&acpi_nfit_dimm_attribute_group,
	NULL,
};

static struct nvdimm *acpi_nfit_dimm_by_handle(struct acpi_nfit_desc *acpi_desc,
		u32 device_handle)
{
	struct nfit_mem *nfit_mem;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
		if (__to_nfit_memdev(nfit_mem)->device_handle == device_handle)
			return nfit_mem->nvdimm;

	return NULL;
}

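/*
 * DIMM health notifications: acpi_nfit_add_dimm() registers
 * acpi_nvdimm_notify() on each dimm's ACPI handle, and the handler below
 * pokes the "flags" sysfs dirent so userspace poll()ers see health flag
 * updates.
 */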
void __acpi_nvdimm_notify(struct device *dev, u32 event)
{
	struct nfit_mem *nfit_mem;
	struct acpi_nfit_desc *acpi_desc;

	dev_dbg(dev->parent, "%s: %s: event: %d\n", dev_name(dev), __func__,
			event);

	if (event != NFIT_NOTIFY_DIMM_HEALTH) {
		dev_dbg(dev->parent, "%s: unknown event: %d\n", dev_name(dev),
				event);
		return;
	}

	acpi_desc = dev_get_drvdata(dev->parent);
	if (!acpi_desc)
		return;

	/*
	 * If we successfully retrieved acpi_desc, then we know nfit_mem data
	 * is still valid.
	 */
	nfit_mem = dev_get_drvdata(dev);
	if (nfit_mem && nfit_mem->flags_attr)
		sysfs_notify_dirent(nfit_mem->flags_attr);
}
EXPORT_SYMBOL_GPL(__acpi_nvdimm_notify);

static void acpi_nvdimm_notify(acpi_handle handle, u32 event, void *data)
{
	struct acpi_device *adev = data;
	struct device *dev = &adev->dev;

	device_lock(dev->parent);
	__acpi_nvdimm_notify(dev, event);
	device_unlock(dev->parent);
}

static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
		struct nfit_mem *nfit_mem, u32 device_handle)
{
	struct acpi_device *adev, *adev_dimm;
	struct device *dev = acpi_desc->dev;
	unsigned long dsm_mask;
	const guid_t *guid;
	int i;
	int family = -1;

	/* nfit test assumes 1:1 relationship between commands and dsms */
	nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en;
	nfit_mem->family = NVDIMM_FAMILY_INTEL;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return 0;

	adev_dimm = acpi_find_child_device(adev, device_handle, false);
	nfit_mem->adev = adev_dimm;
	if (!adev_dimm) {
		dev_err(dev, "no ACPI.NFIT device with _ADR %#x, disabling...\n",
				device_handle);
		return force_enable_dimms ? 0 : -ENODEV;
	}

	if (ACPI_FAILURE(acpi_install_notify_handler(adev_dimm->handle,
		ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify, adev_dimm))) {
		dev_err(dev, "%s: notification registration failed\n",
				dev_name(&adev_dimm->dev));
		return -ENXIO;
	}

	/*
	 * Until standardization materializes we need to consider 4
	 * different command sets.  Note, that checking for function0 (bit0)
	 * tells us if any commands are reachable through this GUID.
	 */
	for (i = NVDIMM_FAMILY_INTEL; i <= NVDIMM_FAMILY_MSFT; i++)
		if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1))
			if (family < 0 || i == default_dsm_family)
				family = i;

	/* limit the supported commands to those that are publicly documented */
	nfit_mem->family = family;
	if (override_dsm_mask && !disable_vendor_specific)
		dsm_mask = override_dsm_mask;
	else if (nfit_mem->family == NVDIMM_FAMILY_INTEL) {
		dsm_mask = 0x3fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << ND_CMD_VENDOR);
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE1) {
		dsm_mask = 0x1c3c76;
	} else if (nfit_mem->family == NVDIMM_FAMILY_HPE2) {
		dsm_mask = 0x1fe;
		if (disable_vendor_specific)
			dsm_mask &= ~(1 << 8);
	} else if (nfit_mem->family == NVDIMM_FAMILY_MSFT) {
		dsm_mask = 0xffffffff;
	} else {
		dev_dbg(dev, "unknown dimm command family\n");
		nfit_mem->family = -1;
		/* DSMs are optional, continue loading the driver... */
		return 0;
	}

	guid = to_nfit_uuid(nfit_mem->family);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev_dimm->handle, guid, 1, 1ULL << i))
			set_bit(i, &nfit_mem->dsm_mask);

	return 0;
}

static void shutdown_dimm_notify(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct nfit_mem *nfit_mem;

	mutex_lock(&acpi_desc->init_mutex);
	/*
	 * Clear out the nfit_mem->flags_attr and shut down dimm event
	 * notifications.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_device *adev_dimm = nfit_mem->adev;

		if (nfit_mem->flags_attr) {
			sysfs_put(nfit_mem->flags_attr);
			nfit_mem->flags_attr = NULL;
		}
		if (adev_dimm)
			acpi_remove_notify_handler(adev_dimm->handle,
					ACPI_DEVICE_NOTIFY, acpi_nvdimm_notify);
	}
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_mem *nfit_mem;
	int dimm_count = 0, rc;
	struct nvdimm *nvdimm;

	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct acpi_nfit_flush_address *flush;
		unsigned long flags = 0, cmd_mask;
		struct nfit_memdev *nfit_memdev;
		u32 device_handle;
		u16 mem_flags;

		device_handle = __to_nfit_memdev(nfit_mem)->device_handle;
		nvdimm = acpi_nfit_dimm_by_handle(acpi_desc, device_handle);
		if (nvdimm) {
			dimm_count++;
			continue;
		}

		if (nfit_mem->bdw && nfit_mem->memdev_pmem)
			set_bit(NDD_ALIASING, &flags);

		/* collate flags across all memdevs for this dimm */
		list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
			struct acpi_nfit_memory_map *dimm_memdev;

			dimm_memdev = __to_nfit_memdev(nfit_mem);
			if (dimm_memdev->device_handle
					!= nfit_memdev->memdev->device_handle)
				continue;
			dimm_memdev->flags |= nfit_memdev->memdev->flags;
		}

		mem_flags = __to_nfit_memdev(nfit_mem)->flags;
		if (mem_flags & ACPI_NFIT_MEM_NOT_ARMED)
			set_bit(NDD_UNARMED, &flags);

		rc = acpi_nfit_add_dimm(acpi_desc, nfit_mem, device_handle);
		if (rc)
			continue;

		/*
		 * TODO: provide translation for non-NVDIMM_FAMILY_INTEL
		 * devices (i.e. from nd_cmd to acpi_dsm) to standardize the
		 * userspace interface.
		 */
		cmd_mask = 1UL << ND_CMD_CALL;
		if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
			cmd_mask |= nfit_mem->dsm_mask;

		flush = nfit_mem->nfit_flush ? nfit_mem->nfit_flush->flush
			: NULL;
		nvdimm = nvdimm_create(acpi_desc->nvdimm_bus, nfit_mem,
				acpi_nfit_dimm_attribute_groups,
				flags, cmd_mask, flush ? flush->hint_count : 0,
				nfit_mem->flush_wpq);
		if (!nvdimm)
			return -ENOMEM;

		nfit_mem->nvdimm = nvdimm;
		dimm_count++;

		if ((mem_flags & ACPI_NFIT_MEM_FAILED_MASK) == 0)
			continue;

		dev_info(acpi_desc->dev, "%s flags:%s%s%s%s%s\n",
				nvdimm_name(nvdimm),
		  mem_flags & ACPI_NFIT_MEM_SAVE_FAILED ? " save_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_RESTORE_FAILED ? " restore_fail":"",
		  mem_flags & ACPI_NFIT_MEM_FLUSH_FAILED ? " flush_fail" : "",
		  mem_flags & ACPI_NFIT_MEM_NOT_ARMED ? " not_armed" : "",
		  mem_flags & ACPI_NFIT_MEM_MAP_FAILED ? " map_fail" : "");

	}

	rc = nvdimm_bus_check_dimm_count(acpi_desc->nvdimm_bus, dimm_count);
	if (rc)
		return rc;

	/*
	 * Now that dimms are successfully registered, and async registration
	 * is flushed, attempt to enable event notification.
	 */
	list_for_each_entry(nfit_mem, &acpi_desc->dimms, list) {
		struct kernfs_node *nfit_kernfs;

		nvdimm = nfit_mem->nvdimm;
		nfit_kernfs = sysfs_get_dirent(nvdimm_kobj(nvdimm)->sd, "nfit");
		if (nfit_kernfs)
			nfit_mem->flags_attr = sysfs_get_dirent(nfit_kernfs,
					"flags");
		sysfs_put(nfit_kernfs);
		if (!nfit_mem->flags_attr)
			dev_warn(acpi_desc->dev, "%s: notifications disabled\n",
					nvdimm_name(nvdimm));
	}

	return devm_add_action_or_reset(acpi_desc->dev, shutdown_dimm_notify,
			acpi_desc);
}

/*
 * These constants are private because there are no kernel consumers of
 * these commands.
 */
enum nfit_aux_cmds {
        NFIT_CMD_TRANSLATE_SPA = 5,
        NFIT_CMD_ARS_INJECT_SET = 7,
        NFIT_CMD_ARS_INJECT_CLEAR = 8,
        NFIT_CMD_ARS_INJECT_GET = 9,
};

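/*
 * Probe which bus-scope functions the platform implements: ARS commands
 * that libnvdimm understands land in nd_desc->cmd_mask, while the
 * pass-through-only functions enumerated above are recorded in
 * bus_dsm_mask for use with ND_CMD_CALL.
 */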
static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS);
	struct acpi_device *adev;
	unsigned long dsm_mask;
	int i;

	nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en;
	adev = to_acpi_dev(acpi_desc);
	if (!adev)
		return;

	for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->cmd_mask);
	set_bit(ND_CMD_CALL, &nd_desc->cmd_mask);

	dsm_mask =
		(1 << ND_CMD_ARS_CAP) |
		(1 << ND_CMD_ARS_START) |
		(1 << ND_CMD_ARS_STATUS) |
		(1 << ND_CMD_CLEAR_ERROR) |
		(1 << NFIT_CMD_TRANSLATE_SPA) |
		(1 << NFIT_CMD_ARS_INJECT_SET) |
		(1 << NFIT_CMD_ARS_INJECT_CLEAR) |
		(1 << NFIT_CMD_ARS_INJECT_GET);
	for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
		if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i))
			set_bit(i, &nd_desc->bus_dsm_mask);
}

static ssize_t range_index_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->spa->range_index);
}
static DEVICE_ATTR_RO(range_index);

1677 1678 1679 1680 1681 1682 1683 1684 1685 1686
static ssize_t ecc_unit_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nfit_spa *nfit_spa = nd_region_provider_data(nd_region);

	return sprintf(buf, "%d\n", nfit_spa->clear_err_unit);
}
static DEVICE_ATTR_RO(ecc_unit_size);

static struct attribute *acpi_nfit_region_attributes[] = {
	&dev_attr_range_index.attr,
	&dev_attr_ecc_unit_size.attr,
	NULL,
};

static const struct attribute_group acpi_nfit_region_attribute_group = {
	.name = "nfit",
	.attrs = acpi_nfit_region_attributes,
};

static const struct attribute_group *acpi_nfit_region_attribute_groups[] = {
	&nd_region_attribute_group,
	&nd_mapping_attribute_group,
	&nd_device_attribute_group,
	&nd_numa_attribute_group,
	&acpi_nfit_region_attribute_group,
	NULL,
};

/* enough info to uniquely specify an interleave set */
struct nfit_set_info {
	struct nfit_set_info_map {
		u64 region_offset;
		u32 serial_number;
		u32 pad;
	} mapping[0];
};

struct nfit_set_info2 {
	struct nfit_set_info_map2 {
		u64 region_offset;
		u32 serial_number;
		u16 vendor_id;
		u16 manufacturing_date;
		u8  manufacturing_location;
		u8  reserved[31];
	} mapping[0];
};

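/*
 * Interleave set cookies are fletcher64 checksums over the sorted
 * per-mapping records above (v1.1 uses nfit_set_info, v1.2 adds the
 * vendor/manufacturing fields via nfit_set_info2), so a region can be
 * matched to its namespace labels across boots.
 */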
static size_t sizeof_nfit_set_info(int num_mappings)
{
	return sizeof(struct nfit_set_info)
		+ num_mappings * sizeof(struct nfit_set_info_map);
}

static size_t sizeof_nfit_set_info2(int num_mappings)
{
	return sizeof(struct nfit_set_info2)
		+ num_mappings * sizeof(struct nfit_set_info_map2);
}

static int cmp_map_compat(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	return memcmp(&map0->region_offset, &map1->region_offset,
			sizeof(u64));
}

static int cmp_map(const void *m0, const void *m1)
{
	const struct nfit_set_info_map *map0 = m0;
	const struct nfit_set_info_map *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

static int cmp_map2(const void *m0, const void *m1)
{
	const struct nfit_set_info_map2 *map0 = m0;
	const struct nfit_set_info_map2 *map1 = m1;

	if (map0->region_offset < map1->region_offset)
		return -1;
	else if (map0->region_offset > map1->region_offset)
		return 1;
	return 0;
}

/* Retrieve the nth entry referencing this spa */
static struct acpi_nfit_memory_map *memdev_from_spa(
		struct acpi_nfit_desc *acpi_desc, u16 range_index, int n)
{
	struct nfit_memdev *nfit_memdev;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list)
		if (nfit_memdev->memdev->range_index == range_index)
			if (n-- == 0)
				return nfit_memdev->memdev;
	return NULL;
}

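/*
 * Build the interleave-set cookies for a region: gather per-DIMM
 * identification data for each mapping, sort by region_offset, and
 * fletcher64 the result.  cookie1 covers the v1.1 namespace label format,
 * cookie2 the v1.2 format, and altcookie preserves the byte-wise (compat)
 * sort order.  The sorted v1.2 info is also used to record each mapping's
 * interleave position.
 */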
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc,
		struct acpi_nfit_system_address *spa)
{
	struct device *dev = acpi_desc->dev;
	struct nd_interleave_set *nd_set;
	u16 nr = ndr_desc->num_mappings;
	struct nfit_set_info2 *info2;
	struct nfit_set_info *info;
	int i;

	nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL);
	if (!nd_set)
		return -ENOMEM;
	ndr_desc->nd_set = nd_set;
	guid_copy(&nd_set->type_guid, (guid_t *) spa->range_guid);

	info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL);
	if (!info2)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nfit_set_info_map *map = &info->mapping[i];
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;
		struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
		struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc,
				spa->range_index, i);
		struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

		if (!memdev || !nfit_mem->dcr) {
			dev_err(dev, "%s: failed to find DCR\n", __func__);
			return -ENODEV;
		}

		map->region_offset = memdev->region_offset;
		map->serial_number = dcr->serial_number;

		map2->region_offset = memdev->region_offset;
		map2->serial_number = dcr->serial_number;
		map2->vendor_id = dcr->vendor_id;
		map2->manufacturing_date = dcr->manufacturing_date;
		map2->manufacturing_location = dcr->manufacturing_location;
	}

	/* v1.1 namespaces */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map, NULL);
	nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* v1.2 namespaces */
	sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2),
			cmp_map2, NULL);
	nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0);

	/* support v1.1 namespaces created with the wrong sort order */
	sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map),
			cmp_map_compat, NULL);
	nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0);

	/* record the result of the sort for the mapping position */
	for (i = 0; i < nr; i++) {
		struct nfit_set_info_map2 *map2 = &info2->mapping[i];
		int j;

		for (j = 0; j < nr; j++) {
			struct nd_mapping_desc *mapping = &ndr_desc->mapping[j];
			struct nvdimm *nvdimm = mapping->nvdimm;
			struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
			struct acpi_nfit_control_region *dcr = nfit_mem->dcr;

			if (map2->serial_number == dcr->serial_number &&
			    map2->vendor_id == dcr->vendor_id &&
			    map2->manufacturing_date == dcr->manufacturing_date &&
			    map2->manufacturing_location
				    == dcr->manufacturing_location) {
				mapping->position = i;
				break;
			}
		}
	}

	ndr_desc->nd_set = nd_set;
	devm_kfree(dev, info);
	devm_kfree(dev, info2);

	return 0;
}

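/*
 * Translate a linear offset within a BLK aperture into the actual MMIO
 * offset: account for which interleave line the offset lands on and how
 * many interleave tables must be skipped to reach it.
 */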
static u64 to_interleave_offset(u64 offset, struct nfit_blk_mmio *mmio)
{
	struct acpi_nfit_interleave *idt = mmio->idt;
	u32 sub_line_offset, line_index, line_offset;
	u64 line_no, table_skip_count, table_offset;

	line_no = div_u64_rem(offset, mmio->line_size, &sub_line_offset);
	table_skip_count = div_u64_rem(line_no, mmio->num_lines, &line_index);
	line_offset = idt->line_offset[line_index]
		* mmio->line_size;
	table_offset = table_skip_count * mmio->table_size;

	return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}

static u32 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
	u64 offset = nfit_blk->stat_offset + mmio->size * bw;
	const u32 STATUS_MASK = 0x80000037;

	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	return readl(mmio->addr.base + offset) & STATUS_MASK;
}

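/*
 * Program the block-data-window command register: encode the DPA (in
 * cache-line units), the transfer length, and the read/write direction
 * into a single command word, write it to this window's control register,
 * flush, and read it back when the DIMM requires a latch for the new
 * command to take effect.
 */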
static void write_blk_ctl(struct nfit_blk *nfit_blk, unsigned int bw,
		resource_size_t dpa, unsigned int len, unsigned int write)
{
	u64 cmd, offset;
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];

	enum {
		BCW_OFFSET_MASK = (1ULL << 48)-1,
		BCW_LEN_SHIFT = 48,
		BCW_LEN_MASK = (1ULL << 8) - 1,
		BCW_CMD_SHIFT = 56,
	};

	cmd = (dpa >> L1_CACHE_SHIFT) & BCW_OFFSET_MASK;
	len = len >> L1_CACHE_SHIFT;
	cmd |= ((u64) len & BCW_LEN_MASK) << BCW_LEN_SHIFT;
	cmd |= ((u64) write) << BCW_CMD_SHIFT;

	offset = nfit_blk->cmd_offset + mmio->size * bw;
	if (mmio->num_lines)
		offset = to_interleave_offset(offset, mmio);

	writeq(cmd, mmio->addr.base + offset);
	nvdimm_flush(nfit_blk->nd_region);

	if (nfit_blk->dimm_flags & NFIT_BLK_DCR_LATCH)
		readq(mmio->addr.base + offset);
}

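/*
 * Perform one aperture-sized transfer on a single lane: program the
 * command register, copy through the aperture (line-by-line when the
 * window is interleaved, invalidating the cache first on reads if the
 * DIMM requires it), then flush posted writes before checking the status
 * register.
 */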
static int acpi_nfit_blk_single_io(struct nfit_blk *nfit_blk,
		resource_size_t dpa, void *iobuf, size_t len, int rw,
		unsigned int lane)
{
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	unsigned int copied = 0;
	u64 base_offset;
	int rc;

	base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
		+ lane * mmio->size;
	write_blk_ctl(nfit_blk, lane, dpa, len, rw);
	while (len) {
		unsigned int c;
		u64 offset;

		if (mmio->num_lines) {
			u32 line_offset;

			offset = to_interleave_offset(base_offset + copied,
					mmio);
			div_u64_rem(offset, mmio->line_size, &line_offset);
			c = min_t(size_t, len, mmio->line_size - line_offset);
		} else {
			offset = base_offset + nfit_blk->bdw_offset;
			c = len;
		}

		if (rw)
			memcpy_flushcache(mmio->addr.aperture + offset, iobuf + copied, c);
		else {
			if (nfit_blk->dimm_flags & NFIT_BLK_READ_FLUSH)
				arch_invalidate_pmem((void __force *)
					mmio->addr.aperture + offset, c);

			memcpy(iobuf + copied, mmio->addr.aperture + offset, c);
		}

		copied += c;
		len -= c;
	}

	if (rw)
		nvdimm_flush(nfit_blk->nd_region);

	rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
	return rc;
}

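/*
 * Top-level BLK read/write entry point: acquire a lane, split the request
 * into aperture-sized chunks, and hand each chunk to
 * acpi_nfit_blk_single_io().
 */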
static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
		resource_size_t dpa, void *iobuf, u64 len, int rw)
{
	struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
	struct nfit_blk_mmio *mmio = &nfit_blk->mmio[BDW];
	struct nd_region *nd_region = nfit_blk->nd_region;
	unsigned int lane, copied = 0;
	int rc = 0;

	lane = nd_region_acquire_lane(nd_region);
	while (len) {
		u64 c = min(len, mmio->size);

		rc = acpi_nfit_blk_single_io(nfit_blk, dpa + copied,
				iobuf + copied, c, rw, lane);
		if (rc)
			break;

		copied += c;
		len -= c;
	}
	nd_region_release_lane(nd_region, lane);

	return rc;
}

static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
		struct acpi_nfit_interleave *idt, u16 interleave_ways)
{
	if (idt) {
		mmio->num_lines = idt->line_count;
		mmio->line_size = idt->line_size;
		if (interleave_ways == 0)
			return -ENXIO;
		mmio->table_size = mmio->num_lines * interleave_ways
			* mmio->line_size;
	}

	return 0;
}

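/*
 * Query the per-DIMM flags that control BLK flushing and latching.  If
 * the DIMM does not implement the command, fall back to the conservative
 * defaults (latch required, reads need a cache invalidate).
 */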
static int acpi_nfit_blk_get_flags(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, struct nfit_blk *nfit_blk)
{
	struct nd_cmd_dimm_flags flags;
	int rc;

	memset(&flags, 0, sizeof(flags));
	rc = nd_desc->ndctl(nd_desc, nvdimm, ND_CMD_DIMM_FLAGS, &flags,
			sizeof(flags), NULL);

	if (rc >= 0 && flags.status == 0)
		nfit_blk->dimm_flags = flags.flags;
	else if (rc == -ENOTTY) {
		/* fall back to a conservative default */
		nfit_blk->dimm_flags = NFIT_BLK_DCR_LATCH | NFIT_BLK_READ_FLUSH;
		rc = 0;
	} else
		rc = -ENXIO;

	return rc;
}

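/*
 * Set up a BLK region at first activation: map the block data window and
 * control registers, initialize the interleave geometry for both, read
 * the DIMM flags, and validate that the command/status registers do not
 * straddle an interleave boundary.
 */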
static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
		struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
	struct nd_blk_region *ndbr = to_nd_blk_region(dev);
	struct nfit_blk_mmio *mmio;
	struct nfit_blk *nfit_blk;
	struct nfit_mem *nfit_mem;
	struct nvdimm *nvdimm;
	int rc;

	nvdimm = nd_blk_region_to_dimm(ndbr);
	nfit_mem = nvdimm_provider_data(nvdimm);
	if (!nfit_mem || !nfit_mem->dcr || !nfit_mem->bdw) {
		dev_dbg(dev, "%s: missing%s%s%s\n", __func__,
				nfit_mem ? "" : " nfit_mem",
				(nfit_mem && nfit_mem->dcr) ? "" : " dcr",
				(nfit_mem && nfit_mem->bdw) ? "" : " bdw");
		return -ENXIO;
	}

	nfit_blk = devm_kzalloc(dev, sizeof(*nfit_blk), GFP_KERNEL);
	if (!nfit_blk)
		return -ENOMEM;
	nd_blk_region_set_provider_data(ndbr, nfit_blk);
	nfit_blk->nd_region = to_nd_region(dev);

	/* map block aperture memory */
	nfit_blk->bdw_offset = nfit_mem->bdw->offset;
	mmio = &nfit_blk->mmio[BDW];
	mmio->addr.base = devm_nvdimm_memremap(dev, nfit_mem->spa_bdw->address,
                        nfit_mem->spa_bdw->length, nd_blk_memremap_flags(ndbr));
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->bdw->size;
	mmio->base_offset = nfit_mem->memdev_bdw->region_offset;
	mmio->idt = nfit_mem->idt_bdw;
	mmio->spa = nfit_mem->spa_bdw;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_bdw,
			nfit_mem->memdev_bdw->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init bdw interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	/* map block control memory */
	nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
	nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
	mmio = &nfit_blk->mmio[DCR];
	mmio->addr.base = devm_nvdimm_ioremap(dev, nfit_mem->spa_dcr->address,
			nfit_mem->spa_dcr->length);
	if (!mmio->addr.base) {
		dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
				nvdimm_name(nvdimm));
		return -ENOMEM;
	}
	mmio->size = nfit_mem->dcr->window_size;
	mmio->base_offset = nfit_mem->memdev_dcr->region_offset;
	mmio->idt = nfit_mem->idt_dcr;
	mmio->spa = nfit_mem->spa_dcr;
	rc = nfit_blk_init_interleave(mmio, nfit_mem->idt_dcr,
			nfit_mem->memdev_dcr->interleave_ways);
	if (rc) {
		dev_dbg(dev, "%s: %s failed to init dcr interleave\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	rc = acpi_nfit_blk_get_flags(nd_desc, nvdimm, nfit_blk);
	if (rc < 0) {
		dev_dbg(dev, "%s: %s failed get DIMM flags\n",
				__func__, nvdimm_name(nvdimm));
		return rc;
	}

	if (nvdimm_has_flush(nfit_blk->nd_region) < 0)
		dev_warn(dev, "unable to guarantee persistence of writes\n");

	if (mmio->line_size == 0)
		return 0;

	if ((u32) nfit_blk->cmd_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "cmd_offset crosses interleave boundary\n");
		return -ENXIO;
	} else if ((u32) nfit_blk->stat_offset % mmio->line_size
			+ 8 > mmio->line_size) {
		dev_dbg(dev, "stat_offset crosses interleave boundary\n");
		return -ENXIO;
	}

	return 0;
}

static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int cmd_rc, rc;

	cmd->address = spa->address;
	cmd->length = spa->length;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, cmd,
			sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

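/*
 * Kick off an address range scrub for this spa range.  The scrub type
 * follows the spa type (persistent vs volatile); the platform's
 * translated status is returned via cmd_rc.
 */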
static int ars_start(struct acpi_nfit_desc *acpi_desc, struct nfit_spa *nfit_spa)
{
	int rc;
	int cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = spa->address;
	ars_start.length = spa->length;
	ars_start.flags = acpi_desc->ars_start_flags;
	if (nfit_spa_type(spa) == NFIT_SPA_PM)
		ars_start.type = ND_ARS_PERSISTENT;
	else if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE)
		ars_start.type = ND_ARS_VOLATILE;
	else
		return -ENOTTY;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);

	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_continue(struct acpi_nfit_desc *acpi_desc)
{
	int rc, cmd_rc;
	struct nd_cmd_ars_start ars_start;
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;

	memset(&ars_start, 0, sizeof(ars_start));
	ars_start.address = ars_status->restart_address;
	ars_start.length = ars_status->restart_length;
	ars_start.type = ars_status->type;
	ars_start.flags = acpi_desc->ars_start_flags;
	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_START, &ars_start,
			sizeof(ars_start), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

static int ars_get_status(struct acpi_nfit_desc *acpi_desc)
{
	struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc;
	struct nd_cmd_ars_status *ars_status = acpi_desc->ars_status;
	int rc, cmd_rc;

	rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, ars_status,
			acpi_desc->ars_status_size, &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

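/*
 * Walk the ARS status payload and feed each complete error record into
 * the nvdimm core's poison list.  Records start at a 44 byte offset into
 * the payload; a short payload truncates the walk.
 */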
static int ars_status_process_records(struct acpi_nfit_desc *acpi_desc,
		struct nd_cmd_ars_status *ars_status)
{
	struct nvdimm_bus *nvdimm_bus = acpi_desc->nvdimm_bus;
	int rc;
	u32 i;

	/*
	 * First record starts at 44 byte offset from the start of the
	 * payload.
	 */
	if (ars_status->out_length < 44)
		return 0;
	for (i = 0; i < ars_status->num_records; i++) {
		/* only process full records */
		if (ars_status->out_length
				< 44 + sizeof(struct nd_ars_record) * (i + 1))
			break;
		rc = nvdimm_bus_add_poison(nvdimm_bus,
				ars_status->records[i].err_address,
				ars_status->records[i].length);
		if (rc)
			return rc;
	}
	if (i < ars_status->num_records)
		dev_warn(acpi_desc->dev, "detected truncated ars results\n");

	return 0;
}

static void acpi_nfit_remove_resource(void *data)
{
	struct resource *res = data;

	remove_resource(res);
}

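/*
 * Make sure the persistent memory range shows up in the iomem resource
 * tree.  If the range is already registered as persistent memory this is
 * a nop; otherwise insert a resource and arrange for it to be removed
 * when the device goes away.
 */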
static int acpi_nfit_insert_resource(struct acpi_nfit_desc *acpi_desc,
		struct nd_region_desc *ndr_desc)
{
	struct resource *res, *nd_res = ndr_desc->res;
	int is_pmem, ret;

	/* No operation if the region is already registered as PMEM */
	is_pmem = region_intersects(nd_res->start, resource_size(nd_res),
				IORESOURCE_MEM, IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem == REGION_INTERSECTS)
		return 0;

	res = devm_kzalloc(acpi_desc->dev, sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	res->name = "Persistent Memory";
	res->start = nd_res->start;
	res->end = nd_res->end;
	res->flags = IORESOURCE_MEM;
	res->desc = IORES_DESC_PERSISTENT_MEMORY;

	ret = insert_resource(&iomem_resource, res);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(acpi_desc->dev,
					acpi_nfit_remove_resource,
					res);
	if (ret)
		return ret;

	return 0;
}

static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
		struct nd_mapping_desc *mapping, struct nd_region_desc *ndr_desc,
		struct acpi_nfit_memory_map *memdev,
		struct nfit_spa *nfit_spa)
{
	struct nvdimm *nvdimm = acpi_nfit_dimm_by_handle(acpi_desc,
			memdev->device_handle);
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc *ndbr_desc;
	struct nfit_mem *nfit_mem;
	int blk_valid = 0, rc;

	if (!nvdimm) {
		dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
				spa->range_index, memdev->device_handle);
		return -ENODEV;
	}

	mapping->nvdimm = nvdimm;
	switch (nfit_spa_type(spa)) {
	case NFIT_SPA_PM:
	case NFIT_SPA_VOLATILE:
		mapping->start = memdev->address;
		mapping->size = memdev->region_size;
		break;
	case NFIT_SPA_DCR:
		nfit_mem = nvdimm_provider_data(nvdimm);
		if (!nfit_mem || !nfit_mem->bdw) {
			dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
					spa->range_index, nvdimm_name(nvdimm));
		} else {
			mapping->size = nfit_mem->bdw->capacity;
			mapping->start = nfit_mem->bdw->start_address;
			ndr_desc->num_lanes = nfit_mem->bdw->windows;
			blk_valid = 1;
		}

		ndr_desc->mapping = mapping;
		ndr_desc->num_mappings = blk_valid;
		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr_desc->enable = acpi_nfit_blk_region_enable;
		ndbr_desc->do_io = acpi_desc->blk_do_io;
		rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
		if (rc)
			return rc;
		nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			return -ENOMEM;
		break;
	}

	return 0;
}

static bool nfit_spa_is_virtual(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_PDISK ||
		nfit_spa_type(spa) == NFIT_SPA_PCD);
}

static bool nfit_spa_is_volatile(struct acpi_nfit_system_address *spa)
{
	return (nfit_spa_type(spa) == NFIT_SPA_VDISK ||
		nfit_spa_type(spa) == NFIT_SPA_VCD   ||
		nfit_spa_type(spa) == NFIT_SPA_VOLATILE);
}

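/*
 * Register an nd_region for this spa range: resolve every memdev that
 * references the range into a mapping, compute the interleave-set
 * cookies, and create a pmem, volatile, or blk region as dictated by the
 * spa type.  Range index 0 is treated as invalid for non-virtual ranges
 * and skipped.
 */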
static int acpi_nfit_register_region(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	static struct nd_mapping_desc mappings[ND_MAX_MAPPINGS];
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	struct nd_blk_region_desc ndbr_desc;
	struct nd_region_desc *ndr_desc;
	struct nfit_memdev *nfit_memdev;
	struct nvdimm_bus *nvdimm_bus;
	struct resource res;
	int count = 0, rc;

	if (nfit_spa->nd_region)
		return 0;

	if (spa->range_index == 0 && !nfit_spa_is_virtual(spa)) {
		dev_dbg(acpi_desc->dev, "%s: detected invalid spa index\n",
				__func__);
		return 0;
	}

	memset(&res, 0, sizeof(res));
	memset(&mappings, 0, sizeof(mappings));
	memset(&ndbr_desc, 0, sizeof(ndbr_desc));
	res.start = spa->address;
	res.end = res.start + spa->length - 1;
	ndr_desc = &ndbr_desc.ndr_desc;
	ndr_desc->res = &res;
	ndr_desc->provider_data = nfit_spa;
	ndr_desc->attr_groups = acpi_nfit_region_attribute_groups;
	if (spa->flags & ACPI_NFIT_PROXIMITY_VALID)
		ndr_desc->numa_node = acpi_map_pxm_to_online_node(
						spa->proximity_domain);
	else
		ndr_desc->numa_node = NUMA_NO_NODE;

	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct acpi_nfit_memory_map *memdev = nfit_memdev->memdev;
		struct nd_mapping_desc *mapping;

		if (memdev->range_index != spa->range_index)
			continue;
		if (count >= ND_MAX_MAPPINGS) {
			dev_err(acpi_desc->dev, "spa%d exceeds max mappings %d\n",
					spa->range_index, ND_MAX_MAPPINGS);
			return -ENXIO;
		}
		mapping = &mappings[count++];
		rc = acpi_nfit_init_mapping(acpi_desc, mapping, ndr_desc,
				memdev, nfit_spa);
		if (rc)
			goto out;
	}

	ndr_desc->mapping = mappings;
	ndr_desc->num_mappings = count;
	rc = acpi_nfit_init_interleave_set(acpi_desc, ndr_desc, spa);
	if (rc)
		goto out;

	nvdimm_bus = acpi_desc->nvdimm_bus;
	if (nfit_spa_type(spa) == NFIT_SPA_PM) {
		rc = acpi_nfit_insert_resource(acpi_desc, ndr_desc);
		if (rc) {
			dev_warn(acpi_desc->dev,
				"failed to insert pmem resource to iomem: %d\n",
				rc);
			goto out;
		}

		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_volatile(spa)) {
		nfit_spa->nd_region = nvdimm_volatile_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	} else if (nfit_spa_is_virtual(spa)) {
		nfit_spa->nd_region = nvdimm_pmem_region_create(nvdimm_bus,
				ndr_desc);
		if (!nfit_spa->nd_region)
			rc = -ENOMEM;
	}

 out:
	if (rc)
		dev_err(acpi_desc->dev, "failed to register spa range %d\n",
				nfit_spa->spa->range_index);
	return rc;
}

static int ars_status_alloc(struct acpi_nfit_desc *acpi_desc,
		u32 max_ars)
{
	struct device *dev = acpi_desc->dev;
	struct nd_cmd_ars_status *ars_status;

	if (acpi_desc->ars_status && acpi_desc->ars_status_size >= max_ars) {
		memset(acpi_desc->ars_status, 0, acpi_desc->ars_status_size);
		return 0;
	}

	if (acpi_desc->ars_status)
		devm_kfree(dev, acpi_desc->ars_status);
	acpi_desc->ars_status = NULL;
	ars_status = devm_kzalloc(dev, max_ars, GFP_KERNEL);
	if (!ars_status)
		return -ENOMEM;
	acpi_desc->ars_status = ars_status;
	acpi_desc->ars_status_size = max_ars;
	return 0;
}

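/*
 * Retrieve the current ARS results for a range: on first use query the
 * ARS capabilities to size the status buffer and sanity check that the
 * platform supports scrubbing this spa type, then pull the status payload
 * and convert its records into poison list entries.
 */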
static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	int rc;

	if (!nfit_spa->max_ars) {
		struct nd_cmd_ars_cap ars_cap;

		memset(&ars_cap, 0, sizeof(ars_cap));
		rc = ars_get_cap(acpi_desc, &ars_cap, nfit_spa);
		if (rc < 0)
			return rc;
		nfit_spa->max_ars = ars_cap.max_ars_out;
		nfit_spa->clear_err_unit = ars_cap.clear_err_unit;
		/* check that the supported scrub types match the spa type */
		if (nfit_spa_type(spa) == NFIT_SPA_VOLATILE &&
				((ars_cap.status >> 16) & ND_ARS_VOLATILE) == 0)
			return -ENOTTY;
		else if (nfit_spa_type(spa) == NFIT_SPA_PM &&
				((ars_cap.status >> 16) & ND_ARS_PERSISTENT) == 0)
			return -ENOTTY;
	}

	if (ars_status_alloc(acpi_desc, nfit_spa->max_ars))
		return -ENOMEM;

	rc = ars_get_status(acpi_desc);
	if (rc < 0 && rc != -ENOSPC)
		return rc;

	if (ars_status_process_records(acpi_desc, acpi_desc->ars_status))
		return -ENOMEM;

	return 0;
}

static void acpi_nfit_async_scrub(struct acpi_nfit_desc *acpi_desc,
		struct nfit_spa *nfit_spa)
{
	struct acpi_nfit_system_address *spa = nfit_spa->spa;
	unsigned int overflow_retry = scrub_overflow_abort;
	u64 init_ars_start = 0, init_ars_len = 0;
	struct device *dev = acpi_desc->dev;
	unsigned int tmo = scrub_timeout;
	int rc;

	if (!nfit_spa->ars_required || !nfit_spa->nd_region)
		return;

	rc = ars_start(acpi_desc, nfit_spa);
	/*
	 * If we timed out the initial scan we'll still be busy here,
	 * and will wait another timeout before giving up permanently.
	 */
	if (rc < 0 && rc != -EBUSY)
		return;

	do {
		u64 ars_start, ars_len;

		if (acpi_desc->cancel)
			break;
		rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);
		if (rc == -ENOTTY)
			break;
		if (rc == -EBUSY && !tmo) {
			dev_warn(dev, "range %d ars timeout, aborting\n",
					spa->range_index);
			break;
		}

		if (rc == -EBUSY) {
			/*
			 * Note, entries may be appended to the list
			 * while the lock is dropped, but the workqueue
			 * being active prevents entries being deleted /
			 * freed.
			 */
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			mutex_lock(&acpi_desc->init_mutex);
			continue;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			if (!init_ars_len) {
				init_ars_len = acpi_desc->ars_status->length;
				init_ars_start = acpi_desc->ars_status->address;
			}
			rc = ars_continue(acpi_desc);
		}

		if (rc < 0) {
			dev_warn(dev, "range %d ars continuation failed\n",
					spa->range_index);
			break;
		}

		if (init_ars_len) {
			ars_start = init_ars_start;
			ars_len = init_ars_len;
		} else {
			ars_start = acpi_desc->ars_status->address;
			ars_len = acpi_desc->ars_status->length;
		}
		dev_dbg(dev, "spa range: %d ars from %#llx + %#llx complete\n",
				spa->range_index, ars_start, ars_len);
		/* notify the region about new poison entries */
		nvdimm_region_notify(nfit_spa->nd_region,
				NVDIMM_REVALIDATE_POISON);
		break;
	} while (1);
}

static void acpi_nfit_scrub(struct work_struct *work)
{
	struct device *dev;
	u64 init_scrub_length = 0;
	struct nfit_spa *nfit_spa;
	u64 init_scrub_address = 0;
	bool init_ars_done = false;
	struct acpi_nfit_desc *acpi_desc;
	unsigned int tmo = scrub_timeout;
	unsigned int overflow_retry = scrub_overflow_abort;

	acpi_desc = container_of(work, typeof(*acpi_desc), work);
	dev = acpi_desc->dev;

	/*
	 * We scrub in 2 phases.  The first phase waits for any platform
	 * firmware initiated scrubs to complete and then we go search for the
	 * affected spa regions to mark them scanned.  In the second phase we
	 * initiate a directed scrub for every range that was not scrubbed in
	 * phase 1. If we're called for a 'rescan', we harmlessly pass through
	 * the first phase, but really only care about running phase 2, where
	 * regions can be notified of new poison.
	 */

	/* process platform firmware initiated scrubs */
 retry:
	mutex_lock(&acpi_desc->init_mutex);
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct nd_cmd_ars_status *ars_status;
		struct acpi_nfit_system_address *spa;
		u64 ars_start, ars_len;
		int rc;

		if (acpi_desc->cancel)
			break;

		if (nfit_spa->nd_region)
			continue;

		if (init_ars_done) {
			/*
			 * No need to re-query, we're now just
			 * reconciling all the ranges covered by the
			 * initial scrub
			 */
			rc = 0;
		} else
			rc = acpi_nfit_query_poison(acpi_desc, nfit_spa);

		if (rc == -ENOTTY) {
			/* no ars capability, just register spa and move on */
			acpi_nfit_register_region(acpi_desc, nfit_spa);
			continue;
		}

		if (rc == -EBUSY && !tmo) {
			/* fallthrough to directed scrub in phase 2 */
			dev_warn(dev, "timeout awaiting ars results, continuing...\n");
			break;
		} else if (rc == -EBUSY) {
			mutex_unlock(&acpi_desc->init_mutex);
			ssleep(1);
			tmo--;
			goto retry;
		}

		/* we got some results, but there are more pending... */
		if (rc == -ENOSPC && overflow_retry--) {
			ars_status = acpi_desc->ars_status;
			/*
			 * Record the original scrub range, so that we
			 * can recall all the ranges impacted by the
			 * initial scrub.
			 */
			if (!init_scrub_length) {
				init_scrub_length = ars_status->length;
				init_scrub_address = ars_status->address;
			}
			rc = ars_continue(acpi_desc);
			if (rc == 0) {
				mutex_unlock(&acpi_desc->init_mutex);
				goto retry;
			}
		}

		if (rc < 0) {
			/*
			 * Initial scrub failed, we'll give it one more
			 * try below...
			 */
			break;
		}

		/* We got some final results, record completed ranges */
		ars_status = acpi_desc->ars_status;
		if (init_scrub_length) {
			ars_start = init_scrub_address;
			ars_len = ars_start + init_scrub_length;
		} else {
			ars_start = ars_status->address;
			ars_len = ars_status->length;
		}
		spa = nfit_spa->spa;

		if (!init_ars_done) {
			init_ars_done = true;
			dev_dbg(dev, "init scrub %#llx + %#llx complete\n",
					ars_start, ars_len);
		}
		if (ars_start <= spa->address && ars_start + ars_len
				>= spa->address + spa->length)
			acpi_nfit_register_region(acpi_desc, nfit_spa);
	}

	/*
	 * For all the ranges not covered by an initial scrub we still
	 * want to see if there are errors, but it's ok to discover them
	 * asynchronously.
	 */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		/*
		 * Flag all the ranges that still need scrubbing, but
		 * register them now to make data available.
		 */
		if (!nfit_spa->nd_region) {
			nfit_spa->ars_required = 1;
			acpi_nfit_register_region(acpi_desc, nfit_spa);
		}
	}
	acpi_desc->init_complete = 1;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		acpi_nfit_async_scrub(acpi_desc, nfit_spa);
	acpi_desc->scrub_count++;
	acpi_desc->ars_start_flags = 0;
	if (acpi_desc->scrub_count_state)
		sysfs_notify_dirent(acpi_desc->scrub_count_state);
	mutex_unlock(&acpi_desc->init_mutex);
}

static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
			/* BLK regions don't need to wait for ars results */
			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
			if (rc)
				return rc;
		}

	acpi_desc->ars_start_flags = 0;
	if (!acpi_desc->cancel)
		queue_work(nfit_wq, &acpi_desc->work);
	return 0;
}

static int acpi_nfit_check_deletions(struct acpi_nfit_desc *acpi_desc,
		struct nfit_table_prev *prev)
{
	struct device *dev = acpi_desc->dev;

	if (!list_empty(&prev->spas) ||
			!list_empty(&prev->memdevs) ||
			!list_empty(&prev->dcrs) ||
			!list_empty(&prev->bdws) ||
			!list_empty(&prev->idts) ||
			!list_empty(&prev->flushes)) {
		dev_err(dev, "new nfit deletes entries (unsupported)\n");
		return -ENXIO;
	}
	return 0;
}

static int acpi_nfit_desc_init_scrub_attr(struct acpi_nfit_desc *acpi_desc)
{
	struct device *dev = acpi_desc->dev;
	struct kernfs_node *nfit;
	struct device *bus_dev;

	if (!ars_supported(acpi_desc->nvdimm_bus))
		return 0;

	bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);
	nfit = sysfs_get_dirent(bus_dev->kobj.sd, "nfit");
	if (!nfit) {
		dev_err(dev, "sysfs_get_dirent 'nfit' failed\n");
		return -ENODEV;
	}
	acpi_desc->scrub_count_state = sysfs_get_dirent(nfit, "scrub");
	sysfs_put(nfit);
	if (!acpi_desc->scrub_count_state) {
		dev_err(dev, "sysfs_get_dirent 'scrub' failed\n");
		return -ENODEV;
	}

	return 0;
}

static void acpi_nfit_unregister(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;

	nvdimm_bus_unregister(acpi_desc->nvdimm_bus);
}

int acpi_nfit_init(struct acpi_nfit_desc *acpi_desc, void *data, acpi_size sz)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_table_prev prev;
	const void *end;
	int rc;

	if (!acpi_desc->nvdimm_bus) {
		acpi_nfit_init_dsms(acpi_desc);

		acpi_desc->nvdimm_bus = nvdimm_bus_register(dev,
				&acpi_desc->nd_desc);
		if (!acpi_desc->nvdimm_bus)
			return -ENOMEM;

		rc = devm_add_action_or_reset(dev, acpi_nfit_unregister,
				acpi_desc);
		if (rc)
			return rc;

		rc = acpi_nfit_desc_init_scrub_attr(acpi_desc);
		if (rc)
			return rc;

		/* register this acpi_desc for mce notifications */
		mutex_lock(&acpi_desc_lock);
		list_add_tail(&acpi_desc->list, &acpi_descs);
		mutex_unlock(&acpi_desc_lock);
	}

	mutex_lock(&acpi_desc->init_mutex);

	INIT_LIST_HEAD(&prev.spas);
	INIT_LIST_HEAD(&prev.memdevs);
	INIT_LIST_HEAD(&prev.dcrs);
	INIT_LIST_HEAD(&prev.bdws);
	INIT_LIST_HEAD(&prev.idts);
	INIT_LIST_HEAD(&prev.flushes);

	list_cut_position(&prev.spas, &acpi_desc->spas,
				acpi_desc->spas.prev);
	list_cut_position(&prev.memdevs, &acpi_desc->memdevs,
				acpi_desc->memdevs.prev);
	list_cut_position(&prev.dcrs, &acpi_desc->dcrs,
				acpi_desc->dcrs.prev);
	list_cut_position(&prev.bdws, &acpi_desc->bdws,
				acpi_desc->bdws.prev);
	list_cut_position(&prev.idts, &acpi_desc->idts,
				acpi_desc->idts.prev);
	list_cut_position(&prev.flushes, &acpi_desc->flushes,
				acpi_desc->flushes.prev);

	end = data + sz;
	while (!IS_ERR_OR_NULL(data))
		data = add_table(acpi_desc, &prev, data, end);

	if (IS_ERR(data)) {
		dev_dbg(dev, "%s: nfit table parsing error: %ld\n", __func__,
				PTR_ERR(data));
		rc = PTR_ERR(data);
		goto out_unlock;
	}

	rc = acpi_nfit_check_deletions(acpi_desc, &prev);
	if (rc)
		goto out_unlock;

	rc = nfit_mem_init(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_dimms(acpi_desc);
	if (rc)
		goto out_unlock;

	rc = acpi_nfit_register_regions(acpi_desc);

 out_unlock:
	mutex_unlock(&acpi_desc->init_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(acpi_nfit_init);

struct acpi_nfit_flush_work {
	struct work_struct work;
	struct completion cmp;
};

static void flush_probe(struct work_struct *work)
{
	struct acpi_nfit_flush_work *flush;

	flush = container_of(work, typeof(*flush), work);
	complete(&flush->cmp);
}

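/*
 * Bus ->flush_probe() callback: make sure the initial NFIT parsing and
 * ARS work has settled before regions probe.  Rather than waiting for the
 * full scrub, queue a marker work item behind it and wait interruptibly
 * so userspace-driven probing can give up early.
 */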
static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
	struct device *dev = acpi_desc->dev;
	struct acpi_nfit_flush_work flush;
	int rc;

	/* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
	device_lock(dev);
	device_unlock(dev);

	/* bounce the init_mutex to make init_complete valid */
	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel || acpi_desc->init_complete) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	/*
	 * Scrub work could take 10s of seconds, userspace may give up so we
	 * need to be interruptible while waiting.
	 */
	INIT_WORK_ONSTACK(&flush.work, flush_probe);
	init_completion(&flush.cmp);
	queue_work(nfit_wq, &flush.work);
	mutex_unlock(&acpi_desc->init_mutex);

	rc = wait_for_completion_interruptible(&flush.cmp);
	cancel_work_sync(&flush.work);
	return rc;
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
		struct nvdimm *nvdimm, unsigned int cmd)
{
	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);

	if (nvdimm)
		return 0;
	if (cmd != ND_CMD_ARS_START)
		return 0;

	/*
	 * The kernel and userspace may race to initiate a scrub, but
	 * the scrub thread is prepared to lose that initial race.  It
	 * just needs guarantees that any ars it initiates are not
	 * interrupted by any intervening start requests from userspace.
	 */
	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	return 0;
}

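/*
 * Request a new on-demand scrub pass (e.g. from an uncorrectable memory
 * error notification): flag every persistent range as needing ARS and
 * re-queue the scrub work, unless a scrub is already in flight.
 */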
int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, u8 flags)
{
	struct device *dev = acpi_desc->dev;
	struct nfit_spa *nfit_spa;

	if (work_busy(&acpi_desc->work))
		return -EBUSY;

	mutex_lock(&acpi_desc->init_mutex);
	if (acpi_desc->cancel) {
		mutex_unlock(&acpi_desc->init_mutex);
		return 0;
	}

	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		struct acpi_nfit_system_address *spa = nfit_spa->spa;

		if (nfit_spa_type(spa) != NFIT_SPA_PM)
			continue;

		nfit_spa->ars_required = 1;
	}
	acpi_desc->ars_start_flags = flags;
	queue_work(nfit_wq, &acpi_desc->work);
	dev_dbg(dev, "%s: ars_scan triggered\n", __func__);
	mutex_unlock(&acpi_desc->init_mutex);

	return 0;
}

void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
{
	struct nvdimm_bus_descriptor *nd_desc;

	dev_set_drvdata(dev, acpi_desc);
	acpi_desc->dev = dev;
	acpi_desc->blk_do_io = acpi_nfit_blk_region_do_io;
	nd_desc = &acpi_desc->nd_desc;
	nd_desc->provider_name = "ACPI.NFIT";
	nd_desc->module = THIS_MODULE;
	nd_desc->ndctl = acpi_nfit_ctl;
	nd_desc->flush_probe = acpi_nfit_flush_probe;
	nd_desc->clear_to_send = acpi_nfit_clear_to_send;
	nd_desc->attr_groups = acpi_nfit_attribute_groups;

	INIT_LIST_HEAD(&acpi_desc->spas);
	INIT_LIST_HEAD(&acpi_desc->dcrs);
	INIT_LIST_HEAD(&acpi_desc->bdws);
	INIT_LIST_HEAD(&acpi_desc->idts);
	INIT_LIST_HEAD(&acpi_desc->flushes);
	INIT_LIST_HEAD(&acpi_desc->memdevs);
	INIT_LIST_HEAD(&acpi_desc->dimms);
	INIT_LIST_HEAD(&acpi_desc->list);
	mutex_init(&acpi_desc->init_mutex);
	INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
}
EXPORT_SYMBOL_GPL(acpi_nfit_desc_init);

static void acpi_nfit_put_table(void *table)
{
	acpi_put_table(table);
}

void acpi_nfit_shutdown(void *data)
{
	struct acpi_nfit_desc *acpi_desc = data;
	struct device *bus_dev = to_nvdimm_bus_dev(acpi_desc->nvdimm_bus);

	/*
	 * Destruct under acpi_desc_lock so that nfit_handle_mce does not
	 * race teardown
	 */
	mutex_lock(&acpi_desc_lock);
	list_del(&acpi_desc->list);
	mutex_unlock(&acpi_desc_lock);

	mutex_lock(&acpi_desc->init_mutex);
	acpi_desc->cancel = 1;
	mutex_unlock(&acpi_desc->init_mutex);

	/*
	 * Bounce the nvdimm bus lock to make sure any in-flight
	 * acpi_nfit_ars_rescan() submissions have had a chance to
	 * either submit or see ->cancel set.
	 */
	device_lock(bus_dev);
	device_unlock(bus_dev);

	flush_workqueue(nfit_wq);
}
EXPORT_SYMBOL_GPL(acpi_nfit_shutdown);

static int acpi_nfit_add(struct acpi_device *adev)
{
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	struct acpi_nfit_desc *acpi_desc;
	struct device *dev = &adev->dev;
	struct acpi_table_header *tbl;
	acpi_status status = AE_OK;
	acpi_size sz;
	int rc = 0;

	status = acpi_get_table(ACPI_SIG_NFIT, 0, &tbl);
	if (ACPI_FAILURE(status)) {
		/* This is ok, we could have an nvdimm hotplugged later */
		dev_dbg(dev, "failed to find NFIT at startup\n");
		return 0;
	}

	rc = devm_add_action_or_reset(dev, acpi_nfit_put_table, tbl);
	if (rc)
		return rc;
	sz = tbl->length;

	acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
	if (!acpi_desc)
		return -ENOMEM;
	acpi_nfit_desc_init(acpi_desc, &adev->dev);

	/* Save the acpi header for exporting the revision via sysfs */
	acpi_desc->acpi_header = *tbl;

	/* Evaluate _FIT and override with that if present */
	status = acpi_evaluate_object(adev->handle, "_FIT", NULL, &buf);
	if (ACPI_SUCCESS(status) && buf.length > 0) {
		union acpi_object *obj = buf.pointer;

		if (obj->type == ACPI_TYPE_BUFFER)
			rc = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
					obj->buffer.length);
		else
			dev_dbg(dev, "%s invalid type %d, ignoring _FIT\n",
				 __func__, (int) obj->type);
		kfree(buf.pointer);
	} else
		/* skip over the lead-in header table */
		rc = acpi_nfit_init(acpi_desc, (void *) tbl
				+ sizeof(struct acpi_table_nfit),
				sz - sizeof(struct acpi_table_nfit));

	if (rc)
		return rc;
	return devm_add_action_or_reset(dev, acpi_nfit_shutdown, acpi_desc);
}

static int acpi_nfit_remove(struct acpi_device *adev)
{
	/* see acpi_nfit_unregister */
	return 0;
}

static void acpi_nfit_update_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *obj;
	acpi_status status;
	int ret;

	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
		return;
	}

	if (!acpi_desc) {
		acpi_desc = devm_kzalloc(dev, sizeof(*acpi_desc), GFP_KERNEL);
		if (!acpi_desc)
			return;
		acpi_nfit_desc_init(acpi_desc, dev);
	} else {
		/*
		 * Finish previous registration before considering new
		 * regions.
		 */
		flush_workqueue(nfit_wq);
	}

	/* Evaluate _FIT */
	status = acpi_evaluate_object(handle, "_FIT", NULL, &buf);
	if (ACPI_FAILURE(status)) {
		dev_err(dev, "failed to evaluate _FIT\n");
		return;
	}

	obj = buf.pointer;
	if (obj->type == ACPI_TYPE_BUFFER) {
		ret = acpi_nfit_init(acpi_desc, obj->buffer.pointer,
				obj->buffer.length);
		if (ret)
			dev_err(dev, "failed to merge updated NFIT\n");
	} else
		dev_err(dev, "Invalid _FIT\n");
	kfree(buf.pointer);
}

static void acpi_nfit_uc_error_notify(struct device *dev, acpi_handle handle)
{
	struct acpi_nfit_desc *acpi_desc = dev_get_drvdata(dev);
	u8 flags = (acpi_desc->scrub_mode == HW_ERROR_SCRUB_ON) ?
			0 : ND_ARS_RETURN_PREV_DATA;

	acpi_nfit_ars_rescan(acpi_desc, flags);
}

void __acpi_nfit_notify(struct device *dev, acpi_handle handle, u32 event)
{
	dev_dbg(dev, "%s: event: 0x%x\n", __func__, event);

	switch (event) {
	case NFIT_NOTIFY_UPDATE:
		return acpi_nfit_update_notify(dev, handle);
	case NFIT_NOTIFY_UC_MEMORY_ERROR:
		return acpi_nfit_uc_error_notify(dev, handle);
	default:
		return;
	}
}
EXPORT_SYMBOL_GPL(__acpi_nfit_notify);

static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
{
	device_lock(&adev->dev);
	__acpi_nfit_notify(&adev->dev, adev->handle, event);
	device_unlock(&adev->dev);
}

static const struct acpi_device_id acpi_nfit_ids[] = {
	{ "ACPI0012", 0 },
	{ "", 0 },
};
MODULE_DEVICE_TABLE(acpi, acpi_nfit_ids);

static struct acpi_driver acpi_nfit_driver = {
	.name = KBUILD_MODNAME,
	.ids = acpi_nfit_ids,
	.ops = {
		.add = acpi_nfit_add,
		.remove = acpi_nfit_remove,
		.notify = acpi_nfit_notify,
	},
};

static __init int nfit_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_interleave) != 20);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_smbios) != 9);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_control_region) != 80);
	BUILD_BUG_ON(sizeof(struct acpi_nfit_data_region) != 40);

	guid_parse(UUID_VOLATILE_MEMORY, &nfit_uuid[NFIT_SPA_VOLATILE]);
	guid_parse(UUID_PERSISTENT_MEMORY, &nfit_uuid[NFIT_SPA_PM]);
	guid_parse(UUID_CONTROL_REGION, &nfit_uuid[NFIT_SPA_DCR]);
	guid_parse(UUID_DATA_REGION, &nfit_uuid[NFIT_SPA_BDW]);
	guid_parse(UUID_VOLATILE_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_VDISK]);
	guid_parse(UUID_VOLATILE_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_VCD]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_DISK, &nfit_uuid[NFIT_SPA_PDISK]);
	guid_parse(UUID_PERSISTENT_VIRTUAL_CD, &nfit_uuid[NFIT_SPA_PCD]);
	guid_parse(UUID_NFIT_BUS, &nfit_uuid[NFIT_DEV_BUS]);
	guid_parse(UUID_NFIT_DIMM, &nfit_uuid[NFIT_DEV_DIMM]);
	guid_parse(UUID_NFIT_DIMM_N_HPE1, &nfit_uuid[NFIT_DEV_DIMM_N_HPE1]);
	guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]);
	guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]);

	nfit_wq = create_singlethread_workqueue("nfit");
	if (!nfit_wq)
		return -ENOMEM;

	nfit_mce_register();
	ret = acpi_bus_register_driver(&acpi_nfit_driver);
	if (ret) {
		nfit_mce_unregister();
		destroy_workqueue(nfit_wq);
	}

	return ret;

}

static __exit void nfit_exit(void)
{
	nfit_mce_unregister();
	acpi_bus_unregister_driver(&acpi_nfit_driver);
	destroy_workqueue(nfit_wq);
	WARN_ON(!list_empty(&acpi_descs));
}

module_init(nfit_init);
module_exit(nfit_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");