/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <misc/cxl.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

/**
 * cmd_checkin() - checks in an AFU command
 * @cmd:	AFU command to checkin.
 *
 * Safe to pass commands that have already been checked in. Several
 * internal tracking fields are reset as part of the checkin. Note
 * that these are intentionally reset prior to toggling the free bit
 * to avoid clobbering values in the event that the command is checked
 * out right away.
 */
static void cmd_checkin(struct afu_cmd *cmd)
{
	cmd->rcb.scp = NULL;
	cmd->rcb.timeout = 0;
	cmd->sa.ioasc = 0;
	cmd->cmd_tmf = false;
	cmd->sa.host_use[0] = 0; /* clears both completion and retry bytes */

	if (unlikely(atomic_inc_return(&cmd->free) != 1)) {
		pr_err("%s: Freeing cmd (%d) that is not in use!\n",
		       __func__, cmd->slot);
		return;
	}

	pr_devel("%s: released cmd %p index=%d\n", __func__, cmd, cmd->slot);
}

/**
 * process_cmd_err() - command error handler
 * @cmd:	AFU command that experienced the error.
 * @scp:	SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct sisl_ioarcb *ioarcb;
	struct sisl_ioasa *ioasa;
	u32 resid;

	if (unlikely(!cmd))
		return;

	ioarcb = &(cmd->rcb);
	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		pr_debug("%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			 __func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		pr_debug("%s: cmd overrun cmd = %p scp = %p\n",
			 __func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	pr_debug("%s: cmd failed afu_rc=%d scsi_rc=%d fc_rc=%d "
		 "afu_extra=0x%X, scsi_extra=0x%X, fc_extra=0x%X\n",
		 __func__, ioasa->rc.afu_rc, ioasa->rc.scsi_rc,
		 ioasa->rc.fc_rc, ioasa->afu_extra, ioasa->scsi_extra,
		 ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/* If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then this error is handled elsewhere. If
				 * not, it must be handled here. This is
				 * probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ALLOC_FAILURE << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd:	AFU command that has completed.
 *
 * Prepares and submits a command that has either completed or timed out to
 * the SCSI stack. Checks the AFU command back into the command pool for
 * non-internal (rcb.scp populated) commands.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	bool cmd_is_tmf;

	spin_lock_irqsave(&cmd->slock, lock_flags);
	cmd->sa.host_use_b[0] |= B_DONE;
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	if (cmd->rcb.scp) {
		scp = cmd->rcb.scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		cmd_is_tmf = cmd->cmd_tmf;

		pr_debug_ratelimited("%s: calling scsi_done scp=%p result=%X "
				     "ioasc=%d\n", __func__, scp, scp->result,
				     cmd->sa.ioasc);

		scsi_dma_unmap(scp);
		scp->scsi_done(scp);

		if (cmd_is_tmf) {
			spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
			cfg->tmf_active = false;
			wake_up_all_locked(&cfg->tmf_waitq);
			spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		}
	} else
		complete(&cmd->cevent);
}

/**
 * context_reset() - timeout handler for AFU commands
 * @cmd:	AFU command that timed out.
 *
 * Sends a reset to the AFU.
 */
static void context_reset(struct afu_cmd *cmd)
{
	int nretry = 0;
	u64 rrin = 0x1;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;

	pr_debug("%s: cmd=%p\n", __func__, cmd);

	spin_lock_irqsave(&cmd->slock, lock_flags);

	/* Already completed? */
	if (cmd->sa.host_use_b[0] & B_DONE) {
		spin_unlock_irqrestore(&cmd->slock, lock_flags);
		return;
	}

	cmd->sa.host_use_b[0] |= (B_DONE | B_ERROR | B_TIMEOUT);
	spin_unlock_irqrestore(&cmd->slock, lock_flags);

	writeq_be(rrin, &afu->host_map->ioarrin);
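	/* Poll until the AFU no longer reflects the reset request */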
	do {
		rrin = readq_be(&afu->host_map->ioarrin);
		if (rrin != 0x1)
			break;
		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	dev_dbg(dev, "%s: returning rrin=0x%016llX nretry=%d\n",
		__func__, rrin, nretry);
}

/**
 * send_cmd() - sends an AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&afu->rrin_slock, lock_flags);
	if (--afu->room < 0) {
		room = readq_be(&afu->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			afu->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		afu->room = room - 1;
	}

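	/* Post the command by writing the address of its RCB to IOARRIN */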
	writeq_be((u64)&cmd->rcb, &afu->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&afu->rrin_slock, lock_flags);
	pr_devel("%s: cmd=%p len=%d ea=%p rc=%d\n", __func__, cmd,
		 cmd->rcb.data_len, (void *)cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu:	AFU associated with the host.
 * @cmd:	AFU command that was sent.
 */
static void wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
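	/* rcb.timeout is in seconds; allow up to double that before resetting */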
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		context_reset(cmd);

	if (unlikely(cmd->sa.ioasc != 0))
		pr_err("%s: CMD 0x%X failed, IOASC: flags 0x%X, afu_rc 0x%X, "
		       "scsi_rc 0x%X, fc_rc 0x%X\n", __func__, cmd->rcb.cdb[0],
		       cmd->sa.rc.flags, cmd->sa.rc.afu_rc, cmd->sa.rc.scsi_rc,
		       cmd->sa.rc.fc_rc);
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @afu:	AFU to checkout from.
 * @scp:	SCSI command from stack.
 * @tmfcmd:	TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_tmf(struct afu *afu, struct scsi_cmnd *scp, u64 tmfcmd)
{
	struct afu_cmd *cmd = sc_to_afucz(scp);

	u32 port_sel = scp->device->channel + 1;
	short lflag = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct device *dev = &cfg->dev->dev;
	ulong lock_flags;
	int rc = 0;
	ulong to;

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	cmd->cmd_tmf = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	lflag = SISL_REQ_FLAGS_TMF_CMD;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the command, for reuse during interrupt */
	cmd->rcb.scp = scp;
	cmd->parent = afu;
	spin_lock_init(&cmd->slock);

	/* Copy the CDB from the cmd passed in */
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
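	/* Wait up to 5 seconds for the TMF to complete */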
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		cfg->tmf_active = false;
		dev_err(dev, "%s: TMF timed out!\n", __func__);
		rc = -1;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
out:
	return rc;
}

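/**
 * afu_unmap() - release callback that unmaps the AFU MMIO space
 * @ref:	Reference count embedded within the AFU.
 *
 * Invoked by kref_put() when the last reference to the mapping is dropped.
 */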
static void afu_unmap(struct kref *ref)
{
	struct afu *afu = container_of(ref, struct afu, mapcount);

	if (likely(afu->afu_map)) {
		cxl_psa_unmap((void __iomem *)afu->afu_map);
		afu->afu_map = NULL;
	}
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host:	SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host:	SCSI host associated with device.
 * @scp:	SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afucz(scp);
	u32 port_sel = scp->device->channel + 1;
	int nseg, i, ncount;
	struct scatterlist *sg;
	ulong lock_flags;
	short lflag = 0;
	int rc = 0;
	int kref_got = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08X-%08X-%08X-%08X)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset!\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed!\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scp->scsi_done(scp);
		rc = 0;
		goto out;
	default:
		break;
	}

	kref_get(&cfg->afu->mapcount);
	kref_got = 1;

	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = port_sel;
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		lflag = SISL_REQ_FLAGS_HOST_WRITE;
	else
		lflag = SISL_REQ_FLAGS_HOST_READ;

	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN | lflag);

	/* Stash the scp in the reserved field, for reuse during interrupt */
	cmd->rcb.scp = scp;
	cmd->parent = afu;
	spin_lock_init(&cmd->slock);

	nseg = scsi_dma_map(scp);
	if (unlikely(nseg < 0)) {
		dev_err(dev, "%s: Fail DMA map! nseg=%d\n",
			__func__, nseg);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

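	/* The RCB carries a single data descriptor, so this loop effectively
	 * records the last (and typically only) scatter/gather element.
	 */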
	ncount = scsi_sg_count(scp);
	scsi_for_each_sg(scp, sg, ncount, i) {
		cmd->rcb.data_len = sg_dma_len(sg);
		cmd->rcb.data_ea = sg_dma_address(sg);
	}

	/* Copy the CDB from the scsi_cmnd passed in */
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	/* Send the command */
	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		scsi_dma_unmap(scp);

out:
	if (kref_got)
		kref_put(&afu->mapcount, afu_unmap);
	pr_devel("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg:	Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg:	Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cleans up all state associated with the command queue, and unmaps
 * the MMIO space.
 *
 *  - complete() will take care of commands we initiated (they'll be checked
 *  in as part of the cleanup that occurs after the completion)
 *
 *  - cmd_checkin() will take care of entries that we did not initiate and that
 *  have not (and will not) complete because they are sitting on a [now stale]
 *  hardware queue
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	int i;
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	if (likely(afu)) {
		for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
			cmd = &afu->cmd[i];
			complete(&cmd->cevent);
			if (!atomic_read(&cmd->free))
				cmd_checkin(cmd);
		}

		if (likely(afu->afu_map)) {
			cxl_psa_unmap((void __iomem *)afu->afu_map);
			afu->afu_map = NULL;
		}
		kref_put(&afu->mapcount, afu_unmap);
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

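	/* Tear down in waterfall fashion; each case falls through to the next */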
	switch (level) {
	case UNMAP_THREE:
		cxl_unmap_afu_irq(cfg->mcctx, 3, afu);
	case UNMAP_TWO:
		cxl_unmap_afu_irq(cfg->mcctx, 2, afu);
	case UNMAP_ONE:
		cxl_unmap_afu_irq(cfg->mcctx, 1, afu);
	case FREE_IRQ:
		cxl_free_afu_irqs(cfg->mcctx);
		/* fall through */
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:	Internal structure associated with the host.
 * @level:	Depth of allocation, where to begin waterfall tear down.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	if (!afu || !cfg->mcctx) {
		dev_err(dev, "%s: returning with NULL afu or MC\n", __func__);
		return;
	}

	rc = cxl_stop_context(cfg->mcctx);
	WARN_ON(rc);
	cfg->mcctx = NULL;
}

/**
 * term_afu() - terminates the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts
	 * 2) Unmap the problem state area
	 * 3) Stop the master context
	 */
	term_intr(cfg, UNMAP_THREE);
	if (cfg->afu)
		stop_afu(cfg);

	term_mc(cfg);

	pr_debug("%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:	Internal structure associated with the host.
 * @wait:	Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct sisl_global_map __iomem *global;
	struct dev_dependent_vals *ddv;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: The problem state area is not mapped\n",
			__func__);
		return;
	}

	global = &afu->afu_map->global;

	/* Notify AFU */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		reg = readq_be(&global->fc_regs[i][FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &global->fc_regs[i][FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		retry_cnt = 0;
		while (true) {
			status = readq_be(&global->fc_regs[i][FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
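			/* Back off a little longer on each retry */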
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev:	PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		pr_debug("%s: Device is disabled\n", __func__);
		return;
	}

	/* If a Task Management Function is active, wait for it to complete
	 * before continuing with remove.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		/* fall through */
	case INIT_STATE_AFU:
		cancel_work_sync(&cfg->work_q);
		term_afu(cfg);
	case INIT_STATE_PCI:
		pci_disable_device(pdev);
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	pr_debug("%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg:	Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	int i;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~12k, i.e. only one 64k page or up to four 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->afu_map = NULL;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		atomic_set(&cfg->afu->cmd[i].free, 1);
		cfg->afu->cmd[i].slot = i;
	}

out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(&pdev->dev, "%s: Cannot enable adapter\n",
				__func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "%s: scsi_add_host failed (rc=%d)\n",
			__func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port to come online
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
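		/* A reading of all FFs suggests the MMIO space is no longer
		 * available; cut the remaining retries in half.
		 */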
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port to go offline
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @delay_us:	Number of microseconds to delay between reading port status.
 * @nretry:	Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 *	-EINVAL when @delay_us is less than 1000
 */
static int wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	if (delay_us < 1000) {
		pr_err("%s: invalid delay specified %d\n", __func__, delay_us);
		return -EINVAL;
	}

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
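		/* A reading of all FFs suggests the MMIO space is no longer
		 * available; cut the remaining retries in half.
		 */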
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 * @wwpn:	The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go offline timed out\n",
			 __func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		pr_debug("%s: wait on port %d to go online timed out\n",
			 __func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:	AFU associated with the host that owns the specified FC port.
 * @port:	Port number being configured.
 * @fc_regs:	Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain the link with the device by switching the host to
 * use the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go offline timed out\n",
		       __func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		pr_err("%s: wait on port %d to go online timed out\n",
		       __func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	pr_debug("%s: returning port_sel=%lld\n", __func__, port_sel);
}

/*
 * Asynchronous interrupt information table
 */
static const struct asyc_intr_info ainfo[] = {
	{SISL_ASTATUS_FC0_OTHER, "other error", 0, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC0_LOGO, "target initiated LOGO", 0, 0},
	{SISL_ASTATUS_FC0_CRC_T, "CRC threshold exceeded", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_R, "login timed out, retrying", 0, LINK_RESET},
	{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
	{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
	{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
	{SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
	{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
	{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
	{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_R, "login timed out, retrying", 1, LINK_RESET},
	{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
	{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
	{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
	{SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
	{0x0, "", 0, 0}		/* terminator */
};

/**
 * find_ainfo() - locates and returns asynchronous interrupt information
 * @status:	Status code set by AFU on error.
 *
 * Return: The located information or NULL when the status code is invalid.
 */
static const struct asyc_intr_info *find_ainfo(u64 status)
{
	const struct asyc_intr_info *info;

	for (info = &ainfo[0]; info->status; info++)
		if (info->status == status)
			return info;

	return NULL;
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu:	AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	int i;
	u64 reg;

	/* Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents
	 * the AFU from sending further async interrupts when there is
	 * nobody to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to master context */
	reg = ((u64) (((afu->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	reg = readq_be(&afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &afu->afu_map->global.fc_regs[0][FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < NUM_FC_PORTS; i++) {
		writeq_be(0xFFFFFFFFU,
			  &afu->afu_map->global.fc_regs[i][FC_ERROR / 8]);
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	writeq_be(SISL_MSI_SYNC_ERROR, &afu->host_map->ctx_ctrl);
	writeq_be(SISL_ISTATUS_MASK, &afu->host_map->intr_mask);
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&afu->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		pr_err("%s: %llX: spurious interrupt, intr_status %016llX\n",
		       __func__, (u64)afu, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	pr_err("%s: %llX: unexpected interrupt, intr_status %016llX\n",
	       __func__, (u64)afu, reg);

	writeq_be(reg_unmasked, &afu->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	pr_debug("%s: returning rc=%d\n", __func__, IRQ_HANDLED);
	return IRQ_HANDLED;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct afu_cmd *cmd;
	bool toggle = afu->toggle;
	u64 entry,
	    *hrrq_start = afu->hrrq_start,
	    *hrrq_end = afu->hrrq_end,
	    *hrrq_curr = afu->hrrq_curr;

	/* Process however many RRQ entries that are ready */
	while (true) {
		entry = *hrrq_curr;

		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		cmd = (struct afu_cmd *)(entry & ~SISL_RESP_HANDLE_T_BIT);
		cmd_complete(cmd);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}
	}

	afu->hrrq_curr = hrrq_curr;
	afu->toggle = toggle;

	return IRQ_HANDLED;
}

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:	Interrupt number.
 * @data:	Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct afu *afu = (struct afu *)data;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg_unmasked;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	u64 reg;
	u8 port;
	int i;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (reg_unmasked == 0) {
		dev_err(dev, "%s: spurious interrupt, aintr_status 0x%016llX\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for (i = 0; reg_unmasked; i++, reg_unmasked = (reg_unmasked >> 1)) {
		info = find_ainfo(1ULL << i);
		if (((reg_unmasked & 0x1) == 0) || !info)
			continue;

		port = info->port;

		dev_err(dev, "%s: FC Port %d -> %s, fc_status 0x%08llX\n",
			__func__, port, info->desc,
		       readq_be(&global->fc_regs[port][FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&global->fc_regs[port][FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error 0x%08llX\n",
				__func__, port, reg);

			writeq_be(reg, &global->fc_regs[port][FC_ERROR / 8]);
			writeq_be(0, &global->fc_regs[port][FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			kref_get(&cfg->afu->mapcount);
			schedule_work(&cfg->work_q);
		}
	}

out:
	dev_dbg(dev, "%s: returning IRQ_HANDLED, afu=%p\n", __func__, afu);
	return IRQ_HANDLED;
}

/**
 * start_context() - starts the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: A success or failure value from CXL services.
 */
static int start_context(struct cxlflash_cfg *cfg)
{
	int rc = 0;

	rc = cxl_start_context(cfg->mcctx,
			       cfg->afu->work.work_element_descriptor,
			       NULL);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * read_vpd() - obtains the WWPNs from VPD
 * @cfg:	Internal structure associated with the host.
 * @wwpn:	Array of size NUM_FC_PORTS to pass back WWPNs
 *
 * Return: 0 on success, -errno on failure
 */
static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[])
{
	struct pci_dev *dev = cfg->dev;
	int rc = 0;
	int ro_start, ro_size, i, j, k;
	ssize_t vpd_size;
	char vpd_data[CXLFLASH_VPD_LEN];
	char tmp_buf[WWPN_BUF_LEN] = { 0 };
	char *wwpn_vpd_tags[NUM_FC_PORTS] = { "V5", "V6" };

	/* Get the VPD data from the device */
	vpd_size = cxl_read_adapter_vpd(dev, vpd_data, sizeof(vpd_data));
	if (unlikely(vpd_size <= 0)) {
		dev_err(&dev->dev, "%s: Unable to read VPD (size = %ld)\n",
		       __func__, vpd_size);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read only section offset */
	ro_start = pci_vpd_find_tag(vpd_data, 0, vpd_size,
				    PCI_VPD_LRDT_RO_DATA);
	if (unlikely(ro_start < 0)) {
		dev_err(&dev->dev, "%s: VPD Read-only data not found\n",
			__func__);
		rc = -ENODEV;
		goto out;
	}

	/* Get the read-only section size; cap it if it extends beyond the VPD */
	ro_size = pci_vpd_lrdt_size(&vpd_data[ro_start]);
	j = ro_size;
	i = ro_start + PCI_VPD_LRDT_TAG_SIZE;
	if (unlikely((i + j) > vpd_size)) {
		pr_debug("%s: Might need to read more VPD (%d > %ld)\n",
			 __func__, (i + j), vpd_size);
		ro_size = vpd_size - i;
	}

	/*
	 * Find the offset of the WWPN tag within the read only
	 * VPD data and validate the found field (partials are
	 * no good to us). Convert the ASCII data to an integer
	 * value. Note that we must copy to a temporary buffer
	 * because the conversion service requires that the ASCII
	 * string be terminated.
	 */
	for (k = 0; k < NUM_FC_PORTS; k++) {
		j = ro_size;
		i = ro_start + PCI_VPD_LRDT_TAG_SIZE;

		i = pci_vpd_find_info_keyword(vpd_data, i, j, wwpn_vpd_tags[k]);
		if (unlikely(i < 0)) {
			dev_err(&dev->dev, "%s: Port %d WWPN not found "
				"in VPD\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}

		j = pci_vpd_info_field_size(&vpd_data[i]);
		i += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (unlikely((i + j > vpd_size) || (j != WWPN_LEN))) {
			dev_err(&dev->dev, "%s: Port %d WWPN incomplete or "
				"VPD corrupt\n",
			       __func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(&dev->dev, "%s: Fail to convert port %d WWPN "
				"to integer\n", __func__, k);
			rc = -ENODEV;
			goto out;
		}
	}

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_pcr() - initialize the provisioning and control registers
 * @cfg:	Internal structure associated with the host.
 *
 * Also sets up fast access to the mapped registers and initializes AFU
 * command fields that never change.
 */
static void init_pcr(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct sisl_ctrl_map __iomem *ctrl_map;
	int i;

	for (i = 0; i < MAX_CONTEXT; i++) {
		ctrl_map = &afu->afu_map->ctrls[i].ctrl;
		/* Disrupt any clients that could be running */
		/* e.g. clients that survived a master restart */
		writeq_be(0, &ctrl_map->rht_start);
		writeq_be(0, &ctrl_map->rht_cnt_id);
		writeq_be(0, &ctrl_map->ctx_cap);
	}

	/* Copy frequently used fields into afu */
	afu->ctx_hndl = (u16) cxl_process_element(cfg->mcctx);
	afu->host_map = &afu->afu_map->hosts[afu->ctx_hndl].host;
	afu->ctrl_map = &afu->afu_map->ctrls[afu->ctx_hndl].ctrl;

	/* Program the Endian Control for the master context */
	writeq_be(SISL_ENDIAN_CTRL, &afu->host_map->endian_ctrl);

	/* Initialize cmd fields that never change */
	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		afu->cmd[i].rcb.ctx_id = afu->ctx_hndl;
		afu->cmd[i].rcb.msi = SISL_MSI_RRQ_UPDATED;
		afu->cmd[i].rcb.rrq = 0x0;
	}
}

/**
 * init_global() - initialize AFU global registers
 * @cfg:	Internal structure associated with the host.
 */
static int init_global(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	u64 wwpn[NUM_FC_PORTS];	/* wwpn of AFU ports */
	int i = 0, num_ports = 0;
	int rc = 0;
	u64 reg;

	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
		goto out;
	}

	pr_debug("%s: wwpn0=0x%llX wwpn1=0x%llX\n", __func__, wwpn[0], wwpn[1]);

	/* Set up RRQ in AFU for master issued cmds */
	writeq_be((u64) afu->hrrq_start, &afu->host_map->rrq_start);
	writeq_be((u64) afu->hrrq_end, &afu->host_map->rrq_end);

	/* AFU configuration */
	reg = readq_be(&afu->afu_map->global.regs.afu_config);
	reg |= SISL_AFUCONF_AR_ALL|SISL_AFUCONF_ENDIAN;
	/* enable all auto retry options and control endianness */
	/* leave others at default: */
	/* CTX_CAP write protected, mbox_r does not clear on read and */
	/* checker on if dual afu */
	writeq_be(reg, &afu->afu_map->global.regs.afu_config);

	/* Global port select: select either port */
	if (afu->internal_lun) {
		/* Only use port 0 */
		writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS - 1;
	} else {
		writeq_be(BOTH_PORTS, &afu->afu_map->global.regs.afu_port_sel);
		num_ports = NUM_FC_PORTS;
	}

	for (i = 0; i < num_ports; i++) {
		/* Unmask all errors (but they are still masked at AFU) */
		writeq_be(0, &afu->afu_map->global.fc_regs[i][FC_ERRMSK / 8]);
		/* Clear CRC error cnt & set a threshold */
		(void)readq_be(&afu->afu_map->global.
			       fc_regs[i][FC_CNT_CRCERR / 8]);
		writeq_be(MC_CRC_THRESH, &afu->afu_map->global.fc_regs[i]
			  [FC_CRC_THRESH / 8]);

		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i,
				     &afu->afu_map->global.fc_regs[i][0],
				     wwpn[i]);
		/* Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
	}

	/* Set up master's own CTX_CAP to allow real mode, host translation */
	/* tables, afu cmds and read/write GSCSI cmds. */
	/* First, unlock ctx_cap write by reading mbox */
	(void)readq_be(&afu->ctrl_map->mbox_r);	/* unlock ctx_cap */
	writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE |
		   SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD |
		   SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD),
		  &afu->ctrl_map->ctx_cap);
	/* Initialize heartbeat */
	afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb);

out:
	return rc;
}

/**
 * start_afu() - initializes and starts the AFU
 * @cfg:	Internal structure associated with the host.
 */
static int start_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd;

	int i = 0;
	int rc = 0;

	for (i = 0; i < CXLFLASH_NUM_CMDS; i++) {
		cmd = &afu->cmd[i];

		init_completion(&cmd->cevent);
		spin_lock_init(&cmd->slock);
		cmd->parent = afu;
	}

	init_pcr(cfg);

	/* After an AFU reset, RRQ entries are stale, clear them */
	memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));

	/* Initialize RRQ pointers */
	afu->hrrq_start = &afu->rrq_entry[0];
	afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
	afu->hrrq_curr = afu->hrrq_start;
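	/* RRQ entries were zeroed, so the first valid pass uses a toggle of 1 */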
	afu->toggle = 1;

	rc = init_global(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_intr() - setup interrupt handlers for the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static enum undo_level init_intr(struct cxlflash_cfg *cfg,
				 struct cxl_context *ctx)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level = UNDO_NOOP;

	rc = cxl_allocate_afu_irqs(ctx, 3);
	if (unlikely(rc)) {
		dev_err(dev, "%s: call to allocate_afu_irqs failed rc=%d!\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 1, cxlflash_sync_err_irq, afu,
			     "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 1 (SISL_MSI_SYNC_ERROR) map failed!\n",
			__func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 2, cxlflash_rrq_irq, afu,
			     "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 2 (SISL_MSI_RRQ_UPDATED) map failed!\n",
			__func__);
		level = UNMAP_ONE;
		goto out;
	}

	rc = cxl_map_afu_irq(ctx, 3, cxlflash_async_err_irq, afu,
			     "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: IRQ 3 (SISL_MSI_ASYNC_ERROR) map failed!\n",
			__func__);
		level = UNMAP_TWO;
		goto out;
	}
out:
	return level;
}

/**
 * init_mc() - create and register as the master context
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_mc(struct cxlflash_cfg *cfg)
{
	struct cxl_context *ctx;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
	enum undo_level level;

	ctx = cxl_get_context(cfg->dev);
	if (unlikely(!ctx)) {
		rc = -ENOMEM;
		goto ret;
	}
	cfg->mcctx = ctx;

	/* Set it up as a master with the CXL */
	cxl_set_master(ctx);

	/* During initialization reset the AFU to start from a clean slate */
	rc = cxl_afu_reset(cfg->mcctx);
	if (unlikely(rc)) {
		dev_err(dev, "%s: initial AFU reset failed rc=%d\n",
			__func__, rc);
		goto ret;
	}

	level = init_intr(cfg, ctx);
	if (unlikely(level)) {
		dev_err(dev, "%s: setting up interrupts failed rc=%d\n",
			__func__, rc);
		goto out;
	}

	/* This performs the equivalent of the CXL_IOCTL_START_WORK.
	 * The CXL_IOCTL_GET_PROCESS_ELEMENT is implicit in the process
	 * element (pe) that is embedded in the context (ctx)
	 */
	rc = start_context(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
		level = UNMAP_THREE;
		goto out;
	}
ret:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
out:
	term_intr(cfg, level);
	goto ret;
}

/**
 * init_afu() - setup as master context and start AFU
 * @cfg:	Internal structure associated with the host.
 *
 * This routine is a higher level of control for configuring the
 * AFU on probe and reset paths.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_afu(struct cxlflash_cfg *cfg)
{
	u64 reg;
	int rc = 0;
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;

	cxl_perst_reloads_same_image(cfg->cxl_afu, true);

	rc = init_mc(cfg);
	if (rc) {
		dev_err(dev, "%s: call to init_mc failed, rc=%d!\n",
			__func__, rc);
		goto out;
	}

	/* Map the entire MMIO space of the AFU */
	afu->afu_map = cxl_psa_map(cfg->mcctx);
	if (!afu->afu_map) {
		dev_err(dev, "%s: call to cxl_psa_map failed!\n", __func__);
		rc = -ENOMEM;
		goto err1;
	}
	kref_init(&afu->mapcount);

	/* No byte reversal when reading afu_version or the string is backwards */
	reg = readq(&afu->afu_map->global.regs.afu_version);
	memcpy(afu->version, &reg, sizeof(reg));
	afu->interface_version =
	    readq_be(&afu->afu_map->global.regs.interface_version);
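	/* A value of all FFs suggests an AFU that predates this register */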
	if ((afu->interface_version + 1) == 0) {
		pr_err("Back level AFU, please upgrade. AFU version %s "
		       "interface version 0x%llx\n", afu->version,
		       afu->interface_version);
		rc = -EINVAL;
		goto err2;
	}

	pr_debug("%s: afu version %s, interface version 0x%llX\n", __func__,
		 afu->version, afu->interface_version);

	rc = start_afu(cfg);
	if (rc) {
		dev_err(dev, "%s: call to start_afu failed, rc=%d!\n",
			__func__, rc);
		goto err2;
	}

	afu_err_intr_init(cfg->afu);
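	/* Seed the cached command room used by send_cmd() to pace submissions */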
	spin_lock_init(&afu->rrin_slock);
	afu->room = readq_be(&afu->host_map->cmd_room);

	/* Restore the LUN mappings */
	cxlflash_restore_luntable(cfg);
out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

err2:
	kref_put(&afu->mapcount, afu_unmap);
err1:
	term_intr(cfg, UNMAP_THREE);
	term_mc(cfg);
	goto out;
}

/**
 * cxlflash_afu_sync() - builds and sends an AFU sync command
 * @afu:	AFU associated with the host.
 * @ctx_hndl_u:	Identifies context requesting sync.
 * @res_hndl_u:	Identifies resource requesting sync.
 * @mode:	Type of sync to issue (lightweight, heavyweight, global).
 *
 * The AFU can only take one sync command at a time. This routine enforces this
 * limitation by using a mutex to provide exclusive access to the AFU during
 * the sync. This design point requires that calling threads not be in
 * interrupt context, since they may sleep while waiting for concurrent sync
 * operations to complete.
 *
 * AFU sync operations are only necessary and allowed when the device is
 * operating normally. When not operating normally, sync requests can occur as
 * part of cleaning up resources associated with an adapter prior to removal.
 * In this scenario, these requests are simply ignored (safe due to the AFU
 * going away).
 *
 * Return:
 *	0 on success
 *	-1 on failure
 */
int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
		      res_hndl_t res_hndl_u, u8 mode)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = NULL;
	char *buf = NULL;
	int rc = 0;
	static DEFINE_MUTEX(sync_active);

	if (cfg->state != STATE_NORMAL) {
		pr_debug("%s: Sync not required! (%u)\n", __func__, cfg->state);
		return 0;
	}

	mutex_lock(&sync_active);
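	/* Over-allocate so the command can be aligned with PTR_ALIGN below */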
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -1;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	init_completion(&cmd->cevent);
	spin_lock_init(&cmd->slock);
	cmd->parent = afu;

	pr_debug("%s: afu=%p cmd=%p %d\n", __func__, afu, cmd, ctx_hndl_u);

	cmd->rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD;
	cmd->rcb.ctx_id = afu->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = 0x0;	/* NA */
	cmd->rcb.lun_id = 0x0;	/* NA */
	cmd->rcb.data_len = 0x0;
	cmd->rcb.data_ea = 0x0;
	cmd->rcb.timeout = MC_AFU_SYNC_TIMEOUT;

	cmd->rcb.cdb[0] = 0xC0;	/* AFU Sync */
	cmd->rcb.cdb[1] = mode;

	/* The cdb is aligned, no unaligned accessors required */
	*((__be16 *)&cmd->rcb.cdb[2]) = cpu_to_be16(ctx_hndl_u);
	*((__be32 *)&cmd->rcb.cdb[4]) = cpu_to_be32(res_hndl_u);

	rc = send_cmd(afu, cmd);
	if (unlikely(rc))
		goto out;

	wait_resp(afu, cmd);

	/* B_ERROR is set on timeout */
	if (unlikely((cmd->sa.ioasc != 0) ||
		     (cmd->sa.host_use_b[0] & B_ERROR)))
		rc = -1;
out:
	mutex_unlock(&sync_active);
	kfree(buf);
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * afu_reset() - resets the AFU
 * @cfg:	Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int afu_reset(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset completes.
	 */

	term_afu(cfg);

	rc = init_afu(cfg);

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * drain_ioctls() - wait until all currently executing ioctls have completed
 * @cfg:	Internal structure associated with the host.
 *
 * Obtain write access to read/write semaphore that wraps ioctl
 * handling to 'drain' ioctls currently executing.
 */
static void drain_ioctls(struct cxlflash_cfg *cfg)
{
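	/*
	 * Ioctl handlers take ioctl_rwsem for read; taking it for write
	 * blocks until all in-flight ioctls have released it.
	 */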
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
}

/**
 * cxlflash_eh_device_reset_handler() - reset a single LUN
 * @scp:	SCSI command to send.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;
	struct afu *afu = cfg->afu;
	int rcr = 0;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

retry:
	switch (cfg->state) {
	case STATE_NORMAL:
		rcr = send_tmf(afu, scp, TMF_LUN_RESET);
		if (unlikely(rcr))
			rc = FAILED;
		break;
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		goto retry;
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_eh_host_reset_handler() - reset the host adapter
 * @scp:	SCSI command from stack identifying host.
 *
 * Following a reset, the state is evaluated again in case an EEH occurred
 * during the reset. In such a scenario, the host reset will either yield
 * until the EEH recovery is complete or return success or failure based
 * upon the current device state.
 *
 * Return:
 *	SUCCESS as defined in scsi/scsi.h
 *	FAILED as defined in scsi/scsi.h
 */
static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp)
{
	int rc = SUCCESS;
	int rcr = 0;
	struct Scsi_Host *host = scp->device->host;
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)host->hostdata;

	pr_debug("%s: (scp=%p) %d/%d/%d/%llu "
		 "cdb=(%08X-%08X-%08X-%08X)\n", __func__, scp,
		 host->host_no, scp->device->channel,
		 scp->device->id, scp->device->lun,
		 get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
		 get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	switch (cfg->state) {
	case STATE_NORMAL:
		cfg->state = STATE_RESET;
		drain_ioctls(cfg);
		cxlflash_mark_contexts_error(cfg);
		rcr = afu_reset(cfg);
		if (rcr) {
			rc = FAILED;
			cfg->state = STATE_FAILTERM;
		} else {
			cfg->state = STATE_NORMAL;
		}
		wake_up_all(&cfg->reset_waitq);
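		/* Give any EEH triggered by the reset a moment to surface */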
		ssleep(1);
		/* fall through */
	case STATE_RESET:
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_NORMAL)
			break;
		/* fall through */
	default:
		rc = FAILED;
		break;
	}

	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * cxlflash_change_queue_depth() - change the queue depth for the device
 * @sdev:	SCSI device destined for queue depth change.
 * @qdepth:	Requested queue depth value to set.
 *
 * The requested queue depth is capped to the maximum supported value.
 *
 * Return: The actual queue depth set.
 */
static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
	if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN)
		qdepth = CXLFLASH_MAX_CMDS_PER_LUN;

	scsi_change_queue_depth(sdev, qdepth);
	return sdev->queue_depth;
}

/**
 * cxlflash_show_port_status() - queries and presents the current port status
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_status(u32 port, struct afu *afu, char *buf)
{
	char *disp_status;
	u64 status;
	__be64 __iomem *fc_regs;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_regs = &afu->afu_map->global.fc_regs[port][0];
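	/* FC register macros are byte offsets; divide by 8 to index __be64 */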
	status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
}

/**
 * port0_show() - queries and presents the current status of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(0, afu, buf);
}

/**
 * port1_show() - queries and presents the current status of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_show(struct device *dev,
			  struct device_attribute *attr,
			  char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_status(1, afu, buf);
}

/**
 * lun_mode_show() - presents the current LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the LUN mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t lun_mode_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
}

/**
 * lun_mode_store() - sets the LUN mode of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the LUN mode.
 * @buf:	Buffer of length PAGE_SIZE containing the LUN mode in ASCII.
 * @count:	Length of data residing in @buf.
 *
 * The CXL Flash AFU supports a dummy LUN mode where the external
 * links and storage are not required. Space on the FPGA is used
 * to create 1 or 2 small LUNs which are presented to the system
 * as if they were a normal storage device. This feature is useful
 * during development and also provides manufacturing with a way
 * to test the AFU without an actual device.
 *
 * 0 = external LUN[s] (default)
 * 1 = internal LUN (1 x 64K, 512B blocks, id 0)
 * 2 = internal LUN (1 x 64K, 4K blocks, id 0)
 * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1)
 * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1)
 *
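 * For example, to configure a single internal LUN with 512B blocks (the
 * host instance number below is hypothetical):
 *
 *   echo 1 > /sys/class/scsi_host/host0/lun_mode
 *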
 * Return: The number of bytes consumed from @buf, i.e. @count.
 */
static ssize_t lun_mode_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;
	int rc;
	u32 lun_mode;

	rc = kstrtouint(buf, 10, &lun_mode);
	if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) {
		afu->internal_lun = lun_mode;

		/*
		 * When configured for internal LUN, there is only one channel,
		 * channel number 0, else there will be 2 (default).
		 */
		if (afu->internal_lun)
			shost->max_channel = 0;
		else
			shost->max_channel = NUM_FC_PORTS - 1;

		afu_reset(cfg);
		scsi_scan_host(cfg->host);
	}

	return count;
}

/**
 * ioctl_version_show() - presents the current ioctl version of the host
 * @dev:	Generic device associated with the host.
 * @attr:	Device attribute representing the ioctl version.
 * @buf:	Buffer of length PAGE_SIZE to report back the ioctl version.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t ioctl_version_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return scnprintf(buf, PAGE_SIZE, "%u\n", DK_CXLFLASH_VERSION_0);
}

/**
 * cxlflash_show_port_lun_table() - queries and presents the port LUN table
 * @port:	Desired port for status reporting.
 * @afu:	AFU owning the specified port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t cxlflash_show_port_lun_table(u32 port,
					    struct afu *afu,
					    char *buf)
{
	int i;
	ssize_t bytes = 0;
	__be64 __iomem *fc_port;

	if (port >= NUM_FC_PORTS)
		return 0;

	fc_port = &afu->afu_map->global.fc_port[port][0];

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llX\n", i, readq_be(&fc_port[i]));
	return bytes;
}

/**
 * port0_lun_table_show() - presents the current LUN table of port 0
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port0_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(0, afu, buf);
}

/**
 * port1_lun_table_show() - presents the current LUN table of port 1
 * @dev:	Generic device associated with the host owning the port.
 * @attr:	Device attribute representing the port.
 * @buf:	Buffer of length PAGE_SIZE to report back port status in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t port1_lun_table_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)shost->hostdata;
	struct afu *afu = cfg->afu;

	return cxlflash_show_port_lun_table(1, afu, buf);
}

/**
 * mode_show() - presents the current mode of the device
 * @dev:	Generic device associated with the device.
 * @attr:	Device attribute representing the device mode.
 * @buf:	Buffer of length PAGE_SIZE to report back the dev mode in ASCII.
 *
 * Return: The size of the ASCII string returned in @buf.
 */
static ssize_t mode_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
}

/*
 * Host attributes
 */
static DEVICE_ATTR_RO(port0);
static DEVICE_ATTR_RO(port1);
static DEVICE_ATTR_RW(lun_mode);
static DEVICE_ATTR_RO(ioctl_version);
static DEVICE_ATTR_RO(port0_lun_table);
static DEVICE_ATTR_RO(port1_lun_table);

static struct device_attribute *cxlflash_host_attrs[] = {
	&dev_attr_port0,
	&dev_attr_port1,
	&dev_attr_lun_mode,
	&dev_attr_ioctl_version,
	&dev_attr_port0_lun_table,
	&dev_attr_port1_lun_table,
	NULL
};

/*
 * Device attributes
 */
static DEVICE_ATTR_RO(mode);

static struct device_attribute *cxlflash_dev_attrs[] = {
	&dev_attr_mode,
	NULL
};

/*
 * Host template
 */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = CXLFLASH_ADAPTER_NAME,
	.info = cxlflash_driver_info,
	.ioctl = cxlflash_ioctl,
	.proc_name = CXLFLASH_NAME,
	.queuecommand = cxlflash_queuecommand,
	.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
	.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
	.change_queue_depth = cxlflash_change_queue_depth,
	.cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
	.can_queue = CXLFLASH_MAX_CMDS,
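	/* Pad the per-command private data so the AFU command can be aligned */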
	.cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1,
	.this_id = -1,
	.sg_tablesize = 1,	/* No scatter gather support */
	.max_sectors = CXLFLASH_MAX_SECTORS,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = cxlflash_host_attrs,
	.sdev_attrs = cxlflash_dev_attrs,
};

/*
 * Device dependent values
 */
static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS,
					0ULL };
static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS,
					CXLFLASH_NOTIFY_SHUTDOWN };

/*
 * PCI device binding table
 */
static struct pci_device_id cxlflash_pci_table[] = {
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals},
	{PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals},
	{}
};

MODULE_DEVICE_TABLE(pci, cxlflash_pci_table);

/**
 * cxlflash_worker_thread() - work thread handler for the AFU
 * @work:	Work structure contained within the cxlflash_cfg associated with the host.
 *
 * Handles the following events:
 * - Link reset, which cannot be performed in interrupt context as it can
 *   block for up to a few seconds
 * - Rescanning the host
 */
static void cxlflash_worker_thread(struct work_struct *work)
{
	struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg,
						work_q);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	int port;
	ulong lock_flags;

	/* Avoid MMIO if the device has failed */
	if (cfg->state != STATE_NORMAL)
		return;

	spin_lock_irqsave(cfg->host->host_lock, lock_flags);

	if (cfg->lr_state == LINK_RESET_REQUIRED) {
		port = cfg->lr_port;
		if (port < 0)
			dev_err(dev, "%s: invalid port index %d\n",
				__func__, port);
		else {
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			afu_link_reset(afu, port,
				       &afu->afu_map->global.fc_regs[port][0]);
			spin_lock_irqsave(cfg->host->host_lock, lock_flags);
		}

		cfg->lr_state = LINK_RESET_COMPLETE;
	}

	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

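	/* Scan the host at most once per outstanding scan request */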
	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
		scsi_scan_host(cfg->host);
	kref_put(&afu->mapcount, afu_unmap);
}

/**
 * cxlflash_probe() - PCI entry point to add host
 * @pdev:	PCI device associated with the host.
 * @dev_id:	PCI device id associated with device.
 *
 * Return: 0 on success, -errno on failure
 */
static int cxlflash_probe(struct pci_dev *pdev,
			  const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host;
	struct cxlflash_cfg *cfg = NULL;
	struct dev_dependent_vals *ddv;
	int rc = 0;

	dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n",
		__func__, pdev->irq);

	ddv = (struct dev_dependent_vals *)dev_id->driver_data;
	driver_template.max_sectors = ddv->max_sectors;

	host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg));
	if (!host) {
		dev_err(&pdev->dev, "%s: call to scsi_host_alloc failed!\n",
			__func__);
		rc = -ENOMEM;
		goto out;
	}

	host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS;
	host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET;
	host->max_channel = NUM_FC_PORTS - 1;
	host->unique_id = host->host_no;
	host->max_cmd_len = CXLFLASH_MAX_CDB_LEN;

	cfg = (struct cxlflash_cfg *)host->hostdata;
	cfg->host = host;
	rc = alloc_mem(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to alloc_mem failed!\n",
			__func__);
		rc = -ENOMEM;
		scsi_host_put(cfg->host);
		goto out;
	}

	cfg->init_state = INIT_STATE_NONE;
	cfg->dev = pdev;
	cfg->cxl_fops = cxlflash_cxl_fops;

	/*
	 * The promoted LUNs move to the top of the LUN table. The rest stay
	 * on the bottom half. The bottom half grows from the end
	 * (index = 255), whereas the top half grows from the beginning
	 * (index = 0).
	 */
	cfg->promote_lun_index  = 0;
	cfg->last_lun_index[0] = CXLFLASH_NUM_VLUNS/2 - 1;
	cfg->last_lun_index[1] = CXLFLASH_NUM_VLUNS/2 - 1;

	cfg->dev_id = (struct pci_device_id *)dev_id;

	init_waitqueue_head(&cfg->tmf_waitq);
	init_waitqueue_head(&cfg->reset_waitq);

	INIT_WORK(&cfg->work_q, cxlflash_worker_thread);
	cfg->lr_state = LINK_RESET_INVALID;
	cfg->lr_port = -1;
	spin_lock_init(&cfg->tmf_slock);
	mutex_init(&cfg->ctx_tbl_list_mutex);
	mutex_init(&cfg->ctx_recovery_mutex);
	init_rwsem(&cfg->ioctl_rwsem);
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);

	cfg->cxl_afu = cxl_pci_to_afu(pdev);

	rc = init_pci(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_pci "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_PCI;

	rc = init_afu(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_afu "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_AFU;

	rc = init_scsi(cfg);
	if (rc) {
		dev_err(&pdev->dev, "%s: call to init_scsi "
			"failed rc=%d!\n", __func__, rc);
		goto out_remove;
	}
	cfg->init_state = INIT_STATE_SCSI;

out:
	pr_debug("%s: returning rc=%d\n", __func__, rc);
	return rc;

out_remove:
	cxlflash_remove(pdev);
	goto out;
}

/**
 * cxlflash_pci_error_detected() - called when a PCI error is detected
 * @pdev:	PCI device struct.
 * @state:	PCI channel state.
 *
 * When an EEH occurs during an active reset, wait until the reset is
 * complete and then take action based upon the device state.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
						    pci_channel_state_t state)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p state=%u\n", __func__, pdev, state);

	switch (state) {
	case pci_channel_io_frozen:
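		/* Let any reset in flight finish before acting on the EEH */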
		wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
		if (cfg->state == STATE_FAILTERM)
			return PCI_ERS_RESULT_DISCONNECT;

		cfg->state = STATE_RESET;
		scsi_block_requests(cfg->host);
		drain_ioctls(cfg);
		rc = cxlflash_mark_contexts_error(cfg);
		if (unlikely(rc))
			dev_err(dev, "%s: Failed to mark user contexts!(%d)\n",
				__func__, rc);
		term_afu(cfg);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		cfg->state = STATE_FAILTERM;
		wake_up_all(&cfg->reset_waitq);
		scsi_unblock_requests(cfg->host);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		break;
	}
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * cxlflash_pci_slot_reset() - called when PCI slot has been reset
 * @pdev:	PCI device struct.
 *
 * This routine is called by the pci error recovery code after the PCI
 * slot has been reset, just before we should resume normal operations.
 *
 * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT
 */
static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev)
{
	int rc = 0;
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	rc = init_afu(cfg);
	if (unlikely(rc)) {
		dev_err(dev, "%s: EEH recovery failed! (%d)\n", __func__, rc);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * cxlflash_pci_resume() - called when normal operation can resume
 * @pdev:	PCI device struct
 */
static void cxlflash_pci_resume(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &cfg->dev->dev;

	dev_dbg(dev, "%s: pdev=%p\n", __func__, pdev);

	cfg->state = STATE_NORMAL;
	wake_up_all(&cfg->reset_waitq);
	scsi_unblock_requests(cfg->host);
}

static const struct pci_error_handlers cxlflash_err_handler = {
	.error_detected = cxlflash_pci_error_detected,
	.slot_reset = cxlflash_pci_slot_reset,
	.resume = cxlflash_pci_resume,
};

/*
 * PCI device structure
 */
static struct pci_driver cxlflash_driver = {
	.name = CXLFLASH_NAME,
	.id_table = cxlflash_pci_table,
	.probe = cxlflash_probe,
	.remove = cxlflash_remove,
	.shutdown = cxlflash_remove,
	.err_handler = &cxlflash_err_handler,
};

/**
 * init_cxlflash() - module entry point
 *
 * Return: 0 on success, -errno on failure
 */
static int __init init_cxlflash(void)
{
	pr_info("%s: %s\n", __func__, CXLFLASH_ADAPTER_NAME);

	cxlflash_list_init();

	return pci_register_driver(&cxlflash_driver);
}

/**
 * exit_cxlflash() - module exit point
 */
static void __exit exit_cxlflash(void)
{
	cxlflash_term_global_luns();
	cxlflash_free_errpage();

	pci_unregister_driver(&cxlflash_driver);
}

module_init(init_cxlflash);
module_exit(exit_cxlflash);