/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_version.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"

#define LPFC_RESET_WAIT  2
#define LPFC_ABORT_WAIT  2

/**
 * lpfc_update_stats: Update statistical data for the command completion.
 * @phba: Pointer to HBA object.
 * @lpfc_cmd: lpfc scsi command object pointer.
 *
 * This function is called when there is a command completion and this
 * function updates the statistical data for the command completion.
 **/
static void
lpfc_update_stats(struct lpfc_hba *phba, struct  lpfc_scsi_buf *lpfc_cmd)
{
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	unsigned long flags;
	struct Scsi_Host  *shost = cmd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	unsigned long latency;
	int i;

	if (cmd->result)
		return;

	spin_lock_irqsave(shost->host_lock, flags);
	if (!vport->stat_data_enabled ||
		vport->stat_data_blocked ||
		!pnode->lat_data ||
		(phba->bucket_type == LPFC_NO_BUCKET)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return;
	}
	latency = jiffies_to_msecs(jiffies - lpfc_cmd->start_time);

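	/*
	 * Map the measured latency (in ms) to a histogram bucket.  A linear
	 * histogram uses equal bucket_step wide slots above bucket_base; a
	 * power-of-two histogram uses slots growing as base + (2^i) * step.
	 */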
	if (phba->bucket_type == LPFC_LINEAR_BUCKET) {
		i = (latency + phba->bucket_step - 1 - phba->bucket_base)/
			phba->bucket_step;
		if (i >= LPFC_MAX_BUCKET_COUNT)
			i = LPFC_MAX_BUCKET_COUNT - 1;
	} else {
		for (i = 0; i < LPFC_MAX_BUCKET_COUNT-1; i++)
			if (latency <= (phba->bucket_base +
				((1<<i)*phba->bucket_step)))
				break;
	}

	pnode->lat_data[i].cmd_count++;
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/**
 * lpfc_send_sdev_queuedepth_change_event: Posts a queuedepth change
 *                   event.
 * @phba: Pointer to HBA context object.
 * @vport: Pointer to vport object.
 * @ndlp: Pointer to FC node associated with the target.
 * @lun: Lun number of the scsi device.
 * @old_val: Old value of the queue depth.
 * @new_val: New value of the queue depth.
 *
 * This function sends an event to the mgmt application indicating
 * there is a change in the scsi device queue depth.
 **/
static void
lpfc_send_sdev_queuedepth_change_event(struct lpfc_hba *phba,
		struct lpfc_vport  *vport,
		struct lpfc_nodelist *ndlp,
		uint32_t lun,
		uint32_t old_val,
		uint32_t new_val)
{
	struct lpfc_fast_path_event *fast_path_evt;
	unsigned long flags;

	fast_path_evt = lpfc_alloc_fast_evt(phba);
	if (!fast_path_evt)
		return;

	fast_path_evt->un.queue_depth_evt.scsi_event.event_type =
		FC_REG_SCSI_EVENT;
	fast_path_evt->un.queue_depth_evt.scsi_event.subcategory =
		LPFC_EVENT_VARQUEDEPTH;

	/* Report all luns with change in queue depth */
	fast_path_evt->un.queue_depth_evt.scsi_event.lun = lun;
	if (ndlp && NLP_CHK_NODE_ACT(ndlp)) {
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwpn,
			&ndlp->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.queue_depth_evt.scsi_event.wwnn,
			&ndlp->nlp_nodename, sizeof(struct lpfc_name));
	}

	fast_path_evt->un.queue_depth_evt.oldval = old_val;
	fast_path_evt->un.queue_depth_evt.newval = new_val;
	fast_path_evt->vport = vport;

	fast_path_evt->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);

	return;
}

/**
 * lpfc_rampdown_queue_depth: Post RAMP_DOWN_QUEUE event to worker thread.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called when there is a resource error in the driver or
 * firmware. It posts a WORKER_RAMP_DOWN_QUEUE event for @phba, at most one
 * event each second, and wakes up the worker thread of @phba to process
 * the WORKER_RAMP_DOWN_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
void
lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
{
	unsigned long flags;
	uint32_t evt_posted;

	spin_lock_irqsave(&phba->hbalock, flags);
	atomic_inc(&phba->num_rsrc_err);
	phba->last_rsrc_error_time = jiffies;

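	/*
	 * Throttle ramp-down processing: post at most one
	 * WORKER_RAMP_DOWN_QUEUE event per QUEUE_RAMP_DOWN_INTERVAL, no
	 * matter how many resource errors arrive in that window.
	 */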
	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}

	phba->last_ramp_down_time = jiffies;

	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_DOWN_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_DOWN_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_rampup_queue_depth: Post RAMP_UP_QUEUE event for worker thread.
 * @vport: The virtual port for which this call is being executed.
 * @sdev: The scsi device whose queue depth may be ramped up.
 *
 * This routine posts a WORKER_RAMP_UP_QUEUE event for the @vport phba, at
 * most one event every 5 minutes after last_ramp_up_time or
 * last_rsrc_error_time, and wakes up the worker thread of the phba to
 * process the WORKER_RAMP_UP_QUEUE event.
 *
 * This routine should be called with no lock held.
 **/
static inline void
lpfc_rampup_queue_depth(struct lpfc_vport  *vport,
			struct scsi_device *sdev)
{
	unsigned long flags;
	struct lpfc_hba *phba = vport->phba;
	uint32_t evt_posted;
	atomic_inc(&phba->num_cmd_success);

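	/* Nothing to ramp up if the device is already at the configured depth. */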
	if (vport->cfg_lun_queue_depth <= sdev->queue_depth)
		return;
	spin_lock_irqsave(&phba->hbalock, flags);
	if (((phba->last_ramp_up_time + QUEUE_RAMP_UP_INTERVAL) > jiffies) ||
	 ((phba->last_rsrc_error_time + QUEUE_RAMP_UP_INTERVAL ) > jiffies)) {
		spin_unlock_irqrestore(&phba->hbalock, flags);
		return;
	}
	phba->last_ramp_up_time = jiffies;
	spin_unlock_irqrestore(&phba->hbalock, flags);

	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
	evt_posted = phba->pport->work_port_events & WORKER_RAMP_UP_QUEUE;
	if (!evt_posted)
		phba->pport->work_port_events |= WORKER_RAMP_UP_QUEUE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);

	if (!evt_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_ramp_down_queue_handler: WORKER_RAMP_DOWN_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_DOWN_QUEUE event in the
 * worker thread. It reduces the queue depth of every scsi device on each
 * vport associated with @phba.
 **/
void
lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	unsigned long new_queue_depth, old_queue_depth;
	unsigned long num_rsrc_err, num_cmd_success;
	int i;
	struct lpfc_rport_data *rdata;

	num_rsrc_err = atomic_read(&phba->num_rsrc_err);
	num_cmd_success = atomic_read(&phba->num_cmd_success);

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
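				/*
				 * Scale the depth down in proportion to the
				 * resource-error rate seen since the last
				 * adjustment:
				 *   new = old - old * err / (err + success)
				 * always dropping by at least one.
				 */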
				new_queue_depth =
					sdev->queue_depth * num_rsrc_err /
					(num_rsrc_err + num_cmd_success);
				if (!new_queue_depth)
					new_queue_depth = sdev->queue_depth - 1;
				else
					new_queue_depth = sdev->queue_depth -
								new_queue_depth;
				old_queue_depth = sdev->queue_depth;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							new_queue_depth);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							new_queue_depth);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun, old_queue_depth,
						new_queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_ramp_up_queue_handler: WORKER_RAMP_UP_QUEUE event handler.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine is called to process the WORKER_RAMP_UP_QUEUE event in the
 * worker thread. It increases the queue depth of every scsi device on each
 * vport associated with @phba by 1, and resets the @phba num_rsrc_err and
 * num_cmd_success counters to zero.
 **/
void
lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	int i;
	struct lpfc_rport_data *rdata;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				if (vports[i]->cfg_lun_queue_depth <=
				    sdev->queue_depth)
					continue;
				if (sdev->ordered_tags)
					scsi_adjust_queue_depth(sdev,
							MSG_ORDERED_TAG,
							sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(sdev,
							MSG_SIMPLE_TAG,
							sdev->queue_depth+1);
				rdata = sdev->hostdata;
				if (rdata)
					lpfc_send_sdev_queuedepth_change_event(
						phba, vports[i],
						rdata->pnode,
						sdev->lun,
						sdev->queue_depth - 1,
						sdev->queue_depth);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
	atomic_set(&phba->num_rsrc_err, 0);
	atomic_set(&phba->num_cmd_success, 0);
}

/**
 * lpfc_scsi_dev_block: set all scsi hosts to block state.
 * @phba: Pointer to HBA context object.
 *
 * This function walks the vport list and sets each SCSI host to the block
 * state by invoking the fc_remote_port_delete() routine. It is invoked
 * from EEH when the device's PCI slot has been permanently disabled.
 **/
void
lpfc_scsi_dev_block(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	struct Scsi_Host  *shost;
	struct scsi_device *sdev;
	struct fc_rport *rport;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			shost_for_each_device(sdev, shost) {
				rport = starget_to_rport(scsi_target(sdev));
				fc_remote_port_delete(rport);
			}
		}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_new_scsi_buf: Scsi buffer allocator.
 * @vport: The virtual port for which this call being executed.
 *
 * This routine allocates a scsi buffer, which contains all the necessary
 * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
 * contains information to build the IOCB.  The DMAable region contains
 * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
 * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
 * and the BPL BDE is setup in the IOCB.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf data structure - Success
 **/
static struct lpfc_scsi_buf *
lpfc_new_scsi_buf(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_scsi_buf *psb;
	struct ulp_bde64 *bpl;
	IOCB_t *iocb;
	dma_addr_t pdma_phys_fcp_cmd;
	dma_addr_t pdma_phys_fcp_rsp;
	dma_addr_t pdma_phys_bpl;
	uint16_t iotag;

	psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
	if (!psb)
		return NULL;

	/*
	 * Get memory from the pci pool to map the virt space to pci bus space
	 * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
	 * struct fcp_rsp and the number of bde's necessary to support the
	 * sg_tablesize.
	 */
	psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
							&psb->dma_handle);
	if (!psb->data) {
		kfree(psb);
		return NULL;
	}

	/* Initialize virtual ptrs to dma_buf region. */
	memset(psb->data, 0, phba->cfg_sg_dma_buf_size);

	/* Allocate iotag for psb->cur_iocbq. */
	iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
	if (iotag == 0) {
		pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
			      psb->data, psb->dma_handle);
		kfree(psb);
		return NULL;
	}
	psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;

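	/*
	 * Carve the single DMA allocation into its three regions; the
	 * virtual pointers set up below mirror the bus addresses computed
	 * just after them:
	 *
	 *   psb->data --> +------------------+
	 *                 | struct fcp_cmnd  |
	 *                 +------------------+
	 *                 | struct fcp_rsp   |
	 *                 +------------------+
	 *                 | BPL (BDE array)  |
	 *                 +------------------+
	 */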
	psb->fcp_cmnd = psb->data;
	psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
	psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
							sizeof(struct fcp_rsp);

	/* Initialize local short-hand pointers. */
	bpl = psb->fcp_bpl;
	pdma_phys_fcp_cmd = psb->dma_handle;
	pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
	pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp);

	/*
	 * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
	 * list bdes.  Initialize the first two and leave the rest for
	 * queuecommand.
	 */
	bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
	bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
	bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
	bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[0].tus.w = le32_to_cpu(bpl->tus.w);

	/* Setup the physical region for the FCP RSP */
	bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
	bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
	bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
	bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl[1].tus.w = le32_to_cpu(bpl->tus.w);

	/*
	 * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
	 * initialize it with all known data now.
	 */
	iocb = &psb->cur_iocbq.iocb;
	iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
	if (phba->sli_rev == 3) {
		/* fill in immediate fcp command BDE */
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
		iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
		iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
						       unsli3.fcp_ext.icd);
		iocb->un.fcpi64.bdl.addrHigh = 0;
		iocb->ulpBdeCount = 0;
		iocb->ulpLe = 0;
		/* fill in response BDE */
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
						sizeof(struct fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrLow =
						putPaddrLow(pdma_phys_fcp_rsp);
		iocb->unsli3.fcp_ext.rbde.addrHigh =
						putPaddrHigh(pdma_phys_fcp_rsp);
	} else {
		iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
		iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
		iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
		iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
		iocb->ulpBdeCount = 1;
		iocb->ulpLe = 1;
	}
	iocb->ulpClass = CLASS3;

	return psb;
}

/**
 * lpfc_get_scsi_buf: Get a scsi buffer from lpfc_scsi_buf_list list of Hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine removes a scsi buffer from the head of the @phba
 * lpfc_scsi_buf_list list and returns it to the caller.
 *
 * Return codes:
 *   NULL - Error
 *   Pointer to lpfc_scsi_buf - Success
 **/
static struct lpfc_scsi_buf*
lpfc_get_scsi_buf(struct lpfc_hba * phba)
{
	struct  lpfc_scsi_buf * lpfc_cmd = NULL;
	struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list;
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list);
	if (lpfc_cmd) {
		lpfc_cmd->seg_cnt = 0;
		lpfc_cmd->nonsg_phys = 0;
	}
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
	return  lpfc_cmd;
}

/**
 * lpfc_release_scsi_buf: Return a scsi buffer back to hba lpfc_scsi_buf_list list.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is being released.
 *
 * This routine releases @psb scsi buffer by adding it to tail of @phba
 * lpfc_scsi_buf_list list.
 **/
static void
lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
{
	unsigned long iflag = 0;

	spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
	psb->pCmd = NULL;
	list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
}

/**
 * lpfc_scsi_prep_dma_buf: Routine to do DMA mapping for scsi buffer.
 * @phba: The Hba for which this call is being executed.
 * @lpfc_cmd: The scsi buffer which is going to be mapped.
 *
 * This routine does the pci dma mapping of the scatter-gather list of the
 * scsi cmnd field of @lpfc_cmd. It scans through the sg elements and formats
 * the BDEs, and also initializes all IOCB fields that depend on the scsi
 * command request buffer.
 *
 * Return codes:
 *   1 - Error
 *   0 - Success
 **/
static int
lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
{
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct scatterlist *sgel = NULL;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
	dma_addr_t physaddr;
	uint32_t num_bde = 0;
	int nseg, datadir = scsi_cmnd->sc_data_direction;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	bpl += 2;
	if (scsi_sg_count(scsi_cmnd)) {
		/*
		 * The driver stores the segment count returned from pci_map_sg
		 * because this a count of dma-mappings used to map the use_sg
		 * pages.  They are not guaranteed to be the same for those
		 * architectures that implement an IOMMU.
		 */

		nseg = dma_map_sg(&phba->pcidev->dev, scsi_sglist(scsi_cmnd),
				  scsi_sg_count(scsi_cmnd), datadir);
		if (unlikely(!nseg))
			return 1;

		lpfc_cmd->seg_cnt = nseg;
		if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
			printk(KERN_ERR "%s: Too many sg segments from "
			       "dma_map_sg.  Config %d, seg_cnt %d",
			       __func__, phba->cfg_sg_seg_cnt,
			       lpfc_cmd->seg_cnt);
			scsi_dma_unmap(scsi_cmnd);
			return 1;
		}

		/*
		 * The driver established a maximum scatter-gather segment count
		 * during probe that limits the number of sg elements in any
		 * single scsi command.  Just run through the seg_cnt and format
		 * the bde's.
		 * When using SLI-3 the driver will try to fit all the BDEs into
		 * the IOCB. If it can't then the BDEs get added to a BPL as it
		 * does for SLI-2 mode.
		 */
		scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
			physaddr = sg_dma_address(sgel);
			if (phba->sli_rev == 3 &&
			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
				data_bde->addrLow = putPaddrLow(physaddr);
				data_bde->addrHigh = putPaddrHigh(physaddr);
				data_bde++;
			} else {
				bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
				bpl->tus.f.bdeSize = sg_dma_len(sgel);
				bpl->tus.w = le32_to_cpu(bpl->tus.w);
				bpl->addrLow =
					le32_to_cpu(putPaddrLow(physaddr));
				bpl->addrHigh =
					le32_to_cpu(putPaddrHigh(physaddr));
				bpl++;
			}
		}
	}

	/*
	 * Finish initializing those IOCB fields that are dependent on the
	 * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
	 * explicitly reinitialized and for SLI-3 the extended bde count is
	 * explicitly reinitialized since all iocb memory resources are reused.
	 */
	if (phba->sli_rev == 3) {
		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
			/*
			 * The extended IOCB format can only fit 3 BDE or a BPL.
			 * This I/O has more than 3 BDE so the 1st data bde will
			 * be a BPL that is filled in here.
			 */
			physaddr = lpfc_cmd->dma_handle;
			data_bde->tus.f.bdeFlags = BUFF_TYPE_BLP_64;
			data_bde->tus.f.bdeSize = (num_bde *
						   sizeof(struct ulp_bde64));
			physaddr += (sizeof(struct fcp_cmnd) +
				     sizeof(struct fcp_rsp) +
				     (2 * sizeof(struct ulp_bde64)));
			data_bde->addrHigh = putPaddrHigh(physaddr);
			data_bde->addrLow = putPaddrLow(physaddr);
			/* ebde count includes the response bde and data bpl */
			iocb_cmd->unsli3.fcp_ext.ebde_count = 2;
		} else {
			/* ebde count includes the response bde and data bdes */
			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
		}
	} else {
		iocb_cmd->un.fcpi64.bdl.bdeSize =
			((num_bde + 2) * sizeof(struct ulp_bde64));
	}
	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
	return 0;
}

/**
 * lpfc_send_scsi_error_event: Posts an event when there is a SCSI error.
 * @phba: Pointer to hba context object.
 * @vport: Pointer to vport object.
 * @lpfc_cmd: Pointer to lpfc scsi command which reported the error.
 * @rsp_iocb: Pointer to response iocb object which reported error.
 *
 * This function posts an event when there is a SCSI command reporting
 * error from the scsi device.
 **/
static void
lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
		struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	struct lpfc_fast_path_event *fast_path_evt = NULL;
	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
	unsigned long flags;

	/* If there is queuefull or busy condition send a scsi event */
	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
		(cmnd->result == SAM_STAT_BUSY)) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.scsi_evt.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.scsi_evt.subcategory =
		(cmnd->result == SAM_STAT_TASK_SET_FULL) ?
		LPFC_EVENT_QFULL : LPFC_EVENT_DEVBSY;
		fast_path_evt->un.scsi_evt.lun = cmnd->device->lun;
		memcpy(&fast_path_evt->un.scsi_evt.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.scsi_evt.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
	} else if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen &&
		((cmnd->cmnd[0] == READ_10) || (cmnd->cmnd[0] == WRITE_10))) {
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.check_cond_evt.scsi_event.event_type =
			FC_REG_SCSI_EVENT;
		fast_path_evt->un.check_cond_evt.scsi_event.subcategory =
			LPFC_EVENT_CHECK_COND;
		fast_path_evt->un.check_cond_evt.scsi_event.lun =
			cmnd->device->lun;
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.check_cond_evt.scsi_event.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.check_cond_evt.sense_key =
			cmnd->sense_buffer[2] & 0xf;
		fast_path_evt->un.check_cond_evt.asc = cmnd->sense_buffer[12];
		fast_path_evt->un.check_cond_evt.ascq = cmnd->sense_buffer[13];
	} else if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
		     fcpi_parm &&
		     ((be32_to_cpu(fcprsp->rspResId) != fcpi_parm) ||
			((scsi_status == SAM_STAT_GOOD) &&
			!(resp_info & (RESID_UNDER | RESID_OVER))))) {
		/*
		 * If status is good or resid does not match with fcp_param and
		 * there is valid fcpi_parm, then there is a read_check error
		 */
		fast_path_evt = lpfc_alloc_fast_evt(phba);
		if (!fast_path_evt)
			return;
		fast_path_evt->un.read_check_error.header.event_type =
			FC_REG_FABRIC_EVENT;
		fast_path_evt->un.read_check_error.header.subcategory =
			LPFC_EVENT_FCPRDCHKERR;
		memcpy(&fast_path_evt->un.read_check_error.header.wwpn,
			&pnode->nlp_portname, sizeof(struct lpfc_name));
		memcpy(&fast_path_evt->un.read_check_error.header.wwnn,
			&pnode->nlp_nodename, sizeof(struct lpfc_name));
		fast_path_evt->un.read_check_error.lun = cmnd->device->lun;
		fast_path_evt->un.read_check_error.opcode = cmnd->cmnd[0];
		fast_path_evt->un.read_check_error.fcpiparam =
			fcpi_parm;
	} else
		return;

	fast_path_evt->vport = vport;
	spin_lock_irqsave(&phba->hbalock, flags);
	list_add_tail(&fast_path_evt->work_evt.evt_listp, &phba->work_list);
	spin_unlock_irqrestore(&phba->hbalock, flags);
	lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_scsi_unprep_dma_buf: Routine to un-map DMA mapping of scatter gather.
 * @phba: The Hba for which this call is being executed.
 * @psb: The scsi buffer which is going to be un-mapped.
 *
 * This routine un-maps the DMA mapping of the scatter-gather list of the
 * scsi command field of @psb.
 **/
static void
lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
{
	/*
	 * There are only two special cases to consider.  (1) the scsi command
	 * requested scatter-gather usage or (2) the scsi command allocated
	 * a request buffer, but did not request use_sg.  There is a third
	 * case, but it does not require resource deallocation.
	 */
	if (psb->seg_cnt > 0)
		scsi_dma_unmap(psb->pCmd);
}

/**
 * lpfc_handle_fcp_err: FCP response handler.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @rsp_iocb: The response IOCB which contains FCP error.
 *
 * This routine is called to process response IOCB with status field
 * IOSTAT_FCP_RSP_ERROR. This routine sets result field of scsi command
 * based upon SCSI and FCP error.
 **/
static void
lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_iocbq *rsp_iocb)
{
	struct scsi_cmnd *cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcpcmd = lpfc_cmd->fcp_cmnd;
	struct fcp_rsp *fcprsp = lpfc_cmd->fcp_rsp;
	uint32_t fcpi_parm = rsp_iocb->iocb.un.fcpi.fcpi_parm;
	uint32_t resp_info = fcprsp->rspStatus2;
	uint32_t scsi_status = fcprsp->rspStatus3;
	uint32_t *lp;
	uint32_t host_status = DID_OK;
	uint32_t rsplen = 0;
	uint32_t logit = LOG_FCP | LOG_FCP_ERROR;


	/*
	 *  If this is a task management command, there is no
	 *  scsi packet associated with this lpfc_cmd.  The driver
	 *  consumes it.
	 */
	if (fcpcmd->fcpCntl2) {
		scsi_status = 0;
		goto out;
	}

	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
		uint32_t snslen = be32_to_cpu(fcprsp->rspSnsLen);
		if (snslen > SCSI_SENSE_BUFFERSIZE)
			snslen = SCSI_SENSE_BUFFERSIZE;

		if (resp_info & RSP_LEN_VALID)
			rsplen = be32_to_cpu(fcprsp->rspRspLen);
		memcpy(cmnd->sense_buffer, &fcprsp->rspInfo0 + rsplen, snslen);
	}
	lp = (uint32_t *)cmnd->sense_buffer;

	if (!scsi_status && (resp_info & RESID_UNDER))
		logit = LOG_FCP;

	lpfc_printf_vlog(vport, KERN_WARNING, logit,
			 "0730 FCP command x%x failed: x%x SNS x%x x%x "
			 "Data: x%x x%x x%x x%x x%x\n",
			 cmnd->cmnd[0], scsi_status,
			 be32_to_cpu(*lp), be32_to_cpu(*(lp + 3)), resp_info,
			 be32_to_cpu(fcprsp->rspResId),
			 be32_to_cpu(fcprsp->rspSnsLen),
			 be32_to_cpu(fcprsp->rspRspLen),
			 fcprsp->rspInfo3);

	if (resp_info & RSP_LEN_VALID) {
		rsplen = be32_to_cpu(fcprsp->rspRspLen);
		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
			host_status = DID_ERROR;
			goto out;
		}
	}

	scsi_set_resid(cmnd, 0);
	if (resp_info & RESID_UNDER) {
		scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId));

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0716 FCP Read Underrun, expected %d, "
				 "residual %d Data: x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 scsi_get_resid(cmnd), fcpi_parm, cmnd->cmnd[0],
				 cmnd->underflow);

		/*
		 * If there is an under run check if under run reported by
		 * storage array is same as the under run reported by HBA.
		 * If this is not same, there is a dropped frame.
		 */
		if ((cmnd->sc_data_direction == DMA_FROM_DEVICE) &&
			fcpi_parm &&
			(scsi_get_resid(cmnd) != fcpi_parm)) {
			lpfc_printf_vlog(vport, KERN_WARNING,
					 LOG_FCP | LOG_FCP_ERROR,
					 "0735 FCP Read Check Error "
					 "and Underrun Data: x%x x%x x%x x%x\n",
					 be32_to_cpu(fcpcmd->fcpDl),
					 scsi_get_resid(cmnd), fcpi_parm,
					 cmnd->cmnd[0]);
			scsi_set_resid(cmnd, scsi_bufflen(cmnd));
			host_status = DID_ERROR;
		}
		/*
		 * The cmnd->underflow is the minimum number of bytes that must
		 * be transferred for this command.  Provided a sense condition
		 * is not present, make sure the actual amount transferred is at
		 * least the underflow value or fail.
		 */
		if (!(resp_info & SNS_LEN_VALID) &&
		    (scsi_status == SAM_STAT_GOOD) &&
		    (scsi_bufflen(cmnd) - scsi_get_resid(cmnd)
		     < cmnd->underflow)) {
			lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
					 "0717 FCP command x%x residual "
					 "underrun converted to error "
					 "Data: x%x x%x x%x\n",
					 cmnd->cmnd[0], scsi_bufflen(cmnd),
					 scsi_get_resid(cmnd), cmnd->underflow);
			host_status = DID_ERROR;
		}
	} else if (resp_info & RESID_OVER) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0720 FCP command x%x residual overrun error. "
				 "Data: x%x x%x \n", cmnd->cmnd[0],
				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
		host_status = DID_ERROR;

	/*
	 * Check SLI validation that all the transfer was actually done
	 * (fcpi_parm should be zero). Apply check only to reads.
	 */
	} else if ((scsi_status == SAM_STAT_GOOD) && fcpi_parm &&
			(cmnd->sc_data_direction == DMA_FROM_DEVICE)) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR,
				 "0734 FCP Read Check Error Data: "
				 "x%x x%x x%x x%x\n",
				 be32_to_cpu(fcpcmd->fcpDl),
				 be32_to_cpu(fcprsp->rspResId),
				 fcpi_parm, cmnd->cmnd[0]);
		host_status = DID_ERROR;
		scsi_set_resid(cmnd, scsi_bufflen(cmnd));
	}

 out:
	cmnd->result = ScsiResult(host_status, scsi_status);
	lpfc_send_scsi_error_event(vport->phba, vport, lpfc_cmd, rsp_iocb);
}

/**
 * lpfc_scsi_cmd_iocb_cmpl: Scsi cmnd IOCB completion routine.
 * @phba: The Hba for which this call is being executed.
 * @pIocbIn: The command IOCBQ for the scsi cmnd.
 * @pIocbOut: The response IOCBQ for the scsi cmnd.
 *
 * This routine assigns scsi command result by looking into response IOCB
 * status field appropriately. This routine handles QUEUE FULL condition as
 * well by ramping down device queue depth.
 **/
static void
lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
			struct lpfc_iocbq *pIocbOut)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) pIocbIn->context1;
	struct lpfc_vport      *vport = pIocbIn->vport;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	struct scsi_cmnd *cmd = lpfc_cmd->pCmd;
	int result;
	struct scsi_device *sdev, *tmp_sdev;
	int depth = 0;
	unsigned long flags;
	struct lpfc_fast_path_event *fast_path_evt;

	lpfc_cmd->result = pIocbOut->iocb.un.ulpWord[4];
	lpfc_cmd->status = pIocbOut->iocb.ulpStatus;
	if (pnode && NLP_CHK_NODE_ACT(pnode))
		atomic_dec(&pnode->cmd_pending);

	if (lpfc_cmd->status) {
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
		    (lpfc_cmd->result & IOERR_DRVR_MASK))
			lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
		else if (lpfc_cmd->status >= IOSTAT_CNT)
			lpfc_cmd->status = IOSTAT_DEFAULT;

		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0729 FCP cmd x%x failed <%d/%d> "
				 "status: x%x result: x%x Data: x%x x%x\n",
				 cmd->cmnd[0],
				 cmd->device ? cmd->device->id : 0xffff,
				 cmd->device ? cmd->device->lun : 0xffff,
				 lpfc_cmd->status, lpfc_cmd->result,
				 pIocbOut->iocb.ulpContext,
				 lpfc_cmd->cur_iocbq.iocb.ulpIoTag);

		switch (lpfc_cmd->status) {
		case IOSTAT_FCP_RSP_ERROR:
			/* Call FCP RSP handler to determine result */
			lpfc_handle_fcp_err(vport, lpfc_cmd, pIocbOut);
			break;
		case IOSTAT_NPORT_BSY:
		case IOSTAT_FABRIC_BSY:
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
			fast_path_evt = lpfc_alloc_fast_evt(phba);
			if (!fast_path_evt)
				break;
			fast_path_evt->un.fabric_evt.event_type =
				FC_REG_FABRIC_EVENT;
			fast_path_evt->un.fabric_evt.subcategory =
				(lpfc_cmd->status == IOSTAT_NPORT_BSY) ?
				LPFC_EVENT_PORT_BUSY : LPFC_EVENT_FABRIC_BUSY;
			if (pnode && NLP_CHK_NODE_ACT(pnode)) {
				memcpy(&fast_path_evt->un.fabric_evt.wwpn,
					&pnode->nlp_portname,
					sizeof(struct lpfc_name));
				memcpy(&fast_path_evt->un.fabric_evt.wwnn,
					&pnode->nlp_nodename,
					sizeof(struct lpfc_name));
			}
			fast_path_evt->vport = vport;
			fast_path_evt->work_evt.evt =
				LPFC_EVT_FASTPATH_MGMT_EVT;
			spin_lock_irqsave(&phba->hbalock, flags);
			list_add_tail(&fast_path_evt->work_evt.evt_listp,
				&phba->work_list);
			spin_unlock_irqrestore(&phba->hbalock, flags);
			lpfc_worker_wake_up(phba);
			break;
		case IOSTAT_LOCAL_REJECT:
			if (lpfc_cmd->result == IOERR_INVALID_RPI ||
			    lpfc_cmd->result == IOERR_NO_RESOURCES ||
			    lpfc_cmd->result == IOERR_ABORT_REQUESTED) {
				cmd->result = ScsiResult(DID_REQUEUE, 0);
				break;
			} /* else: fall through */
		default:
			cmd->result = ScsiResult(DID_ERROR, 0);
			break;
		}

		if (!pnode || !NLP_CHK_NODE_ACT(pnode)
		    || (pnode->nlp_state != NLP_STE_MAPPED_NODE))
			cmd->result = ScsiResult(DID_TRANSPORT_DISRUPTED,
						 SAM_STAT_BUSY);
	} else {
		cmd->result = ScsiResult(DID_OK, 0);
	}

	if (cmd->result || lpfc_cmd->fcp_rsp->rspSnsLen) {
		uint32_t *lp = (uint32_t *)cmd->sense_buffer;

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0710 Iodone <%d/%d> cmd %p, error "
				 "x%x SNS x%x x%x Data: x%x x%x\n",
				 cmd->device->id, cmd->device->lun, cmd,
				 cmd->result, *lp, *(lp + 3), cmd->retries,
				 scsi_get_resid(cmd));
	}

	lpfc_update_stats(phba, lpfc_cmd);
	result = cmd->result;
	sdev = cmd->device;
	if (vport->cfg_max_scsicmpl_time &&
	   time_after(jiffies, lpfc_cmd->start_time +
		msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) {
		spin_lock_irqsave(sdev->host->host_lock, flags);
		if (pnode && NLP_CHK_NODE_ACT(pnode)) {
			if (pnode->cmd_qdepth >
				atomic_read(&pnode->cmd_pending) &&
				(atomic_read(&pnode->cmd_pending) >
				LPFC_MIN_TGT_QDEPTH) &&
				((cmd->cmnd[0] == READ_10) ||
				(cmd->cmnd[0] == WRITE_10)))
				pnode->cmd_qdepth =
					atomic_read(&pnode->cmd_pending);

			pnode->last_change_time = jiffies;
		}
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
	} else if (pnode && NLP_CHK_NODE_ACT(pnode)) {
		if ((pnode->cmd_qdepth < LPFC_MAX_TGT_QDEPTH) &&
		   time_after(jiffies, pnode->last_change_time +
			      msecs_to_jiffies(LPFC_TGTQ_INTERVAL))) {
			spin_lock_irqsave(sdev->host->host_lock, flags);
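			/*
			 * Ramp the target queue depth back up by
			 * LPFC_TGTQ_RAMPUP_PCENT percent at most once per
			 * LPFC_TGTQ_INTERVAL, capped at LPFC_MAX_TGT_QDEPTH.
			 */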
			pnode->cmd_qdepth += pnode->cmd_qdepth *
				LPFC_TGTQ_RAMPUP_PCENT / 100;
			if (pnode->cmd_qdepth > LPFC_MAX_TGT_QDEPTH)
				pnode->cmd_qdepth = LPFC_MAX_TGT_QDEPTH;
			pnode->last_change_time = jiffies;
			spin_unlock_irqrestore(sdev->host->host_lock, flags);
		}
	}

	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	cmd->scsi_done(cmd);

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		/*
		 * If there is a thread waiting for command completion
		 * wake up the thread.
		 */
		spin_lock_irqsave(sdev->host->host_lock, flags);
		lpfc_cmd->pCmd = NULL;
		if (lpfc_cmd->waitq)
			wake_up(lpfc_cmd->waitq);
		spin_unlock_irqrestore(sdev->host->host_lock, flags);
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return;
	}


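	/*
	 * On successful completion, consider ramping the LUN queue depth
	 * back up: post the deferred ramp-up event and, if the per-node
	 * ramp-up interval has expired, bump the depth on every device of
	 * this target that is still below the configured limit.
	 */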
	if (!result)
		lpfc_rampup_queue_depth(vport, sdev);

	if (!result && pnode && NLP_CHK_NODE_ACT(pnode) &&
	   ((jiffies - pnode->last_ramp_up_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   ((jiffies - pnode->last_q_full_time) >
		LPFC_Q_RAMP_UP_INTERVAL * HZ) &&
	   (vport->cfg_lun_queue_depth > sdev->queue_depth)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (vport->cfg_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth+1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth+1);

				pnode->last_ramp_up_time = jiffies;
			}
		}
		lpfc_send_sdev_queuedepth_change_event(phba, vport, pnode,
			0xFFFFFFFF,
			sdev->queue_depth - 1, sdev->queue_depth);
	}

	/*
	 * Check for queue full.  If the lun is reporting queue full, then
	 * back off the lun queue depth to prevent target overloads.
	 */
	if (result == SAM_STAT_TASK_SET_FULL && pnode &&
	    NLP_CHK_NODE_ACT(pnode)) {
		pnode->last_q_full_time = jiffies;

		shost_for_each_device(tmp_sdev, sdev->host) {
			if (tmp_sdev->id != sdev->id)
				continue;
			depth = scsi_track_queue_full(tmp_sdev,
					tmp_sdev->queue_depth - 1);
		}
		/*
		 * The queue depth cannot be lowered any more.
		 * Modify the returned error code to store
		 * the final depth value set by
		 * scsi_track_queue_full.
		 */
		if (depth == -1)
			depth = sdev->host->cmd_per_lun;

		if (depth) {
			lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
					 "0711 detected queue full - lun queue "
					 "depth adjusted to %d.\n", depth);
			lpfc_send_sdev_queuedepth_change_event(phba, vport,
				pnode, 0xFFFFFFFF,
				depth+1, depth);
		}
	}

	/*
	 * If there is a thread waiting for command completion
	 * wake up the thread.
	 */
	spin_lock_irqsave(sdev->host->host_lock, flags);
	lpfc_cmd->pCmd = NULL;
	if (lpfc_cmd->waitq)
		wake_up(lpfc_cmd->waitq);
	spin_unlock_irqrestore(sdev->host->host_lock, flags);

	lpfc_release_scsi_buf(phba, lpfc_cmd);
}

/**
 * lpfc_fcpcmd_to_iocb: Routine to copy the fcp_cmnd data into the IOCB.
 * @data: A pointer to the immediate command data portion of the IOCB.
 * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
 *
 * The routine copies the entire FCP command from @fcp_cmnd to @data while
 * byte swapping the data to big endian format for transmission on the wire.
 **/
static void
lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
{
	int i, j;
	for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
	     i += sizeof(uint32_t), j++) {
		((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
	}
}

/**
 * lpfc_scsi_prep_cmnd: Routine to convert scsi cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: The scsi command which needs to be sent.
 * @pnode: Pointer to lpfc_nodelist.
 *
 * This routine initializes the fcp_cmnd and iocb data structures from the
 * scsi command to be transferred.
 **/
static void
lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
		    struct lpfc_nodelist *pnode)
{
	struct lpfc_hba *phba = vport->phba;
	struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
	struct lpfc_iocbq *piocbq = &(lpfc_cmd->cur_iocbq);
	int datadir = scsi_cmnd->sc_data_direction;
	char tag[2];

	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
		return;

	lpfc_cmd->fcp_rsp->rspSnsLen = 0;
	/* clear task management bits */
	lpfc_cmd->fcp_cmnd->fcpCntl2 = 0;

	int_to_scsilun(lpfc_cmd->pCmd->device->lun,
			&lpfc_cmd->fcp_cmnd->fcp_lun);

	memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);

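	/*
	 * Translate the midlayer queue tag message into the matching FCP
	 * task attribute (simple, ordered, or head-of-queue) in fcpCntl1.
	 */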
	if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
		switch (tag[0]) {
		case HEAD_OF_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = HEAD_OF_Q;
			break;
		case ORDERED_QUEUE_TAG:
			fcp_cmnd->fcpCntl1 = ORDERED_Q;
			break;
		default:
			fcp_cmnd->fcpCntl1 = SIMPLE_Q;
			break;
		}
	} else
		fcp_cmnd->fcpCntl1 = 0;

	/*
	 * There are three possibilities here - use scatter-gather segment, use
	 * the single mapping, or neither.  Start the lpfc command prep by
	 * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
	 * data bde entry.
	 */
	if (scsi_sg_count(scsi_cmnd)) {
		if (datadir == DMA_TO_DEVICE) {
			iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
			iocb_cmd->un.fcpi.fcpi_parm = 0;
			iocb_cmd->ulpPU = 0;
			fcp_cmnd->fcpCntl3 = WRITE_DATA;
			phba->fc4OutputRequests++;
		} else {
			iocb_cmd->ulpCommand = CMD_FCP_IREAD64_CR;
			iocb_cmd->ulpPU = PARM_READ_CHECK;
			iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
			fcp_cmnd->fcpCntl3 = READ_DATA;
			phba->fc4InputRequests++;
		}
	} else {
		iocb_cmd->ulpCommand = CMD_FCP_ICMND64_CR;
		iocb_cmd->un.fcpi.fcpi_parm = 0;
		iocb_cmd->ulpPU = 0;
		fcp_cmnd->fcpCntl3 = 0;
		phba->fc4ControlRequests++;
	}
	if (phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
	/*
	 * Finish initializing those IOCB fields that are independent
	 * of the scsi_cmnd request_buffer
	 */
	piocbq->iocb.ulpContext = pnode->nlp_rpi;
	if (pnode->nlp_fcp_info & NLP_FCP_2_DEVICE)
		piocbq->iocb.ulpFCP2Rcvy = 1;
	else
		piocbq->iocb.ulpFCP2Rcvy = 0;

	piocbq->iocb.ulpClass = (pnode->nlp_fcp_info & 0x0f);
	piocbq->context1  = lpfc_cmd;
	piocbq->iocb_cmpl = lpfc_scsi_cmd_iocb_cmpl;
	piocbq->iocb.ulpTimeout = lpfc_cmd->timeout;
	piocbq->vport = vport;
}

/**
 * lpfc_scsi_prep_task_mgmt_cmd: Convert scsi TM cmnd to FCP information unit.
 * @vport: The virtual port for which this call is being executed.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
 * @lun: Logical unit number.
 * @task_mgmt_cmd: SCSI task management command.
 *
 * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
 *
 * Return codes:
 *   0 - Error
 *   1 - Success
 **/
static int
lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
			     struct lpfc_scsi_buf *lpfc_cmd,
			     unsigned int lun,
			     uint8_t task_mgmt_cmd)
{
	struct lpfc_iocbq *piocbq;
	IOCB_t *piocb;
	struct fcp_cmnd *fcp_cmnd;
	struct lpfc_rport_data *rdata = lpfc_cmd->rdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;

	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp) ||
	    ndlp->nlp_state != NLP_STE_MAPPED_NODE)
		return 0;

	piocbq = &(lpfc_cmd->cur_iocbq);
	piocbq->vport = vport;

	piocb = &piocbq->iocb;

	fcp_cmnd = lpfc_cmd->fcp_cmnd;
	/* Clear out any old data in the FCP command area */
	memset(fcp_cmnd, 0, sizeof(struct fcp_cmnd));
	int_to_scsilun(lun, &fcp_cmnd->fcp_lun);
	fcp_cmnd->fcpCntl2 = task_mgmt_cmd;
	if (vport->phba->sli_rev == 3)
		lpfc_fcpcmd_to_iocb(piocb->unsli3.fcp_ext.icd, fcp_cmnd);
	piocb->ulpCommand = CMD_FCP_ICMND64_CR;
	piocb->ulpContext = ndlp->nlp_rpi;
	if (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) {
		piocb->ulpFCP2Rcvy = 1;
	}
	piocb->ulpClass = (ndlp->nlp_fcp_info & 0x0f);

	/* ulpTimeout is only one byte */
	if (lpfc_cmd->timeout > 0xff) {
		/*
		 * Do not timeout the command at the firmware level.
		 * The driver will provide the timeout mechanism.
		 */
		piocb->ulpTimeout = 0;
	} else {
		piocb->ulpTimeout = lpfc_cmd->timeout;
	}

	return 1;
}

/**
 * lpfc_tskmgmt_def_cmpl: IOCB completion routine for task management command.
 * @phba: The Hba for which this call is being executed.
 * @cmdiocbq: Pointer to lpfc_iocbq data structure.
 * @rspiocbq: Pointer to lpfc_iocbq data structure.
 *
 * This routine is the IOCB completion routine for the device reset and
 * target reset routines. It releases the scsi buffer associated with lpfc_cmd.
 **/
static void
lpfc_tskmgmt_def_cmpl(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct lpfc_scsi_buf *lpfc_cmd =
		(struct lpfc_scsi_buf *) cmdiocbq->context1;
	if (lpfc_cmd)
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	return;
}

/**
 * lpfc_scsi_tgt_reset: Target reset handler.
 * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure
 * @vport: The virtual port for which this call is being executed.
 * @tgt_id: Target ID.
 * @lun: Lun number.
 * @rdata: Pointer to lpfc_rport_data.
 *
 * This routine issues a TARGET RESET iocb to reset a target with @tgt_id ID.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success.
 **/
static int
lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
		    unsigned  tgt_id, unsigned int lun,
		    struct lpfc_rport_data *rdata)
{
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocbqrsp;
	int ret;
	int status;

	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
		return FAILED;

	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
					   FCP_TARGET_RESET);
	if (!status)
		return FAILED;

	iocbq = &lpfc_cmd->cur_iocbq;
	iocbqrsp = lpfc_sli_get_iocbq(phba);

	if (!iocbqrsp)
		return FAILED;

	/* Issue Target Reset to TGT <num> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
			 tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
				       &phba->sli.ring[phba->sli.fcp_ring],
				       iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status != IOCB_SUCCESS) {
		if (status == IOCB_TIMEDOUT) {
			iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
			ret = TIMEOUT_ERROR;
		} else
			ret = FAILED;
		lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	} else {
		ret = SUCCESS;
		lpfc_cmd->result = iocbqrsp->iocb.un.ulpWord[4];
		lpfc_cmd->status = iocbqrsp->iocb.ulpStatus;
		if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT &&
			(lpfc_cmd->result & IOERR_DRVR_MASK))
				lpfc_cmd->status = IOSTAT_DRIVER_REJECT;
	}

	lpfc_sli_release_iocbq(phba, iocbqrsp);
	return ret;
}

/**
 * lpfc_info: Info entry point of scsi_host_template data structure.
 * @host: The scsi host for which this call is being executed.
 *
 * This routine provides module information about hba.
 *
 * Return code:
 *   Pointer to char - Success.
 **/
const char *
lpfc_info(struct Scsi_Host *host)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int len;
	static char  lpfcinfobuf[384];

	memset(lpfcinfobuf, 0, 384);
	if (phba && phba->pcidev){
		strncpy(lpfcinfobuf, phba->ModelDesc, 256);
		len = strlen(lpfcinfobuf);
		snprintf(lpfcinfobuf + len,
			384-len,
			" on PCI bus %02x device %02x irq %d",
			phba->pcidev->bus->number,
			phba->pcidev->devfn,
			phba->pcidev->irq);
		len = strlen(lpfcinfobuf);
		if (phba->Port[0]) {
			snprintf(lpfcinfobuf + len,
				 384-len,
				 " port %s",
				 phba->Port);
		}
	}
	return lpfcinfobuf;
}

/**
 * lpfc_poll_rearm_timer: Routine to modify fcp_poll timer of hba.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine modifies the fcp_poll_timer field of @phba using cfg_poll_tmo.
 * The default value of cfg_poll_tmo is 10 milliseconds.
 **/
static __inline__ void lpfc_poll_rearm_timer(struct lpfc_hba * phba)
{
	unsigned long  poll_tmo_expires =
		(jiffies + msecs_to_jiffies(phba->cfg_poll_tmo));

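	/* Only re-arm while commands are still outstanding on the FCP ring. */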
	if (phba->sli.ring[LPFC_FCP_RING].txcmplq_cnt)
		mod_timer(&phba->fcp_poll_timer,
			  poll_tmo_expires);
}

/**
 * lpfc_poll_start_timer: Routine to start fcp_poll_timer of HBA.
 * @phba: The Hba for which this call is being executed.
 *
 * This routine starts the fcp_poll_timer of @phba.
 **/
void lpfc_poll_start_timer(struct lpfc_hba * phba)
{
	lpfc_poll_rearm_timer(phba);
}

/**
 * lpfc_poll_timeout: Restart polling timer.
 * @ptr: Pointer to the lpfc_hba data structure, cast to unsigned long.
 *
 * This routine restarts the fcp_poll timer when FCP ring polling is enabled
 * and the FCP ring interrupt is disabled.
 **/

void lpfc_poll_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba = (struct lpfc_hba *) ptr;

	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring (phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}
}

/**
 * lpfc_queuecommand: Queuecommand entry point of Scsi Host Template data
 * structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 * @done: Pointer to done routine.
 *
 * The driver registers this routine with the scsi midlayer to submit a @cmnd
 * for processing. This routine prepares an IOCB from the scsi command and
 * provides it to the firmware. The @done callback is invoked after the driver
 * has finished processing the command.
 *
 * Return value :
 *   0 - Success
 *   SCSI_MLQUEUE_HOST_BUSY - Block all devices served by this host temporarily.
 **/
static int
lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli   *psli = &phba->sli;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
	int err;

	err = fc_remote_port_chkready(rport);
	if (err) {
		cmnd->result = err;
		goto out_fail_command;
	}

	/*
	 * Catch race where our node has transitioned, but the
	 * transport is still transitioning.
	 */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
		cmnd->result = ScsiResult(DID_TRANSPORT_DISRUPTED, 0);
		goto out_fail_command;
	}
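	/*
	 * Throttle this node: once the commands already pending on it
	 * reach its queue depth cap, ask the midlayer to retry later.
	 */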
	if (vport->cfg_max_scsicmpl_time &&
		(atomic_read(&ndlp->cmd_pending) >= ndlp->cmd_qdepth))
		goto out_host_busy;

	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL) {
		lpfc_rampdown_queue_depth(phba);

		lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
				 "0707 driver's buffer pool is empty, "
				 "IO busied\n");
		goto out_host_busy;
	}

	/*
	 * Store the midlayer's command structure for the completion phase
	 * and complete the command initialization.
	 */
	lpfc_cmd->pCmd  = cmnd;
	lpfc_cmd->rdata = rdata;
	lpfc_cmd->timeout = 0;
	lpfc_cmd->start_time = jiffies;
	cmnd->host_scribble = (unsigned char *)lpfc_cmd;
	cmnd->scsi_done = done;

	err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
	if (err)
		goto out_host_busy_free_buf;

	lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);

	atomic_inc(&ndlp->cmd_pending);
	err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
				  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
	if (err) {
		atomic_dec(&ndlp->cmd_pending);
		goto out_host_busy_free_buf;
	}
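	/*
	 * In polled mode service the FCP ring right away, and rearm the
	 * poll timer when the ring interrupt is disabled.
	 */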
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;

 out_host_busy_free_buf:
	lpfc_scsi_unprep_dma_buf(phba, lpfc_cmd);
	lpfc_release_scsi_buf(phba, lpfc_cmd);
 out_host_busy:
	return SCSI_MLQUEUE_HOST_BUSY;

 out_fail_command:
	done(cmnd);
	return 0;
}

/**
 * lpfc_block_error_handler: Routine to block the error handler.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine blocks execution until the fc_rport state is no longer
 * FC_PORTSTATE_BLOCKED.
 **/
static void
lpfc_block_error_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

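	/*
	 * Poll once a second, dropping the host lock across the sleep,
	 * until the transport unblocks the remote port.
	 */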
	spin_lock_irq(shost->host_lock);
	while (rport->port_state == FC_PORTSTATE_BLOCKED) {
		spin_unlock_irq(shost->host_lock);
		msleep(1000);
		spin_lock_irq(shost->host_lock);
	}
	spin_unlock_irq(shost->host_lock);
	return;
}
/**
 * lpfc_abort_handler: eh_abort_handler entry point of the SCSI Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine aborts the @cmnd pending in the base driver.
 *
 * Return code :
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
	struct lpfc_iocbq *iocb;
	struct lpfc_iocbq *abtsiocb;
	struct lpfc_scsi_buf *lpfc_cmd;
	IOCB_t *cmd, *icmd;
	int ret = SUCCESS;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);

	lpfc_block_error_handler(cmnd);
	lpfc_cmd = (struct lpfc_scsi_buf *)cmnd->host_scribble;
	BUG_ON(!lpfc_cmd);

	/*
	 * If pCmd field of the corresponding lpfc_scsi_buf structure
	 * points to a different SCSI command, then the driver has
	 * already completed this command, but the midlayer did not
	 * see the completion before the eh fired.  Just return
	 * SUCCESS.
	 */
	iocb = &lpfc_cmd->cur_iocbq;
	if (lpfc_cmd->pCmd != cmnd)
		goto out;

	BUG_ON(iocb->context1 != lpfc_cmd);

	abtsiocb = lpfc_sli_get_iocbq(phba);
	if (abtsiocb == NULL) {
		ret = FAILED;
		goto out;
	}

	/*
	 * The SCSI command cannot be in the txq; it is in flight because
	 * pCmd is still pointing at the SCSI command we have to abort.
	 * There is no need to search the txcmplq.  Just send an abort to
	 * the FW.
	 */

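	/*
	 * Build the abort IOCB from the context and IO tags of the
	 * outstanding command so the firmware can match the exchange.
	 */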
	cmd = &iocb->iocb;
	icmd = &abtsiocb->iocb;
	icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
	icmd->un.acxri.abortContextTag = cmd->ulpContext;
	icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
	icmd->ulpLe = 1;
	icmd->ulpClass = cmd->ulpClass;
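
	/*
	 * An ABTS goes on the wire only while the link is up; otherwise
	 * the exchange is simply closed locally.
	 */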
	if (lpfc_is_link_up(phba))
		icmd->ulpCommand = CMD_ABORT_XRI_CN;
	else
		icmd->ulpCommand = CMD_CLOSE_XRI_CN;

	abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
	abtsiocb->vport = vport;
	if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocb);
		ret = FAILED;
		goto out;
	}
	if (phba->cfg_poll & DISABLE_FCP_RING_INT)
		lpfc_sli_poll_fcp_ring(phba);

	lpfc_cmd->waitq = &waitq;
	/* Wait for abort to complete */
	wait_event_timeout(waitq,
			  (lpfc_cmd->pCmd != cmnd),
			   (2*vport->cfg_devloss_tmo*HZ));
	spin_lock_irq(shost->host_lock);
	lpfc_cmd->waitq = NULL;
	spin_unlock_irq(shost->host_lock);
	if (lpfc_cmd->pCmd == cmnd) {
		ret = FAILED;
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0748 abort handler timed out waiting "
				 "for abort to complete: ret %#x, ID %d, "
				 "LUN %d, snum %#lx\n",
				 ret, cmnd->device->id, cmnd->device->lun,
				 cmnd->serial_number);
	}

 out:
	lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
			 "0749 SCSI Layer I/O Abort Request Status x%x ID %d "
			 "LUN %d snum %#lx\n", ret, cmnd->device->id,
			 cmnd->device->lun, cmnd->serial_number);
	return ret;
}

/**
 * lpfc_device_reset_handler: eh_device_reset entry point of the SCSI Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine does a device reset by sending a TARGET_RESET task management
 * command.
 *
 * Return code :
 *  0x2003 - Error
 *  0x2002 - Success
 **/
static int
lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *lpfc_cmd;
	struct lpfc_iocbq *iocbq, *iocbqrsp;
	struct lpfc_rport_data *rdata = cmnd->device->hostdata;
	struct lpfc_nodelist *pnode = rdata->pnode;
	unsigned long later;
	int ret = SUCCESS;
	int status;
	int cnt;
	struct lpfc_scsi_event_header scsi_event;

	lpfc_block_error_handler(cmnd);
	/*
	 * If the target is not in a MAPPED state, delay the reset until
	 * the target is rediscovered or the devloss timeout expires.
	 */
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies)) {
		if (!pnode || !NLP_CHK_NODE_ACT(pnode))
			return FAILED;
		if (pnode->nlp_state == NLP_STE_MAPPED_NODE)
			break;
		schedule_timeout_uninterruptible(msecs_to_jiffies(500));
		rdata = cmnd->device->hostdata;
		if (!rdata)
			break;
		pnode = rdata->pnode;
	}

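	/* Post a vendor-unique target-reset event on the fc_host. */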
	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_TGTRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &pnode->nlp_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &pnode->nlp_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	if (!rdata || pnode->nlp_state != NLP_STE_MAPPED_NODE) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0721 LUN Reset rport "
				 "failure: msec x%x rdata x%p\n",
				 jiffies_to_msecs(jiffies - later), rdata);
		return FAILED;
	}
	lpfc_cmd = lpfc_get_scsi_buf(phba);
	if (lpfc_cmd == NULL)
		return FAILED;
	lpfc_cmd->timeout = 60;
	lpfc_cmd->rdata = rdata;
	status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd,
					      cmnd->device->lun,
					      FCP_TARGET_RESET);
	if (!status) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	iocbq = &lpfc_cmd->cur_iocbq;

	/* get a buffer for this IOCB command response */
	iocbqrsp = lpfc_sli_get_iocbq(phba);
	if (iocbqrsp == NULL) {
		lpfc_release_scsi_buf(phba, lpfc_cmd);
		return FAILED;
	}
	lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
			 "0703 Issue target reset to TGT %d LUN %d "
			 "rpi x%x nlp_flag x%x\n", cmnd->device->id,
			 cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
	status = lpfc_sli_issue_iocb_wait(phba,
					  &phba->sli.ring[phba->sli.fcp_ring],
					  iocbq, iocbqrsp, lpfc_cmd->timeout);
	if (status == IOCB_TIMEDOUT) {
		iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
		ret = TIMEOUT_ERROR;
	} else {
		if (status != IOCB_SUCCESS)
			ret = FAILED;
		lpfc_release_scsi_buf(phba, lpfc_cmd);
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0713 SCSI layer issued device reset (%d, %d) "
			 "return x%x status x%x result x%x\n",
			 cmnd->device->id, cmnd->device->lun, ret,
			 iocbqrsp->iocb.ulpStatus,
			 iocbqrsp->iocb.un.ulpWord[4]);
	lpfc_sli_release_iocbq(phba, iocbqrsp);
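
	/*
	 * The target should have aborted its outstanding I/O as part of
	 * the reset; count what is still pending, force-abort it, and
	 * poll until the flush completes or twice the devloss timeout
	 * elapses.
	 */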
	cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id, cmnd->device->lun,
				LPFC_CTX_TGT);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    cmnd->device->id, cmnd->device->lun,
				    LPFC_CTX_TGT);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, cmnd->device->id,
					cmnd->device->lun, LPFC_CTX_TGT);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0719 device reset I/O flush failure: "
				 "cnt x%x\n", cnt);
		ret = FAILED;
	}
	return ret;
}

/**
 * lpfc_bus_reset_handler: eh_bus_reset_handler entry point of the SCSI Host
 * Template data structure.
 * @cmnd: Pointer to scsi_cmnd data structure.
 *
 * This routine issues a target reset to every target on @cmnd->device->host.
 *
 * Return Code:
 *   0x2003 - Error
 *   0x2002 - Success
 **/
static int
lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host  *shost = cmnd->device->host;
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_nodelist *ndlp = NULL;
	int match;
	int ret = SUCCESS, status = SUCCESS, i;
	int cnt;
	struct lpfc_scsi_buf *lpfc_cmd;
	unsigned long later;
	struct lpfc_scsi_event_header scsi_event;

	scsi_event.event_type = FC_REG_SCSI_EVENT;
	scsi_event.subcategory = LPFC_EVENT_BUSRESET;
	scsi_event.lun = 0;
	memcpy(scsi_event.wwpn, &vport->fc_portname, sizeof(struct lpfc_name));
	memcpy(scsi_event.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name));

	fc_host_post_vendor_event(shost,
		fc_get_event_number(),
		sizeof(scsi_event),
		(char *)&scsi_event,
		LPFC_NL_VENDOR_ID);

	lpfc_block_error_handler(cmnd);
	/*
	 * Since the driver manages a single bus device, reset all
	 * targets known to the driver.  Should any target reset
	 * fail, this routine returns failure to the midlayer.
	 */
	for (i = 0; i < LPFC_MAX_TARGET; i++) {
		/* Search for mapped node by target ID */
		match = 0;
		spin_lock_irq(shost->host_lock);
		list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
			if (!NLP_CHK_NODE_ACT(ndlp))
				continue;
			if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
			    ndlp->nlp_sid == i &&
			    ndlp->rport) {
				match = 1;
				break;
			}
		}
		spin_unlock_irq(shost->host_lock);
		if (!match)
			continue;
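		/*
		 * Reset this mapped target.  On a timeout the scsi_buf is
		 * kept for the deferred completion, so it is only released
		 * here for other outcomes.
		 */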
		lpfc_cmd = lpfc_get_scsi_buf(phba);
		if (lpfc_cmd) {
			lpfc_cmd->timeout = 60;
			status = lpfc_scsi_tgt_reset(lpfc_cmd, vport, i,
						     cmnd->device->lun,
						     ndlp->rport->dd_data);
			if (status != TIMEOUT_ERROR)
				lpfc_release_scsi_buf(phba, lpfc_cmd);
		}
		if (!lpfc_cmd || status != SUCCESS) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0700 Bus Reset on target %d failed\n",
					 i);
			ret = FAILED;
		}
	}
	/*
	 * All outstanding txcmplq I/Os should have been aborted by
	 * the targets.  Unfortunately, some targets do not abide by
	 * this forcing the driver to double check.
	 */
	cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	if (cnt)
		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
				    0, 0, LPFC_CTX_HOST);
	later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
	while (time_after(later, jiffies) && cnt) {
		schedule_timeout_uninterruptible(msecs_to_jiffies(20));
		cnt = lpfc_sli_sum_iocb(vport, 0, 0, LPFC_CTX_HOST);
	}
	if (cnt) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
				 "0715 Bus Reset I/O flush failure: "
				 "cnt x%x left x%x\n", cnt, i);
		ret = FAILED;
	}
	lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
			 "0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
	return ret;
}

/**
 * lpfc_slave_alloc: slave_alloc entry point of the SCSI Host Template data
 * structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine populates cmds_per_lun count + 2 scsi_bufs into this host's
 * globally available list of scsi buffers.  It also makes sure no more scsi
 * buffers are allocated than the HBA limit conveyed to the midlayer.  This
 * list of scsi buffers exists for the lifetime of the driver.
 *
 * Return codes:
 *   non-0 - Error
 *   0 - Success
 **/
static int
lpfc_slave_alloc(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct lpfc_scsi_buf *scsi_buf = NULL;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	uint32_t total = 0, i;
	uint32_t num_to_alloc = 0;
	unsigned long flags;

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	/*
	 * Populate the cmds_per_lun count scsi_bufs into this host's globally
	 * available list of scsi buffers.  Don't allocate more than the
	 * HBA limit conveyed to the midlayer via the host structure.  The
	 * formula accounts for the lun_queue_depth + error handlers + 1
	 * extra.  This list of scsi bufs exists for the lifetime of the driver.
	 */
	total = phba->total_scsi_bufs;
	num_to_alloc = vport->cfg_lun_queue_depth + 2;

	/* Allow some exchanges to be available always to complete discovery */
	if (total >= phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0704 At limitation of %d preallocated "
				 "command buffers\n", total);
		return 0;
	/* Allow some exchanges to be available always to complete discovery */
	} else if (total + num_to_alloc >
		phba->cfg_hba_queue_depth - LPFC_DISC_IOCB_BUFF_COUNT) {
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
				 "0705 Allocation request of %d "
				 "command buffers will exceed max of %d.  "
				 "Reducing allocation request to %d.\n",
				 num_to_alloc, phba->cfg_hba_queue_depth,
				 (phba->cfg_hba_queue_depth - total));
		num_to_alloc = phba->cfg_hba_queue_depth - total;
	}

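	/* Preallocate the scsi_bufs; they persist for the driver's lifetime. */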
	for (i = 0; i < num_to_alloc; i++) {
		scsi_buf = lpfc_new_scsi_buf(vport);
		if (!scsi_buf) {
			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
					 "0706 Failed to allocate "
					 "command buffer\n");
			break;
		}

		spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
		phba->total_scsi_bufs++;
		list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
		spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
	}
	return 0;
}

/**
 * lpfc_slave_configure: slave_configure entry point of the SCSI Host Template
 * data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine configures the following items
 *   - Tag command queuing support for @sdev if supported.
 *   - Dev loss time out value of fc_rport.
 *   - Enable SLI polling for fcp ring if ENABLE_FCP_RING_POLLING flag is set.
 *
 * Return codes:
 *   0 - Success
 **/
static int
lpfc_slave_configure(struct scsi_device *sdev)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	struct fc_rport   *rport = starget_to_rport(sdev->sdev_target);

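	/* Both paths apply the configured LUN queue depth to the device. */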
	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, vport->cfg_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, vport->cfg_lun_queue_depth);

	/*
	 * Initialize the fc transport attributes for the target
	 * containing this scsi device.  Also note that the driver's
	 * target pointer is stored in the starget_data for the
	 * driver's sysfs entry point functions.
	 */
	rport->dev_loss_tmo = vport->cfg_devloss_tmo;
	if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
		lpfc_sli_poll_fcp_ring(phba);
		if (phba->cfg_poll & DISABLE_FCP_RING_INT)
			lpfc_poll_rearm_timer(phba);
	}

	return 0;
}

/**
 * lpfc_slave_destroy: slave_destroy entry point of the SCSI Host Template
 * data structure.
 * @sdev: Pointer to scsi_device.
 *
 * This routine sets the @sdev hostdata field to NULL.
 **/
static void
lpfc_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

struct scsi_host_template lpfc_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_hba_attrs,
	.max_sectors		= 0xFFFF,
};

struct scsi_host_template lpfc_vport_template = {
	.module			= THIS_MODULE,
	.name			= LPFC_DRIVER_NAME,
	.info			= lpfc_info,
	.queuecommand		= lpfc_queuecommand,
	.eh_abort_handler	= lpfc_abort_handler,
	.eh_device_reset_handler= lpfc_device_reset_handler,
	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
	.slave_alloc		= lpfc_slave_alloc,
	.slave_configure	= lpfc_slave_configure,
	.slave_destroy		= lpfc_slave_destroy,
	.scan_finished		= lpfc_scan_finished,
	.this_id		= -1,
	.sg_tablesize		= LPFC_DEFAULT_SG_SEG_CNT,
	.cmd_per_lun		= LPFC_CMD_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= lpfc_vport_attrs,
	.max_sectors		= 0xFFFF,
};
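
/*
 * Both templates are consumed by scsi_host_alloc() when a physical or
 * virtual port is created.  A sketch of that call (the real call site
 * lives in the port-creation path, not in this file):
 *
 *	shost = scsi_host_alloc(&lpfc_template, sizeof(struct lpfc_vport));
 */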