/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/crc32.h>
#include <linux/slab.h>

#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include "fc_libfc.h"

struct kmem_cache *scsi_pkt_cachep;

/* SRB state definitions */
#define FC_SRB_FREE		0		/* cmd is free */
#define FC_SRB_CMD_SENT		(1 << 0)	/* cmd has been sent */
#define FC_SRB_RCV_STATUS	(1 << 1)	/* response has arrived */
#define FC_SRB_ABORT_PENDING	(1 << 2)	/* cmd abort sent to device */
#define FC_SRB_ABORTED		(1 << 3)	/* abort acknowledged */
#define FC_SRB_DISCONTIG	(1 << 4)	/* non-sequential data recvd */
#define FC_SRB_COMPL		(1 << 5)	/* fc_io_compl has been run */
#define FC_SRB_FCP_PROCESSING_TMO (1 << 6)	/* timer function processing */

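/* Data direction flags; these live in fsp->req_flags rather than fsp->state. */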
#define FC_SRB_READ		(1 << 1)
#define FC_SRB_WRITE		(1 << 0)

/*
 * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
 */
#define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
#define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
#define CMD_COMPL_STATUS(Cmnd)	    ((Cmnd)->SCp.this_residual)
#define CMD_SCSI_STATUS(Cmnd)	    ((Cmnd)->SCp.Status)
#define CMD_RESID_LEN(Cmnd)	    ((Cmnd)->SCp.buffers_residual)

/**
 * struct fc_fcp_internal - FCP layer internal data
 * @scsi_pkt_pool: Memory pool to draw FCP packets from
 * @scsi_queue_lock: Protects the scsi_pkt_queue
 * @scsi_pkt_queue: Current FCP packets
 * @last_can_queue_ramp_down_time: time of the last can_queue ramp down
 * @last_can_queue_ramp_up_time: time of the last can_queue ramp up
 * @max_can_queue: maximum size that can_queue may be ramped up to
 */
struct fc_fcp_internal {
	mempool_t		*scsi_pkt_pool;
	spinlock_t		scsi_queue_lock;
	struct list_head	scsi_pkt_queue;
	unsigned long		last_can_queue_ramp_down_time;
	unsigned long		last_can_queue_ramp_up_time;
	int			max_can_queue;
};

#define fc_get_scsi_internal(x)	((struct fc_fcp_internal *)(x)->scsi_priv)

/*
 * function prototypes
 * FC scsi I/O related functions
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recv(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_resp(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_complete_locked(struct fc_fcp_pkt *);
static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_recovery(struct fc_fcp_pkt *);
static void fc_fcp_timeout(unsigned long);
static void fc_fcp_rec(struct fc_fcp_pkt *);
static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *);
static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_io_compl(struct fc_fcp_pkt *);

static void fc_fcp_srr(struct fc_fcp_pkt *, enum fc_rctl, u32);
static void fc_fcp_srr_resp(struct fc_seq *, struct fc_frame *, void *);
static void fc_fcp_srr_error(struct fc_fcp_pkt *, struct fc_frame *);

/*
 * command status codes
 */
#define FC_COMPLETE		0
#define FC_CMD_ABORTED		1
#define FC_CMD_RESET		2
#define FC_CMD_PLOGO		3
#define FC_SNS_RCV		4
#define FC_TRANS_ERR		5
#define FC_DATA_OVRRUN		6
#define FC_DATA_UNDRUN		7
#define FC_ERROR		8
#define FC_HRD_ERROR		9
#define FC_CMD_RECOVERY		10

/*
 * Error recovery timeout values.
 */
#define FC_SCSI_ER_TIMEOUT	(10 * HZ)
#define FC_SCSI_TM_TOV		(10 * HZ)
#define FC_SCSI_REC_TOV		(2 * HZ)
#define FC_HOST_RESET_TIMEOUT	(30 * HZ)
#define FC_CAN_QUEUE_PERIOD	(60 * HZ)

#define FC_MAX_ERROR_CNT	5
#define FC_MAX_RECOV_RETRY	3

#define FC_FCP_DFLT_QUEUE_DEPTH 32

/**
 * fc_fcp_pkt_alloc() - Allocate a fcp_pkt
 * @lport: The local port that the FCP packet is for
 * @gfp:   GFP flags for allocation
 *
 * Return value: fcp_pkt structure or null on allocation failure.
 * Context:	 Can be called from process context, no lock is required.
 */
static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	struct fc_fcp_pkt *fsp;

	fsp = mempool_alloc(si->scsi_pkt_pool, gfp);
	if (fsp) {
		memset(fsp, 0, sizeof(*fsp));
		fsp->lp = lport;
		atomic_set(&fsp->ref_cnt, 1);
		init_timer(&fsp->timer);
		INIT_LIST_HEAD(&fsp->list);
		spin_lock_init(&fsp->scsi_pkt_lock);
	}
	return fsp;
}

/**
 * fc_fcp_pkt_release() - Release hold on a fcp_pkt
 * @fsp: The FCP packet to be released
 *
 * Context: Can be called from process or interrupt context,
 *	    no lock is required.
 */
static void fc_fcp_pkt_release(struct fc_fcp_pkt *fsp)
{
	if (atomic_dec_and_test(&fsp->ref_cnt)) {
		struct fc_fcp_internal *si = fc_get_scsi_internal(fsp->lp);

		mempool_free(fsp, si->scsi_pkt_pool);
	}
}

/**
 * fc_fcp_pkt_hold() - Hold a fcp_pkt
 * @fsp: The FCP packet to be held
 */
static void fc_fcp_pkt_hold(struct fc_fcp_pkt *fsp)
{
	atomic_inc(&fsp->ref_cnt);
}

/**
 * fc_fcp_pkt_destroy() - Release hold on a fcp_pkt
 * @seq: The sequence that the FCP packet is on (required by destructor API)
 * @fsp: The FCP packet to be released
 *
 * This routine is called by a destructor callback in the exch_seq_send()
 * routine of the libfc Transport Template. The 'struct fc_seq' is a required
 * argument even though it is not used by this routine.
 *
 * Context: No locking required.
 */
static void fc_fcp_pkt_destroy(struct fc_seq *seq, void *fsp)
{
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_lock_pkt() - Lock a fcp_pkt and increase its reference count
 * @fsp: The FCP packet to be locked and incremented
 *
 * We should only return error if we return a command to SCSI-ml before
 * getting a response. This can happen in cases where we send an abort but
 * do not wait for the response, and the abort and the command end up
 * passing each other on the wire/network-layer.
 *
 * Note: this function locks the packet and gets a reference to allow
 * callers to call the completion function while the lock is held and
 * not have to worry about the packets refcount.
 *
 * TODO: Maybe we should just have callers grab/release the lock and
 * have a function that they call to verify the fsp and grab a ref if
 * needed.
 */
static inline int fc_fcp_lock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->state & FC_SRB_COMPL) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return -EPERM;
	}

	fc_fcp_pkt_hold(fsp);
	return 0;
}

/**
 * fc_fcp_unlock_pkt() - Release a fcp_pkt's lock and decrement its
 *			 reference count
 * @fsp: The FCP packet to be unlocked and decremented
 */
static inline void fc_fcp_unlock_pkt(struct fc_fcp_pkt *fsp)
{
	spin_unlock_bh(&fsp->scsi_pkt_lock);
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_fcp_timer_set() - Start a timer for a fcp_pkt
 * @fsp:   The FCP packet to start a timer for
 * @delay: The timeout period for the timer
 */
static void fc_fcp_timer_set(struct fc_fcp_pkt *fsp, unsigned long delay)
{
	if (!(fsp->state & FC_SRB_COMPL))
		mod_timer(&fsp->timer, jiffies + delay);
}

/**
 * fc_fcp_send_abort() - Send an abort for exchanges associated with a
 *			 fcp_pkt
 * @fsp: The FCP packet to abort exchanges on
 */
static int fc_fcp_send_abort(struct fc_fcp_pkt *fsp)
{
	if (!fsp->seq_ptr)
		return -EINVAL;

	fsp->state |= FC_SRB_ABORT_PENDING;
	return fsp->lp->tt.seq_exch_abort(fsp->seq_ptr, 0);
}

/**
 * fc_fcp_retry_cmd() - Retry a fcp_pkt
 * @fsp: The FCP packet to be retried
 *
 * Sets the status code to be FC_ERROR and then calls
 * fc_fcp_complete_locked() which in turn calls fc_io_compl().
 * fc_io_compl() will notify the SCSI-ml that the I/O is done.
 * The SCSI-ml will retry the command.
 */
static void fc_fcp_retry_cmd(struct fc_fcp_pkt *fsp)
{
	if (fsp->seq_ptr) {
		fsp->lp->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}

	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->io_status = 0;
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_ddp_setup() - Calls a LLD's ddp_setup routine to set up DDP context
 * @fsp: The FCP packet that will manage the DDP frames
 * @xid: The XID that will be used for the DDP exchange
 */
void fc_fcp_ddp_setup(struct fc_fcp_pkt *fsp, u16 xid)
{
	struct fc_lport *lport;

	lport = fsp->lp;
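	/*
	 * DDP is only set up for read commands, and only when the LLD has
	 * large receive offload enabled and provides a ddp_setup hook.
	 */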
	if ((fsp->req_flags & FC_SRB_READ) &&
	    (lport->lro_enabled) && (lport->tt.ddp_setup)) {
		if (lport->tt.ddp_setup(lport, xid, scsi_sglist(fsp->cmd),
					scsi_sg_count(fsp->cmd)))
			fsp->xfer_ddp = xid;
	}
}

/**
 * fc_fcp_ddp_done() - Calls a LLD's ddp_done routine to release any
 *		       DDP related resources for a fcp_pkt
 * @fsp: The FCP packet that DDP had been used on
 */
static void fc_fcp_ddp_done(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lport;

	if (!fsp)
		return;

	if (fsp->xfer_ddp == FC_XID_UNKNOWN)
		return;

	lport = fsp->lp;
	if (lport->tt.ddp_done) {
		fsp->xfer_len = lport->tt.ddp_done(lport, fsp->xfer_ddp);
		fsp->xfer_ddp = FC_XID_UNKNOWN;
	}
}

/**
 * fc_fcp_can_queue_ramp_up() - increases can_queue
 * @lport: lport to ramp up can_queue
 *
 * Locking notes: Called with Scsi_Host lock held
 */
static void fc_fcp_can_queue_ramp_up(struct fc_lport *lport)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	int can_queue;

	if (si->last_can_queue_ramp_up_time &&
	    (time_before(jiffies, si->last_can_queue_ramp_up_time +
			 FC_CAN_QUEUE_PERIOD)))
		return;

	if (time_before(jiffies, si->last_can_queue_ramp_down_time +
			FC_CAN_QUEUE_PERIOD))
		return;

	si->last_can_queue_ramp_up_time = jiffies;

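	/* Double can_queue, but never beyond si->max_can_queue. */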
	can_queue = lport->host->can_queue << 1;
	if (can_queue >= si->max_can_queue) {
		can_queue = si->max_can_queue;
		si->last_can_queue_ramp_down_time = 0;
	}
	lport->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lport->host, "libfc: increased "
		     "can_queue to %d.\n", can_queue);
}

/**
 * fc_fcp_can_queue_ramp_down() - reduces can_queue
 * @lport: lport to reduce can_queue
 *
 * If we are getting memory allocation failures, then we may
 * be trying to execute too many commands. We let the running
 * commands complete or timeout, then try again with a reduced
 * can_queue. Eventually we will hit the point where we run
 * on all reserved structs.
 *
 * Locking notes: Called with Scsi_Host lock held
 */
static void fc_fcp_can_queue_ramp_down(struct fc_lport *lport)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	int can_queue;

	if (si->last_can_queue_ramp_down_time &&
	    (time_before(jiffies, si->last_can_queue_ramp_down_time +
			 FC_CAN_QUEUE_PERIOD)))
		return;

	si->last_can_queue_ramp_down_time = jiffies;

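	/* Halve can_queue, but always leave at least one slot available. */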
	can_queue = lport->host->can_queue;
	can_queue >>= 1;
	if (!can_queue)
		can_queue = 1;
	lport->host->can_queue = can_queue;
	shost_printk(KERN_ERR, lport->host, "libfc: Could not allocate frame.\n"
		     "Reducing can_queue to %d.\n", can_queue);
}

/*
 * fc_fcp_frame_alloc() -  Allocates fc_frame structure and buffer.
 * @lport:	fc lport struct
 * @len:	payload length
 *
 * Allocates an fc_frame structure and buffer; if the allocation fails,
 * the lport's can_queue is ramped down.
 */
static inline struct fc_frame *fc_fcp_frame_alloc(struct fc_lport *lport,
						  size_t len)
{
	struct fc_frame *fp;
	unsigned long flags;

	fp = fc_frame_alloc(lport, len);
	if (likely(fp))
		return fp;

	/* error case */
	spin_lock_irqsave(lport->host->host_lock, flags);
	fc_fcp_can_queue_ramp_down(lport);
	spin_unlock_irqrestore(lport->host->host_lock, flags);
	return NULL;
}

/**
 * fc_fcp_recv_data() - Handler for receiving SCSI-FCP data from a target
 * @fsp: The FCP packet the data is on
 * @fp:	 The data frame
 */
static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct scsi_cmnd *sc = fsp->cmd;
	struct fc_lport *lport = fsp->lp;
	struct fcoe_dev_stats *stats;
	struct fc_frame_header *fh;
	size_t start_offset;
	size_t offset;
	u32 crc;
	u32 copy_len = 0;
	size_t len;
	void *buf;
	struct scatterlist *sg;
	u32 nents;

	fh = fc_frame_header_get(fp);
	offset = ntohl(fh->fh_parm_offset);
	start_offset = offset;
	len = fr_len(fp) - sizeof(*fh);
	buf = fc_frame_payload_get(fp, 0);

	/*
	 * if this I/O is ddped then clear it
	 * and initiate recovery since data
	 * frames are expected to be placed
	 * directly in that case.
	 */
	if (fsp->xfer_ddp != FC_XID_UNKNOWN) {
		fc_fcp_ddp_done(fsp);
		goto err;
	}
	if (offset + len > fsp->data_len) {
		/* this should never happen */
		if ((fr_flags(fp) & FCPHF_CRC_UNCHECKED) &&
		    fc_frame_crc_check(fp))
			goto crc_err;
		FC_FCP_DBG(fsp, "data received past end. len %zx offset %zx "
			   "data_len %x\n", len, offset, fsp->data_len);
		goto err;
	}
	if (offset != fsp->xfer_len)
		fsp->state |= FC_SRB_DISCONTIG;

	sg = scsi_sglist(sc);
	nents = scsi_sg_count(sc);

	if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
						    &offset, KM_SOFTIRQ0, NULL);
	} else {
		crc = crc32(~0, (u8 *) fh, sizeof(*fh));
		copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
						    &offset, KM_SOFTIRQ0, &crc);
		buf = fc_frame_payload_get(fp, 0);
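		/*
		 * The payload is padded to a 4-byte boundary; fold any pad
		 * bytes into the running CRC before comparing it with the
		 * CRC carried in the frame.
		 */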
		if (len % 4)
			crc = crc32(crc, buf + len, 4 - (len % 4));

		if (~crc != le32_to_cpu(fr_crc(fp))) {
crc_err:
			stats = per_cpu_ptr(lport->dev_stats, get_cpu());
			stats->ErrorFrames++;
			/* per cpu count, not total count, but OK for limit */
			if (stats->InvalidCRCCount++ < 5)
				printk(KERN_WARNING "libfc: CRC error on data "
				       "frame for port (%6.6x)\n",
				       lport->port_id);
			put_cpu();
			/*
			 * Assume the frame is total garbage.
			 * We may have copied it over the good part
			 * of the buffer.
			 * If so, we need to retry the entire operation.
			 * Otherwise, ignore it.
			 */
			if (fsp->state & FC_SRB_DISCONTIG)
				goto err;
			return;
		}
	}

	if (fsp->xfer_contig_end == start_offset)
		fsp->xfer_contig_end += copy_len;
	fsp->xfer_len += copy_len;

	/*
	 * In the very rare event that this data arrived after the response
	 * and completes the transfer, call the completion handler.
	 */
	if (unlikely(fsp->state & FC_SRB_RCV_STATUS) &&
	    fsp->xfer_len == fsp->data_len - fsp->scsi_resid)
		fc_fcp_complete_locked(fsp);
	return;
err:
	fc_fcp_recovery(fsp);
}

/**
 * fc_fcp_send_data() - Send SCSI data to a target
 * @fsp:      The FCP packet the data is on
 * @sp:	      The sequence the data is to be sent on
 * @offset:   The starting offset for this data request
 * @seq_blen: The burst length for this data request
 *
 * Called after receiving a Transfer Ready data descriptor.
 * If the LLD is capable of sequence offload then send down the
 * seq_blen amount of data in a single frame, otherwise send
 * multiple frames of the maximum frame payload supported by
 * the target port.
 */
static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
			    size_t offset, size_t seq_blen)
{
	struct fc_exch *ep;
	struct scsi_cmnd *sc;
	struct scatterlist *sg;
	struct fc_frame *fp = NULL;
	struct fc_lport *lport = fsp->lp;
	struct page *page;
	size_t remaining;
	size_t t_blen;
	size_t tlen;
	size_t sg_bytes;
	size_t frame_offset, fh_parm_offset;
	size_t off;
	int error;
	void *data = NULL;
	void *page_addr;
	int using_sg = lport->sg_supp;
	u32 f_ctl;

	WARN_ON(seq_blen <= 0);
	if (unlikely(offset + seq_blen > fsp->data_len)) {
		/* this should never happen */
		FC_FCP_DBG(fsp, "xfer-ready past end. seq_blen %zx "
			   "offset %zx\n", seq_blen, offset);
		fc_fcp_send_abort(fsp);
		return 0;
	} else if (offset != fsp->xfer_len) {
		/* Out of Order Data Request - no problem, but unexpected. */
		FC_FCP_DBG(fsp, "xfer-ready non-contiguous. "
			   "seq_blen %zx offset %zx\n", seq_blen, offset);
	}

	/*
	 * if LLD is capable of seq_offload then set transport
	 * burst length (t_blen) to seq_blen, otherwise set t_blen
	 * to max FC frame payload previously set in fsp->max_payload.
	 */
	t_blen = fsp->max_payload;
	if (lport->seq_offload) {
		t_blen = min(seq_blen, (size_t)lport->lso_max);
		FC_FCP_DBG(fsp, "fsp=%p:lso:blen=%zx lso_max=0x%x t_blen=%zx\n",
			   fsp, seq_blen, lport->lso_max, t_blen);
	}

	if (t_blen > 512)
		t_blen &= ~(512 - 1);	/* round down to block size */
	sc = fsp->cmd;

	remaining = seq_blen;
	fh_parm_offset = frame_offset = offset;
	tlen = 0;
	seq = lport->tt.seq_start_next(seq);
	f_ctl = FC_FC_REL_OFF;
	WARN_ON(!seq);

	sg = scsi_sglist(sc);

	while (remaining > 0 && sg) {
		if (offset >= sg->length) {
			offset -= sg->length;
			sg = sg_next(sg);
			continue;
		}
		if (!fp) {
			tlen = min(t_blen, remaining);

			/*
			 * TODO.  Temporary workaround.	 fc_seq_send() can't
			 * handle odd lengths in non-linear skbs.
			 * This will be the final fragment only.
			 */
			if (tlen % 4)
				using_sg = 0;
			fp = fc_frame_alloc(lport, using_sg ? 0 : tlen);
			if (!fp)
				return -ENOMEM;

			data = fc_frame_header_get(fp) + 1;
			fh_parm_offset = frame_offset;
			fr_max_payload(fp) = fsp->max_payload;
		}

		off = offset + sg->offset;
		sg_bytes = min(tlen, sg->length - offset);
		sg_bytes = min(sg_bytes,
			       (size_t) (PAGE_SIZE - (off & ~PAGE_MASK)));
		page = sg_page(sg) + (off >> PAGE_SHIFT);
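		/*
		 * Either attach the page to the skb as a fragment (the
		 * zero-copy path when the LLD supports scatter-gather) or
		 * kmap it and copy into the frame's linear data below.
		 */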
		if (using_sg) {
			get_page(page);
			skb_fill_page_desc(fp_skb(fp),
					   skb_shinfo(fp_skb(fp))->nr_frags,
					   page, off & ~PAGE_MASK, sg_bytes);
			fp_skb(fp)->data_len += sg_bytes;
			fr_len(fp) += sg_bytes;
			fp_skb(fp)->truesize += PAGE_SIZE;
		} else {
			/*
			 * The scatterlist item may be bigger than PAGE_SIZE,
			 * but we must not cross pages inside the kmap.
			 */
			page_addr = kmap_atomic(page, KM_SOFTIRQ0);
			memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
			       sg_bytes);
			kunmap_atomic(page_addr, KM_SOFTIRQ0);
			data += sg_bytes;
		}
		offset += sg_bytes;
		frame_offset += sg_bytes;
		tlen -= sg_bytes;
		remaining -= sg_bytes;

		if ((skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN) &&
		    (tlen))
			continue;

		/*
		 * Send sequence with transfer sequence initiative in case
		 * this is last FCP frame of the sequence.
		 */
		if (remaining == 0)
			f_ctl |= FC_FC_SEQ_INIT | FC_FC_END_SEQ;

		ep = fc_seq_exch(seq);
		fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
			       FC_TYPE_FCP, f_ctl, fh_parm_offset);

		/*
		 * Send this fragment of the sequence.
		 */
		error = lport->tt.seq_send(lport, seq, fp);
		if (error) {
			WARN_ON(1);		/* send error should be rare */
			fc_fcp_retry_cmd(fsp);
			return 0;
		}
		fp = NULL;
	}
	fsp->xfer_len += seq_blen;	/* premature count? */
	return 0;
}

/**
 * fc_fcp_abts_resp() - Send an ABTS response
 * @fsp: The FCP packet that is being aborted
 * @fp:	 The response frame
 */
static void fc_fcp_abts_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int ba_done = 1;
	struct fc_ba_rjt *brp;
	struct fc_frame_header *fh;

	fh = fc_frame_header_get(fp);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		break;
	case FC_RCTL_BA_RJT:
		brp = fc_frame_payload_get(fp, sizeof(*brp));
		if (brp && brp->br_reason == FC_BA_RJT_LOG_ERR)
			break;
		/* fall thru */
	default:
		/*
		 * we will let the command timeout
		 * and scsi-ml recover in this case,
		 * therefore cleared the ba_done flag.
		 */
		ba_done = 0;
	}

	if (ba_done) {
		fsp->state |= FC_SRB_ABORTED;
		fsp->state &= ~FC_SRB_ABORT_PENDING;

		if (fsp->wait_for_comp)
			complete(&fsp->tm_done);
		else
			fc_fcp_complete_locked(fsp);
	}
}

/**
 * fc_fcp_recv() - Receive an FCP frame
 * @seq: The sequence the frame is on
 * @fp:	 The received frame
 * @arg: The related FCP packet
 *
 * Context: Called from Soft IRQ context. Can not be called
 *	    holding the FCP packet list lock.
 */
static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_lport *lport = fsp->lp;
	struct fc_frame_header *fh;
	struct fcp_txrdy *dd;
	u8 r_ctl;
	int rc = 0;

	if (IS_ERR(fp)) {
		fc_fcp_error(fsp, fp);
		return;
	}

	fh = fc_frame_header_get(fp);
	r_ctl = fh->fh_r_ctl;

	if (lport->state != LPORT_ST_READY)
		goto out;
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->last_pkt_time = jiffies;

	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_abts_resp(fsp, fp);
		goto unlock;
	}

	if (fsp->state & (FC_SRB_ABORTED | FC_SRB_ABORT_PENDING))
		goto unlock;

	if (r_ctl == FC_RCTL_DD_DATA_DESC) {
		/*
		 * received XFER RDY from the target
		 * need to send data to the target
		 */
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);
		dd = fc_frame_payload_get(fp, sizeof(*dd));
		WARN_ON(!dd);

		rc = fc_fcp_send_data(fsp, seq,
				      (size_t) ntohl(dd->ft_data_ro),
				      (size_t) ntohl(dd->ft_burst_len));
		if (!rc)
			seq->rec_data = fsp->xfer_len;
	} else if (r_ctl == FC_RCTL_DD_SOL_DATA) {
		/*
		 * received a DATA frame
		 * next we will copy the data to the system buffer
		 */
		WARN_ON(fr_len(fp) < sizeof(*fh));	/* len may be 0 */
		fc_fcp_recv_data(fsp, fp);
		seq->rec_data = fsp->xfer_contig_end;
	} else if (r_ctl == FC_RCTL_DD_CMD_STATUS) {
		WARN_ON(fr_flags(fp) & FCPHF_CRC_UNCHECKED);

		fc_fcp_resp(fsp, fp);
	} else {
		FC_FCP_DBG(fsp, "unexpected frame.  r_ctl %x\n", r_ctl);
	}
unlock:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_frame_free(fp);
}

/**
 * fc_fcp_resp() - Handler for FCP responses
 * @fsp: The FCP packet the response is for
 * @fp:	 The response frame
 */
static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fcp_resp *fc_rp;
	struct fcp_resp_ext *rp_ex;
	struct fcp_resp_rsp_info *fc_rp_info;
	u32 plen;
	u32 expected_len;
	u32 respl = 0;
	u32 snsl = 0;
	u8 flags = 0;

	plen = fr_len(fp);
	fh = (struct fc_frame_header *)fr_hdr(fp);
	if (unlikely(plen < sizeof(*fh) + sizeof(*fc_rp)))
		goto len_err;
	plen -= sizeof(*fh);
	fc_rp = (struct fcp_resp *)(fh + 1);
	fsp->cdb_status = fc_rp->fr_status;
	flags = fc_rp->fr_flags;
	fsp->scsi_comp_flags = flags;
	expected_len = fsp->data_len;

	/* if ddp, update xfer len */
	fc_fcp_ddp_done(fsp);

	if (unlikely((flags & ~FCP_CONF_REQ) || fc_rp->fr_status)) {
		rp_ex = (void *)(fc_rp + 1);
		if (flags & (FCP_RSP_LEN_VAL | FCP_SNS_LEN_VAL)) {
			if (plen < sizeof(*fc_rp) + sizeof(*rp_ex))
				goto len_err;
			fc_rp_info = (struct fcp_resp_rsp_info *)(rp_ex + 1);
			if (flags & FCP_RSP_LEN_VAL) {
				respl = ntohl(rp_ex->fr_rsp_len);
				if (respl != sizeof(*fc_rp_info))
					goto len_err;
				if (fsp->wait_for_comp) {
					/* Abuse cdb_status for rsp code */
					fsp->cdb_status = fc_rp_info->rsp_code;
					complete(&fsp->tm_done);
					/*
					 * tmfs will not have any scsi cmd so
					 * exit here
					 */
					return;
				}
			}
			if (flags & FCP_SNS_LEN_VAL) {
				snsl = ntohl(rp_ex->fr_sns_len);
				if (snsl > SCSI_SENSE_BUFFERSIZE)
					snsl = SCSI_SENSE_BUFFERSIZE;
				memcpy(fsp->cmd->sense_buffer,
				       (char *)fc_rp_info + respl, snsl);
			}
		}
		if (flags & (FCP_RESID_UNDER | FCP_RESID_OVER)) {
			if (plen < sizeof(*fc_rp) + sizeof(rp_ex->fr_resid))
				goto len_err;
			if (flags & FCP_RESID_UNDER) {
				fsp->scsi_resid = ntohl(rp_ex->fr_resid);
				/*
				 * The cmnd->underflow is the minimum number of
				 * bytes that must be transferred for this
				 * command.  Provided a sense condition is not
				 * present, make sure the actual amount
				 * transferred is at least the underflow value
				 * or fail.
				 */
				if (!(flags & FCP_SNS_LEN_VAL) &&
				    (fc_rp->fr_status == 0) &&
				    (scsi_bufflen(fsp->cmd) -
				     fsp->scsi_resid) < fsp->cmd->underflow)
					goto err;
				expected_len -= fsp->scsi_resid;
			} else {
				fsp->status_code = FC_ERROR;
			}
		}
	}
	fsp->state |= FC_SRB_RCV_STATUS;

	/*
	 * Check for missing or extra data frames.
	 */
	if (unlikely(fsp->xfer_len != expected_len)) {
		if (fsp->xfer_len < expected_len) {
			/*
			 * Some data may be queued locally,
			 * Wait at least one jiffy to see if it is delivered.
			 * If this expires without data, we may do SRR.
			 */
			fc_fcp_timer_set(fsp, 2);
			return;
		}
		fsp->status_code = FC_DATA_OVRRUN;
		FC_FCP_DBG(fsp, "tgt %6.6x xfer len %zx greater than expected, "
			   "len %x, data len %x\n",
			   fsp->rport->port_id,
			   fsp->xfer_len, expected_len, fsp->data_len);
	}
	fc_fcp_complete_locked(fsp);
	return;

len_err:
	FC_FCP_DBG(fsp, "short FCP response. flags 0x%x len %u respl %u "
		   "snsl %u\n", flags, fr_len(fp), respl, snsl);
err:
	fsp->status_code = FC_ERROR;
	fc_fcp_complete_locked(fsp);
}

/**
 * fc_fcp_complete_locked() - Complete processing of a fcp_pkt with the
 *			      fcp_pkt lock held
 * @fsp: The FCP packet to be completed
 *
 * This function may sleep if a timer is pending. The packet lock must be
 * held, and the host lock must not be held.
 */
static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lport = fsp->lp;
	struct fc_seq *seq;
	struct fc_exch *ep;
	u32 f_ctl;

	if (fsp->state & FC_SRB_ABORT_PENDING)
		return;

	if (fsp->state & FC_SRB_ABORTED) {
		if (!fsp->status_code)
			fsp->status_code = FC_CMD_ABORTED;
	} else {
		/*
		 * Test for transport underrun, independent of response
		 * underrun status.
		 */
		if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
		    (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
		     fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
			fsp->status_code = FC_DATA_UNDRUN;
			fsp->io_status = 0;
		}
	}

	seq = fsp->seq_ptr;
	if (seq) {
		fsp->seq_ptr = NULL;
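		/*
		 * If the target requested confirmation (FCP_CONF_REQ), send
		 * a solicited control frame on a new sequence before
		 * completing the exchange.
		 */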
		if (unlikely(fsp->scsi_comp_flags & FCP_CONF_REQ)) {
			struct fc_frame *conf_frame;
			struct fc_seq *csp;

			csp = lport->tt.seq_start_next(seq);
			conf_frame = fc_fcp_frame_alloc(fsp->lp, 0);
			if (conf_frame) {
				f_ctl = FC_FC_SEQ_INIT;
				f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
				ep = fc_seq_exch(seq);
				fc_fill_fc_hdr(conf_frame, FC_RCTL_DD_SOL_CTL,
					       ep->did, ep->sid,
					       FC_TYPE_FCP, f_ctl, 0);
				lport->tt.seq_send(lport, csp, conf_frame);
			}
		}
		lport->tt.exch_done(seq);
	}
	fc_io_compl(fsp);
}

/**
 * fc_fcp_cleanup_cmd() - Cancel the active exchange on a fcp_pkt
 * @fsp:   The FCP packet whose exchanges should be canceled
 * @error: The reason for the cancellation
 */
static void fc_fcp_cleanup_cmd(struct fc_fcp_pkt *fsp, int error)
{
	struct fc_lport *lport = fsp->lp;

	if (fsp->seq_ptr) {
		lport->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->status_code = error;
}

/**
 * fc_fcp_cleanup_each_cmd() - Cancel all exchanges on a local port
 * @lport: The local port whose exchanges should be canceled
 * @id:	   The target's ID
 * @lun:   The LUN
 * @error: The reason for cancellation
 *
 * If lun or id is -1, they are ignored.
 */
static void fc_fcp_cleanup_each_cmd(struct fc_lport *lport, unsigned int id,
				    unsigned int lun, int error)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	struct fc_fcp_pkt *fsp;
	struct scsi_cmnd *sc_cmd;
	unsigned long flags;

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
restart:
	list_for_each_entry(fsp, &si->scsi_pkt_queue, list) {
		sc_cmd = fsp->cmd;
		if (id != -1 && scmd_id(sc_cmd) != id)
			continue;

		if (lun != -1 && sc_cmd->device->lun != lun)
			continue;

		fc_fcp_pkt_hold(fsp);
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

		if (!fc_fcp_lock_pkt(fsp)) {
			fc_fcp_cleanup_cmd(fsp, error);
			fc_io_compl(fsp);
			fc_fcp_unlock_pkt(fsp);
		}

		fc_fcp_pkt_release(fsp);
		spin_lock_irqsave(&si->scsi_queue_lock, flags);
		/*
		 * while we dropped the lock multiple pkts could
		 * have been released, so we have to start over.
		 */
		goto restart;
	}
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
}

/**
 * fc_fcp_abort_io() - Abort all FCP-SCSI exchanges on a local port
 * @lport: The local port whose exchanges are to be aborted
 */
static void fc_fcp_abort_io(struct fc_lport *lport)
{
	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_HRD_ERROR);
}

/**
 * fc_fcp_pkt_send() - Send a fcp_pkt
 * @lport: The local port to send the FCP packet on
 * @fsp:   The FCP packet to send
 *
 * Return:  Zero for success and -1 for failure
 * Locks:   Called without locks held
 */
static int fc_fcp_pkt_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);
	unsigned long flags;
	int rc;

	fsp->cmd->SCp.ptr = (char *)fsp;
	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_flags = fsp->req_flags & ~FCP_CFL_LEN_MASK;

	int_to_scsilun(fsp->cmd->device->lun,
		       (struct scsi_lun *)fsp->cdb_cmd.fc_lun);
	memcpy(fsp->cdb_cmd.fc_cdb, fsp->cmd->cmnd, fsp->cmd->cmd_len);

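	/*
	 * Add the packet to the lport's queue before sending so that the
	 * cleanup and error-handling paths can find it; remove it again
	 * if the send fails.
	 */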
	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_add_tail(&fsp->list, &si->scsi_pkt_queue);
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
	rc = lport->tt.fcp_cmd_send(lport, fsp, fc_fcp_recv);
	if (unlikely(rc)) {
		spin_lock_irqsave(&si->scsi_queue_lock, flags);
		list_del(&fsp->list);
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
	}

	return rc;
}

/**
 * fc_fcp_cmd_send() - Send a FCP command
 * @lport: The local port to send the command on
 * @fsp:   The FCP packet the command is on
 * @resp:  The handler for the response
 */
static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
			   void (*resp)(struct fc_seq *,
					struct fc_frame *fp,
					void *arg))
{
	struct fc_frame *fp;
	struct fc_seq *seq;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rpriv;
	const size_t len = sizeof(fsp->cdb_cmd);
	int rc = 0;

	if (fc_fcp_lock_pkt(fsp))
		return 0;

	fp = fc_fcp_frame_alloc(lport, sizeof(fsp->cdb_cmd));
	if (!fp) {
		rc = -1;
		goto unlock;
	}

	memcpy(fc_frame_payload_get(fp, len), &fsp->cdb_cmd, len);
	fr_fsp(fp) = fsp;
	rport = fsp->rport;
	fsp->max_payload = rport->maxframe_size;
	rpriv = rport->dd_data;

	fc_fill_fc_hdr(fp, FC_RCTL_DD_UNSOL_CMD, rport->port_id,
		       rpriv->local_port->port_id, FC_TYPE_FCP,
		       FC_FCTL_REQ, 0);

	seq = lport->tt.exch_seq_send(lport, fp, resp, fc_fcp_pkt_destroy,
				      fsp, 0);
	if (!seq) {
		rc = -1;
		goto unlock;
	}
	fsp->last_pkt_time = jiffies;
	fsp->seq_ptr = seq;
	fc_fcp_pkt_hold(fsp);	/* hold for fc_fcp_pkt_destroy */

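	/*
	 * Arm the command timer: use the shorter REC tolerance when the
	 * target supports the REC ELS, otherwise the generic error
	 * recovery timeout.
	 */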
	setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp);
	fc_fcp_timer_set(fsp,
			 (fsp->tgt_flags & FC_RP_FLAGS_REC_SUPPORTED) ?
			 FC_SCSI_REC_TOV : FC_SCSI_ER_TIMEOUT);
unlock:
	fc_fcp_unlock_pkt(fsp);
	return rc;
}

/**
 * fc_fcp_error() - Handler for FCP layer errors
 * @fsp: The FCP packet the error is on
 * @fp:	 The frame that has errored
 */
static void fc_fcp_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (error == -FC_EX_CLOSED) {
		fc_fcp_retry_cmd(fsp);
		goto unlock;
	}

	/*
	 * clear abort pending, because the lower layer
	 * decided to force completion.
	 */
	fsp->state &= ~FC_SRB_ABORT_PENDING;
	fsp->status_code = FC_CMD_PLOGO;
	fc_fcp_complete_locked(fsp);
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/**
 * fc_fcp_pkt_abort() - Abort a fcp_pkt
 * @fsp:   The FCP packet to abort on
 *
 * Called to send an abort and then wait for abort completion
 */
static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp)
{
	int rc = FAILED;

	if (fc_fcp_send_abort(fsp))
		return FAILED;

	init_completion(&fsp->tm_done);
	fsp->wait_for_comp = 1;

	spin_unlock_bh(&fsp->scsi_pkt_lock);
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->wait_for_comp = 0;

	if (!rc) {
		FC_FCP_DBG(fsp, "target abort cmd  failed\n");
		rc = FAILED;
	} else if (fsp->state & FC_SRB_ABORTED) {
		FC_FCP_DBG(fsp, "target abort cmd  passed\n");
		rc = SUCCESS;
		fc_fcp_complete_locked(fsp);
	}

	return rc;
}

/**
 * fc_lun_reset_send() - Send LUN reset command
 * @data: The FCP packet that identifies the LUN to be reset
 */
static void fc_lun_reset_send(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_lport *lport = fsp->lp;
	if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) {
		if (fsp->recov_retry++ >= FC_MAX_RECOV_RETRY)
			return;
		if (fc_fcp_lock_pkt(fsp))
			return;
		setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp);
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		fc_fcp_unlock_pkt(fsp);
	}
}

/**
 * fc_lun_reset() - Send a LUN RESET command to a device
 *		    and wait for the reply
 * @lport: The local port to send the command on
 * @fsp:   The FCP packet that identifies the LUN to be reset
 * @id:	   The SCSI command ID
 * @lun:   The LUN ID to be reset
 */
static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp,
			unsigned int id, unsigned int lun)
{
	int rc;

	fsp->cdb_cmd.fc_dl = htonl(fsp->data_len);
	fsp->cdb_cmd.fc_tm_flags = FCP_TMF_LUN_RESET;
	int_to_scsilun(lun, (struct scsi_lun *)fsp->cdb_cmd.fc_lun);

	fsp->wait_for_comp = 1;
	init_completion(&fsp->tm_done);

	fc_lun_reset_send((unsigned long)fsp);

	/*
	 * wait for completion of reset
	 * after that make sure all commands are terminated
	 */
	rc = wait_for_completion_timeout(&fsp->tm_done, FC_SCSI_TM_TOV);

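	/*
	 * Mark the packet complete so the timer and response paths stop
	 * processing it, then tear down any exchange that is still open.
	 */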
	spin_lock_bh(&fsp->scsi_pkt_lock);
	fsp->state |= FC_SRB_COMPL;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	del_timer_sync(&fsp->timer);

	spin_lock_bh(&fsp->scsi_pkt_lock);
	if (fsp->seq_ptr) {
		lport->tt.exch_done(fsp->seq_ptr);
		fsp->seq_ptr = NULL;
	}
	fsp->wait_for_comp = 0;
	spin_unlock_bh(&fsp->scsi_pkt_lock);

	if (!rc) {
		FC_SCSI_DBG(lport, "lun reset failed\n");
		return FAILED;
	}

	/* cdb_status holds the tmf's rsp code */
	if (fsp->cdb_status != FCP_TMF_CMPL)
		return FAILED;

	FC_SCSI_DBG(lport, "lun reset to lun %u completed\n", lun);
	fc_fcp_cleanup_each_cmd(lport, id, lun, FC_CMD_ABORTED);
	return SUCCESS;
}

/**
 * fc_tm_done() - Task Management response handler
 * @seq: The sequence that the response is on
 * @fp:	 The response frame
 * @arg: The FCP packet the response is for
 */
static void fc_tm_done(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		/*
		 * If there is an error just let it time out or wait
		 * for the TMF to be aborted if it timed out.
		 *
		 * scsi-eh will escalate when either happens.
		 */
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		return;

	/*
	 * raced with eh timeout handler.
	 */
	if (!fsp->seq_ptr || !fsp->wait_for_comp) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		return;
	}

	fh = fc_frame_header_get(fp);
	if (fh->fh_type != FC_TYPE_BLS)
		fc_fcp_resp(fsp, fp);
	fsp->seq_ptr = NULL;
	fsp->lp->tt.exch_done(seq);
	fc_frame_free(fp);
	fc_fcp_unlock_pkt(fsp);
}

/**
 * fc_fcp_cleanup() - Cleanup all FCP exchanges on a local port
 * @lport: The local port to be cleaned up
 */
static void fc_fcp_cleanup(struct fc_lport *lport)
{
	fc_fcp_cleanup_each_cmd(lport, -1, -1, FC_ERROR);
}

/**
 * fc_fcp_timeout() - Handler for fcp_pkt timeouts
 * @data: The FCP packet that has timed out
 *
 * If REC is supported then just issue it and return. The REC exchange will
 * complete or time out and recovery can continue at that point. Otherwise,
 * if the response has been received without all the data it has been
 * ER_TIMEOUT since the response was received. If the response has not been
 * received we see if data was received recently. If it has been then we
 * continue waiting, otherwise, we abort the command.
 */
static void fc_fcp_timeout(unsigned long data)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data;
	struct fc_rport *rport = fsp->rport;
	struct fc_rport_libfc_priv *rpriv = rport->dd_data;

	if (fc_fcp_lock_pkt(fsp))
		return;

	if (fsp->cdb_cmd.fc_tm_flags)
		goto unlock;

	fsp->state |= FC_SRB_FCP_PROCESSING_TMO;

	if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
		fc_fcp_rec(fsp);
	else if (time_after_eq(fsp->last_pkt_time + (FC_SCSI_ER_TIMEOUT / 2),
			       jiffies))
		fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
	else if (fsp->state & FC_SRB_RCV_STATUS)
		fc_fcp_complete_locked(fsp);
	else
		fc_fcp_recovery(fsp);
	fsp->state &= ~FC_SRB_FCP_PROCESSING_TMO;
unlock:
	fc_fcp_unlock_pkt(fsp);
}

/**
 * fc_fcp_rec() - Send a REC ELS request
 * @fsp: The FCP packet to send the REC request on
 */
static void fc_fcp_rec(struct fc_fcp_pkt *fsp)
{
	struct fc_lport *lport;
	struct fc_frame *fp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rpriv;

	lport = fsp->lp;
	rport = fsp->rport;
	rpriv = rport->dd_data;
	if (!fsp->seq_ptr || rpriv->rp_state != RPORT_ST_READY) {
		fsp->status_code = FC_HRD_ERROR;
		fsp->io_status = 0;
		fc_fcp_complete_locked(fsp);
		return;
	}
	fp = fc_fcp_frame_alloc(lport, sizeof(struct fc_els_rec));
	if (!fp)
		goto retry;

	fr_seq(fp) = fsp->seq_ptr;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
		       rpriv->local_port->port_id, FC_TYPE_ELS,
		       FC_FCTL_REQ, 0);
	if (lport->tt.elsct_send(lport, rport->port_id, fp, ELS_REC,
				 fc_fcp_rec_resp, fsp,
				 jiffies_to_msecs(FC_SCSI_REC_TOV))) {
		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
		return;
	}
retry:
	if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
	else
		fc_fcp_recovery(fsp);
}

/**
 * fc_fcp_rec_resp() - Handler for REC ELS responses
 * @seq: The sequence the response is on
 * @fp:	 The response frame
 * @arg: The FCP packet the response is on
 *
 * If the response is a reject then the scsi layer will handle
 * the timeout. If the response is an LS_ACC and the I/O is not yet complete,
 * set the timeout and return. If the I/O was completed then complete the
 * exchange and tell the SCSI layer.
 */
static void fc_fcp_rec_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)arg;
	struct fc_els_rec_acc *recp;
	struct fc_els_ls_rjt *rjt;
	u32 e_stat;
	u8 opcode;
	u32 offset;
	enum dma_data_direction data_dir;
	enum fc_rctl r_ctl;
	struct fc_rport_libfc_priv *rpriv;

	if (IS_ERR(fp)) {
		fc_fcp_rec_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fsp->recov_retry = 0;
	opcode = fc_frame_payload_op(fp);
	if (opcode == ELS_LS_RJT) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		switch (rjt->er_reason) {
		default:
			FC_FCP_DBG(fsp, "device %x unexpected REC reject "
				   "reason %d expl %d\n",
				   fsp->rport->port_id, rjt->er_reason,
				   rjt->er_explan);
			/* fall through */
		case ELS_RJT_UNSUP:
			FC_FCP_DBG(fsp, "device does not support REC\n");
			rpriv = fsp->rport->dd_data;
			/*
			 * If we do not support RECs or got some bogus
			 * reason, re-arm the timer so we keep checking
			 * whether the command is making progress.
			 */
			rpriv->flags &= ~FC_RP_FLAGS_REC_SUPPORTED;
			fc_fcp_timer_set(fsp, FC_SCSI_ER_TIMEOUT);
			break;
		case ELS_RJT_LOGIC:
		case ELS_RJT_UNAB:
			/*
			 * If no data transfer, the command frame got dropped
			 * so we just retry.  If data was transferred, we
			 * lost the response but the target has no record,
			 * so we abort and retry.
			 */
			if (rjt->er_explan == ELS_EXPL_OXID_RXID &&
			    fsp->xfer_len == 0) {
				fc_fcp_retry_cmd(fsp);
				break;
			}
			fc_fcp_recovery(fsp);
			break;
		}
	} else if (opcode == ELS_LS_ACC) {
		if (fsp->state & FC_SRB_ABORTED)
			goto unlock_out;

		data_dir = fsp->cmd->sc_data_direction;
		recp = fc_frame_payload_get(fp, sizeof(*recp));
		offset = ntohl(recp->reca_fc4value);
		e_stat = ntohl(recp->reca_e_stat);

		if (e_stat & ESB_ST_COMPLETE) {

			/*
			 * The exchange is complete.
			 *
			 * For output, we must've lost the response.
			 * For input, all data must've been sent.
			 * We may have lost the response
			 * (and a confirmation was requested) and maybe
			 * some data.
			 *
			 * If all data received, send SRR
			 * asking for response.	 If partial data received,
			 * or gaps, SRR requests data at start of gap.
			 * Recovery via SRR relies on in-order-delivery.
			 */
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end == offset) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else {
				offset = fsp->xfer_contig_end;
				r_ctl = FC_RCTL_DD_SOL_DATA;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		} else if (e_stat & ESB_ST_SEQ_INIT) {

			/*
			 * The remote port has the initiative, so just
			 * keep waiting for it to complete.
			 */
			fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		} else {

			/*
			 * The exchange is incomplete, we have seq. initiative.
			 * Lost response with requested confirmation,
			 * lost confirmation, lost transfer ready or
			 * lost write data.
			 *
			 * For output, if not all data was received, ask
			 * for transfer ready to be repeated.
			 *
			 * If we received or sent all the data, send SRR to
			 * request response.
			 *
			 * If we lost a response, we may have lost some read
			 * data as well.
			 */
			r_ctl = FC_RCTL_DD_SOL_DATA;
			if (data_dir == DMA_TO_DEVICE) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
				if (offset < fsp->data_len)
					r_ctl = FC_RCTL_DD_DATA_DESC;
			} else if (offset == fsp->xfer_contig_end) {
				r_ctl = FC_RCTL_DD_CMD_STATUS;
			} else if (fsp->xfer_contig_end < offset) {
				offset = fsp->xfer_contig_end;
			}
			fc_fcp_srr(fsp, r_ctl, offset);
		}
	}
unlock_out:
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
	fc_frame_free(fp);
}

/**
 * fc_fcp_rec_error() - Handler for REC errors
 * @fsp: The FCP packet the error is on
 * @fp:	 The REC frame
 */
static void fc_fcp_rec_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	int error = PTR_ERR(fp);

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	switch (error) {
	case -FC_EX_CLOSED:
		fc_fcp_retry_cmd(fsp);
		break;

	default:
		FC_FCP_DBG(fsp, "REC %p fid %6.6x error unexpected error %d\n",
			   fsp, fsp->rport->port_id, error);
		fsp->status_code = FC_CMD_PLOGO;
		/* fall through */

	case -FC_EX_TIMEOUT:
		/*
		 * Assume REC or LS_ACC was lost.
		 * The exchange manager will have aborted REC, so retry.
		 */
		FC_FCP_DBG(fsp, "REC fid %6.6x error error %d retry %d/%d\n",
			   fsp->rport->port_id, error, fsp->recov_retry,
			   FC_MAX_RECOV_RETRY);
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_fcp_recovery(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding REC */
}

/**
 * fc_fcp_recovery() - Handler for fcp_pkt recovery
 * @fsp: The FCP pkt that needs to be aborted
 */
static void fc_fcp_recovery(struct fc_fcp_pkt *fsp)
{
	fsp->status_code = FC_CMD_RECOVERY;
	fsp->cdb_status = 0;
	fsp->io_status = 0;
	/*
	 * if this fails then we let the scsi command timer fire and
	 * scsi-ml escalate.
	 */
	fc_fcp_send_abort(fsp);
}

/**
 * fc_fcp_srr() - Send a SRR request (Sequence Retransmission Request)
 * @fsp:   The FCP packet the SRR is to be sent on
 * @r_ctl: The R_CTL field for the SRR request
 * @offset: The relative offset from which data should be retransmitted
 *
 * This is called after receiving status but insufficient data, or
 * when expecting status but the request has timed out.
 */
static void fc_fcp_srr(struct fc_fcp_pkt *fsp, enum fc_rctl r_ctl, u32 offset)
{
	struct fc_lport *lport = fsp->lp;
	struct fc_rport *rport;
	struct fc_rport_libfc_priv *rpriv;
	struct fc_exch *ep = fc_seq_exch(fsp->seq_ptr);
	struct fc_seq *seq;
	struct fcp_srr *srr;
	struct fc_frame *fp;
	u8 cdb_op;

	rport = fsp->rport;
	rpriv = rport->dd_data;
	cdb_op = fsp->cdb_cmd.fc_cdb[0];

	if (!(rpriv->flags & FC_RP_FLAGS_RETRY) ||
	    rpriv->rp_state != RPORT_ST_READY)
		goto retry;			/* shouldn't happen */
	fp = fc_fcp_frame_alloc(lport, sizeof(*srr));
	if (!fp)
		goto retry;

	srr = fc_frame_payload_get(fp, sizeof(*srr));
	memset(srr, 0, sizeof(*srr));
	srr->srr_op = ELS_SRR;
	srr->srr_ox_id = htons(ep->oxid);
	srr->srr_rx_id = htons(ep->rxid);
	srr->srr_r_ctl = r_ctl;
	srr->srr_rel_off = htonl(offset);

	fc_fill_fc_hdr(fp, FC_RCTL_ELS4_REQ, rport->port_id,
		       rpriv->local_port->port_id, FC_TYPE_FCP,
		       FC_FCTL_REQ, 0);

	seq = lport->tt.exch_seq_send(lport, fp, fc_fcp_srr_resp, NULL,
				      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV));
	if (!seq)
		goto retry;

	fsp->recov_seq = seq;
	fsp->xfer_len = offset;
	fsp->xfer_contig_end = offset;
	fsp->state &= ~FC_SRB_RCV_STATUS;
	fc_fcp_pkt_hold(fsp);		/* hold for outstanding SRR */
	return;
retry:
	fc_fcp_retry_cmd(fsp);
}

/**
 * fc_fcp_srr_resp() - Handler for SRR response
 * @seq: The sequence the SRR is on
 * @fp:	 The SRR frame
 * @arg: The FCP packet the SRR is on
 */
static void fc_fcp_srr_resp(struct fc_seq *seq, struct fc_frame *fp, void *arg)
{
	struct fc_fcp_pkt *fsp = arg;
	struct fc_frame_header *fh;

	if (IS_ERR(fp)) {
		fc_fcp_srr_error(fsp, fp);
		return;
	}

	if (fc_fcp_lock_pkt(fsp))
		goto out;

	fh = fc_frame_header_get(fp);
	/*
	 * BUG? fc_fcp_srr_error calls exch_done which would release
	 * the ep. But if fc_fcp_srr_error had got -FC_EX_TIMEOUT,
	 * then fc_exch_timeout would be sending an abort. The exch_done
	 * call by fc_fcp_srr_error would prevent fc_exch.c from seeing
	 * an abort response though.
	 */
	if (fh->fh_type == FC_TYPE_BLS) {
		fc_fcp_unlock_pkt(fsp);
		return;
	}

	fsp->recov_seq = NULL;
	switch (fc_frame_payload_op(fp)) {
	case ELS_LS_ACC:
		fsp->recov_retry = 0;
		fc_fcp_timer_set(fsp, FC_SCSI_REC_TOV);
		break;
	case ELS_LS_RJT:
	default:
		fc_fcp_recovery(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
	fsp->lp->tt.exch_done(seq);
out:
	fc_frame_free(fp);
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}

/**
 * fc_fcp_srr_error() - Handler for SRR errors
 * @fsp: The FCP packet that the SRR error is on
 * @fp:	 The SRR frame
 */
static void fc_fcp_srr_error(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
{
	if (fc_fcp_lock_pkt(fsp))
		goto out;
	fsp->lp->tt.exch_done(fsp->recov_seq);
	fsp->recov_seq = NULL;
	switch (PTR_ERR(fp)) {
	case -FC_EX_TIMEOUT:
		if (fsp->recov_retry++ < FC_MAX_RECOV_RETRY)
			fc_fcp_rec(fsp);
		else
			fc_fcp_recovery(fsp);
		break;
	case -FC_EX_CLOSED:			/* e.g., link failure */
		/* fall through */
	default:
		fc_fcp_retry_cmd(fsp);
		break;
	}
	fc_fcp_unlock_pkt(fsp);
out:
	fc_fcp_pkt_release(fsp);	/* drop hold for outstanding SRR */
}

/**
 * fc_fcp_lport_queue_ready() - Determine if the lport and its queue are ready
 * @lport: The local port to be checked
 */
static inline int fc_fcp_lport_queue_ready(struct fc_lport *lport)
{
	/* lock ? */
	return (lport->state == LPORT_ST_READY) &&
		lport->link_up && !lport->qfull;
}

/**
 * fc_queuecommand() - The queuecommand function of the SCSI template
 * @cmd:   The scsi_cmnd to be executed
 * @done:  The callback function to be called when the scsi_cmnd is complete
 *
 * This is the i/o strategy routine, called by the SCSI layer. This routine
 * is called with the host_lock held.
 */
static int fc_queuecommand_lck(struct scsi_cmnd *sc_cmd, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lport;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	struct fc_fcp_pkt *fsp;
	struct fc_rport_libfc_priv *rpriv;
	int rval;
	int rc = 0;
	struct fcoe_dev_stats *stats;

	lport = shost_priv(sc_cmd->device->host);

	rval = fc_remote_port_chkready(rport);
	if (rval) {
		sc_cmd->result = rval;
		done(sc_cmd);
		return 0;
	}
	spin_unlock_irq(lport->host->host_lock);

	if (!*(struct fc_remote_port **)rport->dd_data) {
		/*
		 * rport is transitioning from blocked/deleted to
		 * online
		 */
		sc_cmd->result = DID_IMM_RETRY << 16;
		done(sc_cmd);
		goto out;
	}

	rpriv = rport->dd_data;

	if (!fc_fcp_lport_queue_ready(lport)) {
		if (lport->qfull)
			fc_fcp_can_queue_ramp_down(lport);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	fsp = fc_fcp_pkt_alloc(lport, GFP_ATOMIC);
	if (fsp == NULL) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/*
	 * build the libfc request pkt
	 */
	fsp->cmd = sc_cmd;	/* save the cmd */
	fsp->lp = lport;	/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */
	fsp->xfer_ddp = FC_XID_UNKNOWN;
	sc_cmd->scsi_done = done;

	/*
	 * set up the transfer length
	 */
	fsp->data_len = scsi_bufflen(sc_cmd);
	fsp->xfer_len = 0;

	/*
	 * setup the data direction
	 */
	stats = per_cpu_ptr(lport->dev_stats, get_cpu());
	if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) {
		fsp->req_flags = FC_SRB_READ;
		stats->InputRequests++;
		stats->InputMegabytes = fsp->data_len;
	} else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) {
		fsp->req_flags = FC_SRB_WRITE;
		stats->OutputRequests++;
		stats->OutputMegabytes = fsp->data_len;
	} else {
		fsp->req_flags = 0;
		stats->ControlRequests++;
	}
	put_cpu();

	fsp->tgt_flags = rpriv->flags;

	init_timer(&fsp->timer);
	fsp->timer.data = (unsigned long)fsp;

	/*
	 * send it to the lower layer
	 * if the send fails, release the packet and return
	 * SCSI_MLQUEUE_HOST_BUSY so the midlayer retries the command.
	 */
	rval = fc_fcp_pkt_send(lport, fsp);
	if (rval != 0) {
		fsp->state = FC_SRB_FREE;
		fc_fcp_pkt_release(fsp);
		rc = SCSI_MLQUEUE_HOST_BUSY;
	}
out:
	spin_lock_irq(lport->host->host_lock);
	return rc;
}

DEF_SCSI_QCMD(fc_queuecommand)
EXPORT_SYMBOL(fc_queuecommand);
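/*
 * Usage sketch (illustrative only, not part of this file): a low-level
 * driver built on libfc would typically plug the handlers exported here
 * into its scsi_host_template. Only the fc_* symbols below are real;
 * the template name and the numeric tunables are assumptions made for
 * the example.
 *
 *	static struct scsi_host_template example_fc_sht = {
 *		.module			 = THIS_MODULE,
 *		.name			 = "example libfc HBA",
 *		.queuecommand		 = fc_queuecommand,
 *		.eh_abort_handler	 = fc_eh_abort,
 *		.eh_device_reset_handler = fc_eh_device_reset,
 *		.eh_host_reset_handler	 = fc_eh_host_reset,
 *		.slave_alloc		 = fc_slave_alloc,
 *		.change_queue_depth	 = fc_change_queue_depth,
 *		.change_queue_type	 = fc_change_queue_type,
 *		.this_id		 = -1,
 *		.cmd_per_lun		 = 3,
 *		.can_queue		 = 1024,
 *		.sg_tablesize		 = SG_ALL,
 *	};
 */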

/**
 * fc_io_compl() - Handle responses for completed commands
 * @fsp: The FCP packet that is complete
 *
 * Translates fcp_pkt errors to Linux SCSI errors.
 * The fcp packet lock must be held when calling.
 */
static void fc_io_compl(struct fc_fcp_pkt *fsp)
{
	struct fc_fcp_internal *si;
	struct scsi_cmnd *sc_cmd;
	struct fc_lport *lport;
	unsigned long flags;

	/* release outstanding ddp context */
	fc_fcp_ddp_done(fsp);

	fsp->state |= FC_SRB_COMPL;
	if (!(fsp->state & FC_SRB_FCP_PROCESSING_TMO)) {
		spin_unlock_bh(&fsp->scsi_pkt_lock);
		del_timer_sync(&fsp->timer);
		spin_lock_bh(&fsp->scsi_pkt_lock);
	}

	lport = fsp->lp;
	si = fc_get_scsi_internal(lport);

	/*
	 * if can_queue ramp down is done then try can_queue ramp up
	 * since commands are completing now.
	 */
	if (si->last_can_queue_ramp_down_time)
		fc_fcp_can_queue_ramp_up(lport);

	sc_cmd = fsp->cmd;
	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
	switch (fsp->status_code) {
	case FC_COMPLETE:
		if (fsp->cdb_status == 0) {
			/*
			 * good I/O status
			 */
			sc_cmd->result = DID_OK << 16;
			if (fsp->scsi_resid)
				CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
		} else {
			/*
			 * transport level I/O was ok but scsi
			 * has non zero status
			 */
			sc_cmd->result = (DID_OK << 16) | fsp->cdb_status;
		}
		break;
	case FC_ERROR:
		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
			   "due to FC_ERROR\n");
		sc_cmd->result = DID_ERROR << 16;
		break;
	case FC_DATA_UNDRUN:
		if ((fsp->cdb_status == 0) && !(fsp->req_flags & FC_SRB_READ)) {
			/*
			 * scsi status is good but transport level
			 * underrun.
			 */
			if (fsp->state & FC_SRB_RCV_STATUS) {
				sc_cmd->result = DID_OK << 16;
			} else {
				FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml"
					   " due to FC_DATA_UNDRUN (trans)\n");
				sc_cmd->result = DID_ERROR << 16;
			}
		} else {
			/*
			 * scsi got underrun, this is an error
			 */
			FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
				   "due to FC_DATA_UNDRUN (scsi)\n");
			CMD_RESID_LEN(sc_cmd) = fsp->scsi_resid;
			sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		}
		break;
	case FC_DATA_OVRRUN:
		/*
		 * overrun is an error
		 */
		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
			   "due to FC_DATA_OVRRUN\n");
		sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
		break;
	case FC_CMD_ABORTED:
		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
			   "due to FC_CMD_ABORTED\n");
		sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
		break;
	case FC_CMD_RECOVERY:
		sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
		break;
	case FC_CMD_RESET:
		sc_cmd->result = (DID_RESET << 16);
		break;
	case FC_HRD_ERROR:
		sc_cmd->result = (DID_NO_CONNECT << 16);
		break;
	default:
		FC_FCP_DBG(fsp, "Returning DID_ERROR to scsi-ml "
			   "due to unknown error\n");
		sc_cmd->result = (DID_ERROR << 16);
		break;
	}

	if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
		sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);

	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	list_del(&fsp->list);
	sc_cmd->SCp.ptr = NULL;
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
	sc_cmd->scsi_done(sc_cmd);

	/* release ref from initial allocation in queue command */
	fc_fcp_pkt_release(fsp);
}

/**
 * fc_eh_abort() - Abort a command
 * @sc_cmd: The SCSI command to abort
 *
 * From SCSI host template.
 * Send an ABTS to the target device and wait for the response.
 */
int fc_eh_abort(struct scsi_cmnd *sc_cmd)
{
	struct fc_fcp_pkt *fsp;
	struct fc_lport *lport;
	struct fc_fcp_internal *si;
	int rc = FAILED;
	unsigned long flags;

	lport = shost_priv(sc_cmd->device->host);
	if (lport->state != LPORT_ST_READY)
		return rc;
	else if (!lport->link_up)
		return rc;

	si = fc_get_scsi_internal(lport);
	spin_lock_irqsave(&si->scsi_queue_lock, flags);
	fsp = CMD_SP(sc_cmd);
	if (!fsp) {
		/* command completed while scsi eh was setting up */
		spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
		return SUCCESS;
	}
	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
	fc_fcp_pkt_hold(fsp);
	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);

	if (fc_fcp_lock_pkt(fsp)) {
		/* completed while we were waiting for timer to be deleted */
		rc = SUCCESS;
		goto release_pkt;
	}

	rc = fc_fcp_pkt_abort(fsp);
	fc_fcp_unlock_pkt(fsp);

release_pkt:
	fc_fcp_pkt_release(fsp);
	return rc;
}
EXPORT_SYMBOL(fc_eh_abort);

/**
 * fc_eh_device_reset() - Reset a single LUN
 * @sc_cmd: The SCSI command which identifies the device whose
 *	    LUN is to be reset
 *
 * Called from the SCSI host template.
 */
int fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
{
	struct fc_lport *lport;
	struct fc_fcp_pkt *fsp;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
	int rc = FAILED;
	int rval;

	rval = fc_remote_port_chkready(rport);
	if (rval)
		goto out;

	lport = shost_priv(sc_cmd->device->host);

	if (lport->state != LPORT_ST_READY)
		return rc;

	FC_SCSI_DBG(lport, "Resetting rport (%6.6x)\n", rport->port_id);

	fsp = fc_fcp_pkt_alloc(lport, GFP_NOIO);
	if (fsp == NULL) {
		printk(KERN_WARNING "libfc: could not allocate scsi_pkt\n");
		goto out;
	}

	/*
	 * Build the libfc request pkt. Do not set the scsi cmnd, because
	 * the sc passed in is not setup for execution like when sent
	 * through the queuecommand callout.
	 */
	fsp->lp = lport;	/* save the softc ptr */
	fsp->rport = rport;	/* set the remote port ptr */

	/*
	 * flush outstanding commands
	 */
	rc = fc_lun_reset(lport, fsp, scmd_id(sc_cmd), sc_cmd->device->lun);
	fsp->state = FC_SRB_FREE;
	fc_fcp_pkt_release(fsp);

out:
	return rc;
}
EXPORT_SYMBOL(fc_eh_device_reset);

/**
 * fc_eh_host_reset() - Reset a Scsi_Host.
 * @sc_cmd: The SCSI command that identifies the SCSI host to be reset
 */
int fc_eh_host_reset(struct scsi_cmnd *sc_cmd)
{
	struct Scsi_Host *shost = sc_cmd->device->host;
	struct fc_lport *lport = shost_priv(shost);
	unsigned long wait_tmo;

	FC_SCSI_DBG(lport, "Resetting host\n");

	lport->tt.lport_reset(lport);
	wait_tmo = jiffies + FC_HOST_RESET_TIMEOUT;
	while (!fc_fcp_lport_queue_ready(lport) && time_before(jiffies,
							       wait_tmo))
		msleep(1000);

	if (fc_fcp_lport_queue_ready(lport)) {
		shost_printk(KERN_INFO, shost, "libfc: Host reset succeeded "
			     "on port (%6.6x)\n", lport->port_id);
		return SUCCESS;
	} else {
		shost_printk(KERN_INFO, shost, "libfc: Host reset failed, "
			     "port (%6.6x) is not ready.\n",
			     lport->port_id);
		return FAILED;
	}
}
EXPORT_SYMBOL(fc_eh_host_reset);

/**
 * fc_slave_alloc() - Configure the queue depth of a Scsi_Host
 * @sdev: The SCSI device that identifies the SCSI host
 *
 * Configures queue depth based on host's cmd_per_lun. If not set
 * then we use the libfc default.
 */
int fc_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, FC_FCP_DFLT_QUEUE_DEPTH);
	else
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev),
					FC_FCP_DFLT_QUEUE_DEPTH);

	return 0;
}
EXPORT_SYMBOL(fc_slave_alloc);

/**
 * fc_change_queue_depth() - Change a device's queue depth
 * @sdev:   The SCSI device whose queue depth is to change
 * @qdepth: The new queue depth
 * @reason: The reason for the change
 */
int fc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, qdepth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
EXPORT_SYMBOL(fc_change_queue_depth);

/**
 * fc_change_queue_type() - Change a device's queue type
 * @sdev:     The SCSI device whose queue type is to change
 * @tag_type: Identifier for queue type
 */
int fc_change_queue_type(struct scsi_device *sdev, int tag_type)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag_type);
		if (tag_type)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag_type = 0;

	return tag_type;
}
EXPORT_SYMBOL(fc_change_queue_type);

/**
 * fc_fcp_destroy() - Tear down the FCP layer for a given local port
 * @lport: The local port that no longer needs the FCP layer
 */
void fc_fcp_destroy(struct fc_lport *lport)
{
	struct fc_fcp_internal *si = fc_get_scsi_internal(lport);

	if (!list_empty(&si->scsi_pkt_queue))
		printk(KERN_ERR "libfc: Leaked SCSI packets when destroying "
		       "port (%6.6x)\n", lport->port_id);

	mempool_destroy(si->scsi_pkt_pool);
	kfree(si);
	lport->scsi_priv = NULL;
}
EXPORT_SYMBOL(fc_fcp_destroy);

/**
 * fc_setup_fcp() - Create the FCP packet (SRB) cache shared by all local ports
 */
int fc_setup_fcp(void)
{
	int rc = 0;

	scsi_pkt_cachep = kmem_cache_create("libfc_fcp_pkt",
					    sizeof(struct fc_fcp_pkt),
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!scsi_pkt_cachep) {
		printk(KERN_ERR "libfc: Unable to allocate SRB cache, "
		       "module load failed!");
		rc = -ENOMEM;
	}

	return rc;
}

/**
 * fc_destroy_fcp() - Release the FCP packet (SRB) cache
 */
void fc_destroy_fcp(void)
{
	if (scsi_pkt_cachep)
		kmem_cache_destroy(scsi_pkt_cachep);
}
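/*
 * Pairing sketch (illustrative; the init/exit function names are made
 * up): fc_setup_fcp() must have succeeded once, typically from the
 * library's module init path, before any fc_fcp_init() call, and
 * fc_destroy_fcp() undoes it on unload.
 *
 *	static int __init example_libfc_init(void)
 *	{
 *		return fc_setup_fcp();
 *	}
 *
 *	static void __exit example_libfc_exit(void)
 *	{
 *		fc_destroy_fcp();
 *	}
 */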

/**
 * fc_fcp_init() - Initialize the FCP layer for a local port
 * @lport: The local port to initialize the FCP layer for
 */
int fc_fcp_init(struct fc_lport *lport)
{
	int rc;
	struct fc_fcp_internal *si;

	if (!lport->tt.fcp_cmd_send)
		lport->tt.fcp_cmd_send = fc_fcp_cmd_send;

	if (!lport->tt.fcp_cleanup)
		lport->tt.fcp_cleanup = fc_fcp_cleanup;

	if (!lport->tt.fcp_abort_io)
		lport->tt.fcp_abort_io = fc_fcp_abort_io;

	si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
	if (!si)
		return -ENOMEM;
	lport->scsi_priv = si;
	si->max_can_queue = lport->host->can_queue;
	INIT_LIST_HEAD(&si->scsi_pkt_queue);
	spin_lock_init(&si->scsi_queue_lock);

	si->scsi_pkt_pool = mempool_create_slab_pool(2, scsi_pkt_cachep);
	if (!si->scsi_pkt_pool) {
		rc = -ENOMEM;
		goto free_internal;
	}
	return 0;

free_internal:
	kfree(si);
	return rc;
}
EXPORT_SYMBOL(fc_fcp_init);
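
/*
 * Ordering sketch (illustrative; the helper name and the can_queue
 * value are assumptions): fc_fcp_init() copies lport->host->can_queue
 * into max_can_queue, so the SCSI host should be sized before it is
 * called.
 *
 *	static int example_lport_config(struct fc_lport *lport)
 *	{
 *		lport->host->can_queue = 1024;
 *		return fc_fcp_init(lport);
 *	}
 */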