/*
 * Copyright (C) 1999 Eric Youngdale
 * Copyright (C) 2014 Christoph Hellwig
 *
 *  SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/ratelimit.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include <trace/events/scsi.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { .size = x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;

/*
 * When to reinvoke queueing after a resource shortage. It's 3 msecs so
 * as not to change behaviour from the previous unplug mechanism;
 * experimentation may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

static void
scsi_set_blocked(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		atomic_set(&host->host_blocked, host->max_host_blocked);
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
	case SCSI_MLQUEUE_EH_RETRY:
		atomic_set(&device->device_blocked,
			   device->max_device_blocked);
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		atomic_set(&starget->target_blocked,
			   starget->max_target_blocked);
		break;
	}
}

static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = cmd->request->q;

	blk_mq_requeue_request(cmd->request);
	blk_mq_kick_requeue_list(q);
	put_device(&sdev->sdev_gendev);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason:  The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd,
		"Inserting command %p into mlqueue\n", cmd));

	scsi_set_blocked(cmd, reason);

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue. Schedule requeue work under
	 * lock such that the kblockd_schedule_work() call happens
	 * before blk_cleanup_queue() finishes.
	 */
	cmd->result = 0;
	if (q->mq_ops) {
		scsi_mq_requeue_cmd(cmd);
		return;
	}
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	kblockd_schedule_work(&device->requeue_work);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
void scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	__scsi_queue_insert(cmd, reason, 1);
}
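
/*
 * Illustrative only: a low-level driver typically triggers this
 * machinery by returning one of the SCSI_MLQUEUE_* codes from its
 * ->queuecommand() hook.  my_queuecommand() and my_hw_ring_full() are
 * made-up names:
 *
 *	static int my_queuecommand(struct Scsi_Host *host,
 *				   struct scsi_cmnd *cmd)
 *	{
 *		if (my_hw_ring_full(host))
 *			return SCSI_MLQUEUE_HOST_BUSY;
 *		...
 *	}
 *
 * The midlayer then requeues the command and temporarily stalls the
 * queue (see scsi_set_blocked() above); scsi_softirq_done() below uses
 * scsi_queue_insert() the same way for retries.
 */
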
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in jiffies
 * @retries:	number of times to retry request
 * @flags:	flags to OR into the request's cmd_flags
 * @resid:	optional residual length
 *
 * Returns the req->errors value, which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, u64 flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (IS_ERR(req))
		return ret;
	blk_rq_set_block_pc(req);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);
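
/*
 * Example (sketch, not from this file): fetching the 36-byte standard
 * INQUIRY response with scsi_execute().  The CDB contents, buffer size
 * and timeout are illustrative:
 *
 *	unsigned char cdb[6] = { INQUIRY, 0, 0, 0, 36, 0 };
 *	unsigned char buf[36];
 *	int res = scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf,
 *			       sizeof(buf), NULL, 30 * HZ, 3, 0, NULL);
 *
 * A nonzero result encodes the driver/host/status bytes; see the
 * result-byte macros in <scsi/scsi.h>.
 */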

int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid, u64 flags)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, flags, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req_flags);
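
/*
 * Example (sketch): a buffer-less TEST UNIT READY with automatic sense
 * decoding into a scsi_sense_hdr.  The timeout and retry counts are
 * illustrative:
 *
 *	unsigned char cdb[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	struct scsi_sense_hdr sshdr;
 *	int res = scsi_execute_req_flags(sdev, cdb, DMA_NONE, NULL, 0,
 *					 &sshdr, 30 * HZ, 3, NULL, 0);
 *
 * On failure, scsi_sense_valid(&sshdr) indicates whether
 * sshdr.sense_key/asc/ascq hold decoded sense data.
 */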

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.   Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	atomic_dec(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);

	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled))) {
		spin_lock_irqsave(shost->host_lock, flags);
		scsi_eh_wakeup(shost);
		spin_unlock_irqrestore(shost->host_lock, flags);
	}

	atomic_dec(&sdev->device_busy);
}

static void scsi_kick_queue(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_start_hw_queues(q);
	else
		blk_run_queue(q);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	scsi_kick_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		scsi_kick_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);
	
		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline bool scsi_device_is_busy(struct scsi_device *sdev)
{
	if (atomic_read(&sdev->device_busy) >= sdev->queue_depth)
		return true;
	if (atomic_read(&sdev->device_blocked) > 0)
		return true;
	return false;
}

static inline bool scsi_target_is_busy(struct scsi_target *starget)
{
	if (starget->can_queue > 0) {
		if (atomic_read(&starget->target_busy) >= starget->can_queue)
			return true;
		if (atomic_read(&starget->target_blocked) > 0)
			return true;
	}
	return false;
}

static inline bool scsi_host_is_busy(struct Scsi_Host *shost)
{
	if (shost->can_queue > 0 &&
	    atomic_read(&shost->host_busy) >= shost->can_queue)
		return true;
	if (atomic_read(&shost->host_blocked) > 0)
		return true;
	if (shost->host_self_blocked)
		return true;
	return false;
}

static void scsi_starved_list_run(struct Scsi_Host *shost)
{
	LIST_HEAD(starved_list);
	struct scsi_device *sdev;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		struct request_queue *slq;

		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		/*
		 * Once we drop the host lock, a racing scsi_remove_device()
		 * call may remove the sdev from the starved list and destroy
		 * it and the queue.  Mitigate by taking a reference to the
		 * queue and never touching the sdev again after we drop the
		 * host lock.  Note: if __scsi_remove_device() invokes
		 * blk_cleanup_queue() before the queue is run from this
		 * function then blk_run_queue() will return immediately since
		 * blk_cleanup_queue() marks the queue with QUEUE_FLAG_DYING.
		 */
		slq = sdev->request_queue;
		if (!blk_get_queue(slq))
			continue;
		spin_unlock_irqrestore(shost->host_lock, flags);

		scsi_kick_queue(slq);
		blk_put_queue(slq);

		spin_lock_irqsave(shost->host_lock, flags);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:   scsi_run_queue()
 *
 * Purpose:    Select a proper request queue to serve next
 *
 * Arguments:  q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:      The previous command was completely finished, start
 *             a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;

	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);
	if (!list_empty(&sdev->host->starved_list))
		scsi_starved_list_run(sdev->host);

	if (q->mq_ops)
		blk_mq_start_stopped_hw_queues(q, false);
	else
		blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_unprep_request(req);
	req->special = NULL;
	scsi_put_command(cmd);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
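
/*
 * For reference (informal): with get_count_order(n) == ceil(log2(n)),
 * scsi_sgtable_index() picks the smallest pool that fits:
 *
 *	nents  1..8	-> index 0  ("sgpool-8")
 *	nents  9..16	-> index 1  ("sgpool-16")
 *	nents 17..32	-> index 2  ("sgpool-32")
 *	nents 33..64	-> index 3  ("sgpool-64")
 *	nents 65..128	-> index 4  ("sgpool-128")
 */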

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
{
	if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
		return;
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
	struct scatterlist *first_chunk = NULL;
	int ret;

	BUG_ON(!nents);

	if (mq) {
		if (nents <= SCSI_MAX_SG_SEGMENTS) {
			sdb->table.nents = nents;
			sg_init_table(sdb->table.sgl, sdb->table.nents);
			return 0;
		}
		first_chunk = sdb->table.sgl;
	}

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       first_chunk, GFP_ATOMIC, scsi_sg_alloc);
	if (unlikely(ret))
		scsi_free_sgtable(sdb, mq);
	return ret;
}
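
/*
 * Note (informal): in the blk-mq case the first SCSI_MAX_SG_SEGMENTS
 * entries live in memory preallocated alongside the command itself, so
 * small commands never touch the mempools; __sg_alloc_table() only
 * chains extra mempool-backed chunks when nents exceeds that inline
 * capacity.
 */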

static void scsi_uninit_cmd(struct scsi_cmnd *cmd)
{
	if (cmd->request->cmd_type == REQ_TYPE_FS) {
		struct scsi_driver *drv = scsi_cmd_to_driver(cmd);

		if (drv->uninit_command)
			drv->uninit_command(cmd);
	}
}

static void scsi_mq_free_sgtables(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, true);
	if (cmd->request->next_rq && cmd->request->next_rq->special)
		scsi_free_sgtable(cmd->request->next_rq->special, true);
	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, true);
}

static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	scsi_mq_free_sgtables(cmd);
	scsi_uninit_cmd(cmd);

	if (shost->use_cmd_list) {
		BUG_ON(list_empty(&cmd->list));
		spin_lock_irqsave(&sdev->list_lock, flags);
		list_del_init(&cmd->list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
	}
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Free resources allocated for a scsi_command.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb, false);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb, false);
}

static void scsi_release_bidi_buffers(struct scsi_cmnd *cmd)
{
	struct scsi_data_buffer *bidi_sdb = cmd->request->next_rq->special;

	scsi_free_sgtable(bidi_sdb, false);
	kmem_cache_free(scsi_sdb_cache, bidi_sdb);
	cmd->request->next_rq->special = NULL;
}

static bool scsi_end_request(struct request *req, int error,
		unsigned int bytes, unsigned int bidi_bytes)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	if (blk_update_request(req, error, bytes))
		return true;

	/* Bidi request must be completed as a whole */
	if (unlikely(bidi_bytes) &&
	    blk_update_request(req->next_rq, error, bidi_bytes))
		return true;

	if (blk_queue_add_random(q))
		add_disk_randomness(req->rq_disk);

	if (req->mq_ctx) {
		/*
		 * In the MQ case the command gets freed by __blk_mq_end_request,
		 * so we have to do all cleanup that depends on it earlier.
		 *
		 * We also can't kick the queues from irq context, so we
		 * will have to defer it to a workqueue.
		 */
		scsi_mq_uninit_cmd(cmd);

		__blk_mq_end_request(req, error);

		if (scsi_target(sdev)->single_lun ||
		    !list_empty(&sdev->host->starved_list))
			kblockd_schedule_work(&sdev->requeue_work);
		else
			blk_mq_start_stopped_hw_queues(q, true);
	} else {
		unsigned long flags;

		if (bidi_bytes)
			scsi_release_bidi_buffers(cmd);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_finish_request(req, error);
		spin_unlock_irqrestore(q->queue_lock, flags);

		scsi_release_buffers(cmd);

		scsi_put_command(cmd);
		scsi_run_queue(q);
	}

	put_device(&sdev->sdev_gendev);
	return false;
}
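
/*
 * Informal summary of the convention above: scsi_end_request() returns
 * true when the request still has bytes outstanding and the caller must
 * deal with the remainder, and false when the request was completed and
 * released -- in the latter case @req is a stale pointer and must not
 * be touched again.
 */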

/**
 * __scsi_error_from_host_byte - translate SCSI error code into errno
 * @cmd:	SCSI command (unused)
 * @result:	scsi error code
 *
 * Translate SCSI error code into standard UNIX errno.
 * Return values:
 * -ENOLINK	temporary transport failure
 * -EREMOTEIO	permanent target failure, do not retry
 * -EBADE	permanent nexus failure, retry on other path
 * -ENOSPC	No write space available
 * -ENODATA	Medium error
 * -EIO		unspecified I/O error
 */
static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch(host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -EBADE;
		break;
	case DID_ALLOC_FAILURE:
		set_host_byte(cmd, DID_OK);
		error = -ENOSPC;
		break;
	case DID_MEDIUM_ERROR:
		set_host_byte(cmd, DID_OK);
		error = -ENODATA;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       We will finish off the specified number of sectors.  If we
 *		are done, the command block will be released and the queue
 *		function will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call __scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call scsi_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	bool sense_valid = false;
	int sense_deferred = 0, level = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	unsigned long wait_for = (cmd->allowed + 1) * req->timeout;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer,  len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}
		/*
		 * __scsi_error_from_host_byte may have reset the host_byte
		 */
		req->errors = cmd->result;

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;
			if (scsi_end_request(req, 0, blk_rq_bytes(req),
					blk_rq_bytes(req->next_rq)))
				BUG();
			return;
		}
	} else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
		/*
		 * Certain non BLOCK_PC requests are commands that don't
		 * actually transfer anything (FLUSH), so cannot use
		 * good_bytes != blk_rq_bytes(req) as the signal for an error.
		 * This sets the error explicitly for the problem case.
		 */
		error = __scsi_error_from_host_byte(cmd, result);
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd,
		"%u sectors total, %d bytes done.\n",
		blk_rq_sectors(req), good_bytes));

	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
897 898 899 900 901 902 903 904
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense(cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * If we finished all bytes in the request we are done now.
	 */
	if (!scsi_end_request(req, error, good_bytes, 0))
		return;

	/*
	 * Kill remainder if no retries.
	 */
	if (error && scsi_noretry_cmd(cmd)) {
		if (scsi_end_request(req, error, blk_rq_bytes(req), 0))
			BUG();
		return;
	}

	/*
	 * If there had been no error, but we have leftover bytes in the
	 * request, just queue the command up again.
	 */
	if (result == 0)
		goto requeue;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
				action = ACTION_FAIL;
				error = -EREMOTEIO;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) /* DIF */
				error = -EILSEQ;
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					action = ACTION_FAIL;
					break;
				}
			} else
				action = ACTION_FAIL;
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			action = ACTION_FAIL;
			break;
		}
	} else
		action = ACTION_FAIL;

	if (action != ACTION_FAIL &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
		action = ACTION_FAIL;

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		if (!(req->cmd_flags & REQ_QUIET)) {
			static DEFINE_RATELIMIT_STATE(_rs,
					DEFAULT_RATELIMIT_INTERVAL,
					DEFAULT_RATELIMIT_BURST);

			if (unlikely(scsi_logging_level))
				level = SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT,
						       SCSI_LOG_MLCOMPLETE_BITS);

			/*
			 * if logging is enabled the failure will be printed
			 * in scsi_log_completion(), so avoid duplicate messages
			 */
			if (!level && __ratelimit(&_rs)) {
				scsi_print_result(cmd, NULL, FAILED);
				if (driver_byte(result) & DRIVER_SENSE)
					scsi_print_sense(cmd);
				scsi_print_command(cmd);
			}
		}
		if (!scsi_end_request(req, error, blk_rq_err_bytes(req), 0))
			return;
		/*FALLTHRU*/
	case ACTION_REPREP:
	requeue:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		if (q->mq_ops) {
			cmd->request->cmd_flags &= ~REQ_DONTPREP;
			scsi_mq_uninit_cmd(cmd);
			scsi_mq_requeue_cmd(cmd);
		} else {
			scsi_release_buffers(cmd);
			scsi_requeue_command(q, cmd);
		}
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					req->mq_ctx != NULL)))
		return BLKPREP_DEFER;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request *rq = cmd->request;
	bool is_mq = (rq->mq_ctx != NULL);
	int error;

	BUG_ON(!rq->nr_phys_segments);

	error = scsi_init_sgtable(rq, &cmd->sdb);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		if (!rq->q->mq_ops) {
			struct scsi_data_buffer *bidi_sdb =
				kmem_cache_zalloc(scsi_sdb_cache, GFP_ATOMIC);
			if (!bidi_sdb) {
				error = BLKPREP_DEFER;
				goto err_exit;
			}

			rq->next_rq->special = bidi_sdb;
		}

		error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		if (prot_sdb == NULL) {
			/*
			 * This can happen if someone (e.g. multipath)
			 * queues a command to a device on an adapter
			 * that does not support DIX.
			 */
			WARN_ON_ONCE(1);
			error = BLKPREP_KILL;
			goto err_exit;
		}

		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;
err_exit:
	if (is_mq) {
		scsi_mq_free_sgtables(cmd);
	} else {
		scsi_release_buffers(cmd);
		cmd->request->special = NULL;
		scsi_put_command(cmd);
		put_device(&sdev->sdev_gendev);
	}
	return error;
}
EXPORT_SYMBOL(scsi_init_io);
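
/*
 * Illustrative caller (sketch; not from this file): an upper-level
 * driver's ->init_command() hook typically maps the request first and
 * then fills in the CDB.  my_init_command() is a made-up name; see
 * sd.c for a real example:
 *
 *	static int my_init_command(struct scsi_cmnd *cmd)
 *	{
 *		int ret = scsi_init_io(cmd);
 *		if (ret != BLKPREP_OK)
 *			return ret;
 *		// ... build cmd->cmnd[], set cmd->transfersize etc ...
 *		return BLKPREP_OK;
 *	}
 */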

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		/* Bail if we can't get a reference to the device */
		if (!get_device(&sdev->sdev_gendev))
			return NULL;

		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd)) {
			put_device(&sdev->sdev_gendev);
			return NULL;
		}
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

static int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
	}

	cmd->cmd_len = req->cmd_len;
	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}

/*
 * Setup a REQ_TYPE_FS command.  These are simple requests from filesystems
 * that still need to be translated to SCSI CDBs from the ULD.
 */
static int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		int ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_cmd_to_driver(cmd)->init_command(cmd);
}

static int scsi_setup_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		return scsi_setup_fs_cmnd(sdev, req);
	case REQ_TYPE_BLOCK_PC:
		return scsi_setup_blk_pc_cmnd(sdev, req);
	default:
		return BLKPREP_KILL;
	}
}
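
/*
 * Informal overview of the non-mq prep path wired up below:
 *
 *	scsi_prep_fn()
 *	    scsi_prep_state_check()	reject or defer by device state
 *	    scsi_get_cmd_from_req()	attach a scsi_cmnd to the request
 *	    scsi_setup_cmnd()
 *	        scsi_setup_fs_cmnd()	ULD builds the CDB (REQ_TYPE_FS)
 *	        scsi_setup_blk_pc_cmnd()  CDB arrives with the request
 *	    scsi_prep_return()		act on the BLKPREP_* result
 */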

static int
scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}

static int
scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			put_device(&sdev->sdev_gendev);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (atomic_read(&sdev->device_busy) == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	struct scsi_cmnd *cmd;
	int ret;

	ret = scsi_prep_state_check(sdev, req);
	if (ret != BLKPREP_OK)
		goto out;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd)) {
		ret = BLKPREP_DEFER;
		goto out;
	}

	ret = scsi_setup_cmnd(sdev, req);
out:
	return scsi_prep_return(q, req, ret);
}

static void scsi_unprep_fn(struct request_queue *q, struct request *req)
{
	scsi_uninit_cmd(req->special);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	unsigned int busy;

	busy = atomic_inc_return(&sdev->device_busy) - 1;
	if (atomic_read(&sdev->device_blocked)) {
		if (busy)
			goto out_dec;

		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (atomic_dec_return(&sdev->device_blocked) > 0) {
			/*
			 * For the MQ case we take care of this in the caller.
			 */
			if (!q->mq_ops)
				blk_delay_queue(q, SCSI_QUEUE_DELAY);
			goto out_dec;
		}
		SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
	}

	if (busy >= sdev->queue_depth)
		goto out_dec;

	return 1;
out_dec:
	atomic_dec(&sdev->device_busy);
	return 0;
}
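
/*
 * Informal sketch of the counting scheme shared by the *_queue_ready()
 * helpers in this file: a slot is taken optimistically, the
 * pre-increment value is checked against the limit, and on failure the
 * slot is handed back:
 *
 *	busy = atomic_inc_return(&counter) - 1;	// take a slot
 *	if (busy >= limit)
 *		goto out_dec;			// over the limit
 *	...
 * out_dec:
 *	atomic_dec(&counter);			// give the slot back
 *
 * This keeps the fast path lock-free while staying race-safe.
 */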

/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					   struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);
	unsigned int busy;

	if (starget->single_lun) {
		spin_lock_irq(shost->host_lock);
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev) {
			spin_unlock_irq(shost->host_lock);
			return 0;
		}
		starget->starget_sdev_user = sdev;
		spin_unlock_irq(shost->host_lock);
	}

	if (starget->can_queue <= 0)
		return 1;

	busy = atomic_inc_return(&starget->target_busy) - 1;
	if (atomic_read(&starget->target_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (atomic_dec_return(&starget->target_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
				 "unblocking target at zero depth\n"));
	}

	if (busy >= starget->can_queue)
		goto starved;

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	list_move_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	if (starget->can_queue > 0)
		atomic_dec(&starget->target_busy);
	return 0;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	unsigned int busy;

	if (scsi_host_in_recovery(shost))
		return 0;

	busy = atomic_inc_return(&shost->host_busy) - 1;
	if (atomic_read(&shost->host_blocked) > 0) {
		if (busy)
			goto starved;

		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (atomic_dec_return(&shost->host_blocked) > 0)
			goto out_dec;

		SCSI_LOG_MLQUEUE(3,
			shost_printk(KERN_INFO, shost,
				     "unblocking host at zero depth\n"));
	}

	if (shost->can_queue > 0 && busy >= shost->can_queue)
		goto starved;
	if (shost->host_self_blocked)
		goto starved;

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry)) {
		spin_lock_irq(shost->host_lock);
		if (!list_empty(&sdev->starved_entry))
			list_del_init(&sdev->starved_entry);
		spin_unlock_irq(shost->host_lock);
	}

	return 1;

starved:
	spin_lock_irq(shost->host_lock);
	if (list_empty(&sdev->starved_entry))
		list_add_tail(&sdev->starved_entry, &shost->starved_list);
	spin_unlock_irq(shost->host_lock);
out_dec:
	atomic_dec(&shost->host_busy);
	return 0;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When SCSI can't dispatch I/Os anymore and needs to kill I/Os, it
 * needs to return 'not busy'. Otherwise, request stacking drivers
 * may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;

	if (blk_queue_dying(q))
		return 0;

	shost = sdev->host;

	/*
	 * Ignore host/starget busy state.
	 * Since block layer does not have a concept of fairness across
	 * multiple queues, congestion of host/starget needs to be handled
	 * in SCSI layer.
	 */
	if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	atomic_inc(&sdev->device_busy);
	atomic_inc(&shost->host_busy);
	if (starget->can_queue > 0)
		atomic_inc(&starget->target_busy);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
		case SUCCESS:
			scsi_finish_command(cmd);
			break;
		case NEEDS_RETRY:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
			break;
		case ADD_TO_MLQUEUE:
			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
			break;
		default:
			if (!scsi_eh_scmd_add(cmd, 0))
				scsi_finish_command(cmd);
	}
}

/**
 * scsi_dispatch_command - Dispatch a command to the low-level driver.
 * @cmd: command block we are dispatching.
 *
 * Return: nonzero if the request was rejected and the device's queue needs
 * to be plugged.
 */
static int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	int rtn = 0;

	atomic_inc(&cmd->device->iorequest_cnt);

	/* check if the device is still usable */
	if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
		/* in SDEV_DEL we error all commands. DID_NO_CONNECT
		 * returns an immediate error upwards, and signals
		 * that the device is no longer present */
		cmd->result = DID_NO_CONNECT << 16;
		goto done;
	}

	/* Check to see if the scsi lld made this device blocked. */
	if (unlikely(scsi_device_blocked(cmd->device))) {
		/*
		 * in blocked state, the command is just put back on
		 * the device queue.  The suspend state has already
		 * blocked the queue so future requests should not
		 * occur until the device transitions out of the
		 * suspend state.
		 */
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : device blocked\n"));
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Store the LUN value in cmnd, if needed. */
	if (cmd->device->lun_in_cdb)
		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
			       (cmd->device->lun << 5 & 0xe0);

	scsi_log_send(cmd);

	/*
	 * Before we queue this command, check if the command
	 * length exceeds what the host adapter can handle.
	 */
	if (cmd->cmd_len > cmd->device->host->max_cmd_len) {
		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			       "queuecommand : command too long. "
			       "cdb_size=%d host->max_cmd_len=%d\n",
			       cmd->cmd_len, cmd->device->host->max_cmd_len));
		cmd->result = (DID_ABORT << 16);
		goto done;
	}

	if (unlikely(host->shost_state == SHOST_DEL)) {
		cmd->result = (DID_NO_CONNECT << 16);
		goto done;

	}

	trace_scsi_dispatch_cmd_start(cmd);
	rtn = host->hostt->queuecommand(host, cmd);
	if (rtn) {
		trace_scsi_dispatch_cmd_error(cmd, rtn);
		if (rtn != SCSI_MLQUEUE_DEVICE_BUSY &&
		    rtn != SCSI_MLQUEUE_TARGET_BUSY)
			rtn = SCSI_MLQUEUE_HOST_BUSY;

		SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd,
			"queuecommand : request rejected\n"));
	}

	return rtn;
 done:
	cmd->scsi_done(cmd);
	return 0;
}

/**
 * scsi_done - Invoke completion on finished SCSI command.
 * @cmd: The SCSI Command for which a low-level device driver (LLDD) gives
 * ownership back to SCSI Core -- i.e. the LLDD has finished with it.
 *
 * Description: This function is the mid-level's (SCSI Core) interrupt routine,
 * which regains ownership of the SCSI command (de facto) from a LLDD, and
 * calls blk_complete_request() for further processing.
 *
 * This function is interrupt context safe.
 */
static void scsi_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_complete_request(cmd->request);
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
	__releases(q->queue_lock)
	__acquires(q->queue_lock)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req)
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}

		if (!scsi_dev_queue_ready(q, sdev))
			break;

		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);

		spin_unlock_irq(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
			spin_lock_irq(shost->host_lock);
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			spin_unlock_irq(shost->host_lock);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto host_not_ready;

		if (sdev->simple_tags)
			cmd->flags |= SCMD_TAGGED;
		else
			cmd->flags &= ~SCMD_TAGGED;

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		cmd->scsi_done = scsi_done;
		rtn = scsi_dispatch_cmd(cmd);
		if (rtn) {
			scsi_queue_insert(cmd, rtn);
			spin_lock_irq(q->queue_lock);
			goto out_delay;
		}
		spin_lock_irq(q->queue_lock);
	}

	return;

 host_not_ready:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
 not_ready:
	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	atomic_dec(&sdev->device_busy);
out_delay:
	if (!atomic_read(&sdev->device_busy) && !scsi_device_blocked(sdev))
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
}

static inline int prep_to_mq(int ret)
{
	switch (ret) {
	case BLKPREP_OK:
		return 0;
	case BLKPREP_DEFER:
		return BLK_MQ_RQ_QUEUE_BUSY;
	default:
		return BLK_MQ_RQ_QUEUE_ERROR;
	}
}

static int scsi_mq_prep_fn(struct request *req)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct scsi_device *sdev = req->q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned char *sense_buf = cmd->sense_buffer;
	struct scatterlist *sg;

	memset(cmd, 0, sizeof(struct scsi_cmnd));

	req->special = cmd;

	cmd->request = req;
	cmd->device = sdev;
	cmd->sense_buffer = sense_buf;

	cmd->tag = req->tag;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	INIT_LIST_HEAD(&cmd->list);
	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
	cmd->jiffies_at_alloc = jiffies;

	if (shost->use_cmd_list) {
		spin_lock_irq(&sdev->list_lock);
		list_add_tail(&cmd->list, &sdev->cmd_list);
		spin_unlock_irq(&sdev->list_lock);
	}

	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
	cmd->sdb.table.sgl = sg;

	if (scsi_host_get_prot(shost)) {
		cmd->prot_sdb = (void *)sg +
			min_t(unsigned int,
			      shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
			sizeof(struct scatterlist);
		memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));

		cmd->prot_sdb->table.sgl =
			(struct scatterlist *)(cmd->prot_sdb + 1);
	}

	if (blk_bidi_rq(req)) {
		struct request *next_rq = req->next_rq;
		struct scsi_data_buffer *bidi_sdb = blk_mq_rq_to_pdu(next_rq);

		memset(bidi_sdb, 0, sizeof(struct scsi_data_buffer));
		bidi_sdb->table.sgl =
			(struct scatterlist *)(bidi_sdb + 1);

		next_rq->special = bidi_sdb;
	}

	blk_mq_start_request(req);

	return scsi_setup_cmnd(sdev, req);
}
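
/*
 * For reference, the per-request PDU layout that the pointer arithmetic in
 * scsi_mq_prep_fn() relies on (a sketch; actual sizes depend on the host
 * template and on protection support):
 *
 *	struct scsi_cmnd
 *	driver private data		(shost->hostt->cmd_size bytes)
 *	struct scatterlist[]		(data SG table, sg_tablesize entries)
 *	struct scsi_data_buffer		(only if scsi_host_get_prot(shost))
 *	struct scatterlist[]		(protection SG table)
 *
 * scsi_mq_setup_tags() below sizes tag_set.cmd_size to match this layout.
 */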

static void scsi_mq_done(struct scsi_cmnd *cmd)
{
	trace_scsi_dispatch_cmd_done(cmd);
	blk_mq_complete_request(cmd->request);
}

static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
			 const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct request_queue *q = req->q;
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	int ret;
	int reason;

	ret = prep_to_mq(scsi_prep_state_check(sdev, req));
	if (ret)
		goto out;

	ret = BLK_MQ_RQ_QUEUE_BUSY;
	if (!get_device(&sdev->sdev_gendev))
		goto out;

	if (!scsi_dev_queue_ready(q, sdev))
		goto out_put_device;
	if (!scsi_target_queue_ready(shost, sdev))
		goto out_dec_device_busy;
	if (!scsi_host_queue_ready(q, shost, sdev))
		goto out_dec_target_busy;

	if (!(req->cmd_flags & REQ_DONTPREP)) {
		ret = prep_to_mq(scsi_mq_prep_fn(req));
		if (ret)
			goto out_dec_host_busy;
		req->cmd_flags |= REQ_DONTPREP;
	} else {
		blk_mq_start_request(req);
	}

	if (sdev->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;

	scsi_init_cmd_errh(cmd);
	cmd->scsi_done = scsi_mq_done;

	reason = scsi_dispatch_cmd(cmd);
	if (reason) {
		scsi_set_blocked(cmd, reason);
		ret = BLK_MQ_RQ_QUEUE_BUSY;
		goto out_dec_host_busy;
	}

	return BLK_MQ_RQ_QUEUE_OK;

out_dec_host_busy:
	atomic_dec(&shost->host_busy);
out_dec_target_busy:
	if (scsi_target(sdev)->can_queue > 0)
		atomic_dec(&scsi_target(sdev)->target_busy);
out_dec_device_busy:
	atomic_dec(&sdev->device_busy);
out_put_device:
	put_device(&sdev->sdev_gendev);
out:
	switch (ret) {
	case BLK_MQ_RQ_QUEUE_BUSY:
		blk_mq_stop_hw_queue(hctx);
		if (atomic_read(&sdev->device_busy) == 0 &&
		    !scsi_device_blocked(sdev))
			blk_mq_delay_queue(hctx, SCSI_QUEUE_DELAY);
		break;
	case BLK_MQ_RQ_QUEUE_ERROR:
		/*
		 * Make sure to release all allocated resources when
		 * we hit an error, as we will never see this command
		 * again.
		 */
		if (req->cmd_flags & REQ_DONTPREP)
			scsi_mq_uninit_cmd(cmd);
		break;
	default:
		break;
	}
	return ret;
}

static enum blk_eh_timer_return scsi_timeout(struct request *req,
		bool reserved)
{
	if (reserved)
		return BLK_EH_RESET_TIMER;
	return scsi_times_out(req);
}

static int scsi_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	cmd->sense_buffer = kzalloc_node(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL,
			numa_node);
	if (!cmd->sense_buffer)
		return -ENOMEM;
	return 0;
}

static void scsi_exit_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);

	kfree(cmd->sense_buffer);
}

static u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;

	return bounce_limit;
}

static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
{
	struct device *dev = shost->dma_dev;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);
}

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;
	__scsi_init_queue(shost, q);
	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_unprep_rq(q, scsi_unprep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

static struct blk_mq_ops scsi_mq_ops = {
	.map_queue	= blk_mq_map_queue,
	.queue_rq	= scsi_queue_rq,
	.complete	= scsi_softirq_done,
	.timeout	= scsi_timeout,
	.init_request	= scsi_init_request,
	.exit_request	= scsi_exit_request,
};

struct request_queue *scsi_mq_alloc_queue(struct scsi_device *sdev)
{
	sdev->request_queue = blk_mq_init_queue(&sdev->host->tag_set);
	if (IS_ERR(sdev->request_queue))
		return NULL;

	sdev->request_queue->queuedata = sdev;
	__scsi_init_queue(sdev->host, sdev->request_queue);
	return sdev->request_queue;
}

int scsi_mq_setup_tags(struct Scsi_Host *shost)
{
	unsigned int cmd_size, sgl_size, tbl_size;

	tbl_size = shost->sg_tablesize;
	if (tbl_size > SCSI_MAX_SG_SEGMENTS)
		tbl_size = SCSI_MAX_SG_SEGMENTS;
	sgl_size = tbl_size * sizeof(struct scatterlist);
	cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size;
	if (scsi_host_get_prot(shost))
		cmd_size += sizeof(struct scsi_data_buffer) + sgl_size;

	memset(&shost->tag_set, 0, sizeof(shost->tag_set));
	shost->tag_set.ops = &scsi_mq_ops;
	shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? : 1;
	shost->tag_set.queue_depth = shost->can_queue;
	shost->tag_set.cmd_size = cmd_size;
	shost->tag_set.numa_node = NUMA_NO_NODE;
	shost->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	shost->tag_set.flags |=
		BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy);
	shost->tag_set.driver_data = shost;

	return blk_mq_alloc_tag_set(&shost->tag_set);
}

void scsi_mq_destroy_tags(struct Scsi_Host *shost)
{
	blk_mq_free_tag_set(&shost->tag_set);
}
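
/*
 * Illustrative call flow (a sketch of how the SCSI core itself uses these
 * helpers; they are mid-layer internals, not a driver-facing API):
 *
 *	scsi_mq_setup_tags(shost);	// once, when the host is registered
 *	q = scsi_mq_alloc_queue(sdev);	// per device, at scan time
 *	...
 *	scsi_mq_destroy_tags(shost);	// when the host is released
 */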

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
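
/*
 * Illustrative LLD usage (a sketch, not taken from a specific driver):
 * pause the host around an operation that must not race with new commands,
 * such as a firmware reset:
 *
 *	scsi_block_requests(shost);
 *	... reset the hardware; no new commands reach queuecommand() ...
 *	scsi_unblock_requests(shost);
 */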

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);

/**
 *	scsi_mode_sense - issue a mode sense, falling back from a ten byte to a six byte command if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data: returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful, or the SCSI command result on error.
 *	On success, @data->header_length holds the header offset (either 4
 *	or 8, depending on whether a six or ten byte command was issued).
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		  unsigned char *buffer, int len, int timeout, int retries,
		  struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the code page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
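
/*
 * Illustrative pairing of scsi_mode_sense() and scsi_mode_select(), modeled
 * loosely on sd.c's cache-page handling (a sketch; the page number and bit
 * offsets here are assumptions for the example, not a guaranteed layout):
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buf[64], *pg;
 *	int res, len;
 *
 *	res = scsi_mode_sense(sdev, 0x08, 8, buf, sizeof(buf),
 *			      10 * HZ, 3, &data, NULL);
 *	if (scsi_status_is_good(res)) {
 *		pg = buf + data.header_length +
 *		     data.block_descriptor_length;
 *		len = data.length - data.header_length -
 *		      data.block_descriptor_length;
 *		pg[2] |= 0x04;		// e.g. set WCE in the caching page
 *		pg[0] &= ~0x80;		// clear PS before MODE SELECT
 *		scsi_mode_select(sdev, 1, 0, 8, pg, len,
 *				 10 * HZ, 3, &data, &sshdr);
 *	}
 */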

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful, or an error code if the TUR failed.  For
 *	removable media, UNIT_ATTENTION sets ->changed flag.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
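
/*
 * Illustrative use (a sketch): poll a removable-media device for readiness;
 * the helper itself retries UNIT ATTENTION, so the caller only inspects the
 * final sense:
 *
 *	struct scsi_sense_hdr sshdr = { };	// must be cleared by caller
 *	bool ready;
 *
 *	if (scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr) == 0)
 *		ready = true;			// device is ready
 *	else if (scsi_sense_valid(&sshdr) &&
 *		 sshdr.sense_key == NOT_READY)
 *		ready = false;			// no medium or spinning up
 */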

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful, or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
	case SDEV_TRANSPORT_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_TRANSPORT_OFFLINE:
		case SDEV_CANCEL:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
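
/*
 * Summary of the legal transitions encoded in the switch above (derived
 * from the code; informational only):
 *
 *	CREATED_BLOCK                                         -> CREATED
 *	CREATED, OFFLINE, TRANSPORT_OFFLINE, QUIESCE, BLOCK   -> RUNNING
 *	RUNNING, OFFLINE, TRANSPORT_OFFLINE                   -> QUIESCE
 *	CREATED, RUNNING, QUIESCE, BLOCK      -> OFFLINE, TRANSPORT_OFFLINE
 *	RUNNING, CREATED_BLOCK                                -> BLOCK
 *	CREATED                                               -> CREATED_BLOCK
 *	CREATED, RUNNING, QUIESCE, OFFLINE, TRANSPORT_OFFLINE,
 *	BLOCK                                                 -> CANCEL
 *	CREATED, RUNNING, OFFLINE, TRANSPORT_OFFLINE, CANCEL,
 *	CREATED_BLOCK                                         -> DEL
 */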

/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED";
		break;
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	       envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED";
		break;
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED";
		break;
	case SDEV_EVT_LUN_CHANGE_REPORTED:
		envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED";
		break;
	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * 	sdev_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	enum scsi_device_event evt_type;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++)
		if (test_and_clear_bit(evt_type, sdev->pending_events))
			sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	case SDEV_EVT_INQUIRY_CHANGE_REPORTED:
	case SDEV_EVT_CAPACITY_CHANGE_REPORTED:
	case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED:
	case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED:
	case SDEV_EVT_LUN_CHANGE_REPORTED:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
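
/*
 * Illustrative use (a sketch): a driver that detects a media change from
 * completion context can assert the event and let the event thread turn it
 * into a uevent:
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_ATOMIC);
 */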

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be 
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful, or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (atomic_read(&sdev->device_busy)) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void scsi_device_resume(struct scsi_device *sdev)
{
	/* check if the device state was mutated prior to resume, and if
	 * so assume the state is being managed elsewhere (for example
	 * device deleted during suspend)
	 */
	if (sdev->sdev_state != SDEV_QUIESCE ||
	    scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
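
/*
 * Illustrative use (a sketch): suspend/resume code quiesces a device so
 * that only special (e.g. power management) requests are processed:
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue PM requests; normal I/O stays deferred ...
 *		scsi_device_resume(sdev);
 *	}
 */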

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 * Block request made by scsi lld's to temporarily stop all
 * scsi commands on the specified device.  Called from interrupt
 * or normal process context.
 *
 * Returns zero if successful or error if not
 *
 * Notes:       
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	if (q->mq_ops) {
		blk_mq_stop_hw_queues(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 * @new_state:	state to set devices to after unblocking
 *
 * Called by scsi lld's or the midlayer to restart the device queue
 * for the previously suspended scsi device.  Called from interrupt or
 * normal process context.
 *
 * Returns zero if successful or error if not.
 *
 * Notes:       
 *	This routine transitions the device to the SDEV_RUNNING state
 *	or to one of the offline states (which must be a legal transition)
 *	allowing the midlayer to goose the queue for this device.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev,
			     enum scsi_device_state new_state)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING or one of the
	 * offlined states and goose the device queue if successful.
	 */
	if ((sdev->sdev_state == SDEV_BLOCK) ||
	    (sdev->sdev_state == SDEV_TRANSPORT_OFFLINE))
		sdev->sdev_state = new_state;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK) {
		if (new_state == SDEV_TRANSPORT_OFFLINE ||
		    new_state == SDEV_OFFLINE)
			sdev->sdev_state = new_state;
		else
			sdev->sdev_state = SDEV_CREATED;
	} else if (sdev->sdev_state != SDEV_CANCEL &&
		 sdev->sdev_state != SDEV_OFFLINE)
		return -EINVAL;

	if (q->mq_ops) {
		blk_mq_start_stopped_hw_queues(q, false);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
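
/*
 * Illustrative LLD usage (a sketch): fence off a device while its transport
 * session is re-established, then let I/O flow again:
 *
 *	scsi_internal_device_block(sdev);
 *	... relogin / rediscovery ...
 *	scsi_internal_device_unblock(sdev, SDEV_RUNNING);
 */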

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), data,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev, enum scsi_device_state new_state)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), &new_state,
					device_unblock);
	else
		device_for_each_child(dev, &new_state, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
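
/*
 * Illustrative use (a sketch): peek at a command's payload without walking
 * the scatterlist by hand; interrupts must be disabled, and the offset and
 * length chosen here are hypothetical:
 *
 *	size_t offset = 0, len = 8;		// first 8 bytes of payload
 *	char *p;
 *
 *	p = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				&offset, &len);
 *	if (p) {
 *		... inspect at most len bytes at p + offset ...
 *		scsi_kunmap_atomic_sg(p);
 *	}
 */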

void sdev_disable_disk_events(struct scsi_device *sdev)
{
	atomic_inc(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_disable_disk_events);

void sdev_enable_disk_events(struct scsi_device *sdev)
{
	if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0))
		return;
	atomic_dec(&sdev->disk_events_disable_depth);
}
EXPORT_SYMBOL(sdev_enable_disk_events);
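
/*
 * Illustrative use (a sketch): suppress media-change polling around an
 * operation that would otherwise generate spurious disk events:
 *
 *	sdev_disable_disk_events(sdev);
 *	... e.g. load or eject the medium internally ...
 *	sdev_enable_disk_events(sdev);
 */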