sd.c 99.3 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
/*
 *      sd.c Copyright (C) 1992 Drew Eckhardt
 *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *      Linux scsi disk driver
 *              Initial versions: Drew Eckhardt
 *              Subsequent revisions: Eric Youngdale
 *	Modification history:
 *       - Drew Eckhardt <drew@colorado.edu> original
 *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple 
 *         outstanding request, and other enhancements.
 *         Support loadable low-level scsi drivers.
 *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using 
 *         eight major numbers.
 *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in 
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media 
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox 
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>: 
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.	
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
48
#include <linux/blk-pm.h>
L
Linus Torvalds 已提交
49
#include <linux/delay.h>
50
#include <linux/major.h>
51
#include <linux/mutex.h>
52
#include <linux/string_helpers.h>
53
#include <linux/slab.h>
54
#include <linux/sed-opal.h>
55
#include <linux/pm_runtime.h>
56
#include <linux/pr.h>
57
#include <linux/t10-pi.h>
58
#include <linux/uaccess.h>
59
#include <asm/unaligned.h>
L
Linus Torvalds 已提交
60 61 62 63 64 65 66 67 68 69 70

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

71
#include "sd.h"
72
#include "scsi_priv.h"
L
Linus Torvalds 已提交
73 74
#include "scsi_logging.h"

75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94
MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
95 96 97
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
98
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
99

100
#define SD_MINORS	16
101

102
static void sd_config_discard(struct scsi_disk *, unsigned int);
103
static void sd_config_write_same(struct scsi_disk *);
104
static int  sd_revalidate_disk(struct gendisk *);
105
static void sd_unlock_native_capacity(struct gendisk *disk);
106 107 108
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
109 110
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
111
static int sd_resume_system(struct device *);
112
static int sd_resume_runtime(struct device *);
113
static void sd_rescan(struct device *);
114
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
115
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
116
static int sd_done(struct scsi_cmnd *);
117
static void sd_eh_reset(struct scsi_cmnd *);
118
static int sd_eh_action(struct scsi_cmnd *, int);
119
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
120
static void scsi_disk_release(struct device *cdev);
121

122
static DEFINE_IDA(sd_index_ida);
L
Linus Torvalds 已提交
123

124
static struct kmem_cache *sd_cdb_cache;
125
static mempool_t *sd_page_pool;
126
static struct lock_class_key sd_bio_compl_lkclass;
127

128 129 130 131 132
/*
 * Cache configuration names shown via the "cache_type" sysfs attribute.
 * Indexed by (WCE << 1) | RCD — see cache_type_show()/cache_type_store().
 */
static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

133 134
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
135
	bool wc = false, fua = false;
136 137

	if (sdkp->WCE) {
138
		wc = true;
139
		if (sdkp->DPOFUA)
140
			fua = true;
141 142
	}

143
	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
144 145
}

146
/*
 * Change the drive cache mode via sysfs. Accepts one of the strings in
 * sd_cache_types[], optionally prefixed with "temporary " to update
 * only the kernel's idea of the cache state without touching the
 * device's caching mode page (MODE SELECT is skipped).
 */
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	/* "temporary " prefix: override cached state only. */
	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	/* Decode the table index: bit 0 = RCD, bit 1 = WCE. A
	 * write-protected disk never gets write caching enabled. */
	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	/* Read the current Caching mode page (0x08), patch the WCE and
	 * RCD bits, then write it back with MODE SELECT. */
	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		  data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		data.block_descriptor_length;
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	/* Preserve the Saved Pages request, then clear the PS bit —
	 * it is reserved in the MODE SELECT payload. */
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

215
static ssize_t
216 217 218 219 220 221
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

222
	return sprintf(buf, "%u\n", sdp->manage_start_stop);
223 224 225 226 227
}

static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
228
{
229
	struct scsi_disk *sdkp = to_scsi_disk(dev);
230
	struct scsi_device *sdp = sdkp->device;
231
	bool v;
232 233 234 235

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

236 237 238 239
	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->manage_start_stop = v;
240 241 242

	return count;
}
243
static DEVICE_ATTR_RW(manage_start_stop);
244

245
static ssize_t
246 247 248 249
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

250
	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
251 252 253 254 255
}

static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
256
{
257
	bool v;
258
	struct scsi_disk *sdkp = to_scsi_disk(dev);
259 260 261 262 263
	struct scsi_device *sdp = sdkp->device;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

264
	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
265 266
		return -EINVAL;

267 268 269 270
	if (kstrtobool(buf, &v))
		return -EINVAL;

	sdp->allow_restart = v;
271 272 273

	return count;
}
274
static DEVICE_ATTR_RW(allow_restart);
275

276
static ssize_t
277
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
278
{
279
	struct scsi_disk *sdkp = to_scsi_disk(dev);
280 281
	int ct = sdkp->RCD + 2*sdkp->WCE;

282
	return sprintf(buf, "%s\n", sd_cache_types[ct]);
283
}
284
static DEVICE_ATTR_RW(cache_type);
285

286
static ssize_t
287
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
288
{
289
	struct scsi_disk *sdkp = to_scsi_disk(dev);
290

291
	return sprintf(buf, "%u\n", sdkp->DPOFUA);
292
}
293
static DEVICE_ATTR_RO(FUA);
294

295
static ssize_t
296 297
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
298 299 300
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

301
	return sprintf(buf, "%u\n", sdkp->protection_type);
302 303
}

304
static ssize_t
305 306
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
307 308 309 310 311 312 313 314 315 316 317 318 319
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

320
	if (val <= T10_PI_TYPE3_PROTECTION)
321 322 323 324
		sdkp->protection_type = val;

	return count;
}
325
static DEVICE_ATTR_RW(protection_type);
326

327
static ssize_t
328 329
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
330 331 332 333 334 335 336 337
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

338
	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
339 340 341 342 343
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
344
		return sprintf(buf, "none\n");
345

346
	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
347
}
348
static DEVICE_ATTR_RO(protection_mode);
349

350
static ssize_t
351
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
352 353 354
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

355
	return sprintf(buf, "%u\n", sdkp->ATO);
356
}
357
static DEVICE_ATTR_RO(app_tag_own);
358

359
static ssize_t
360 361
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
362 363 364
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

365
	return sprintf(buf, "%u\n", sdkp->lbpme);
366
}
367
static DEVICE_ATTR_RO(thin_provisioning);
368

369
/* sysfs_match_string() requires dense arrays */
/* Human-readable names for the SD_LBP_* provisioning modes. */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

/* Show the current logical block provisioning mode (see lbp_mode[]). */
static ssize_t
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
}

/*
 * Select how discards are translated into SCSI commands. Zoned devices
 * always get discard support disabled; only TYPE_DISK devices may pick
 * another mode.
 */
static ssize_t
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	int mode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

	if (sdp->type != TYPE_DISK)
		return -EINVAL;

	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
		return -EINVAL;

	sd_config_discard(sdkp, mode);

	return count;
}
static DEVICE_ATTR_RW(provisioning_mode);
416

417
/* sysfs_match_string() requires dense arrays */
/* Human-readable names for the SD_ZERO_* write-zeroes strategies. */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

431
	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
432 433 434 435 436 437 438
}

static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
439
	int mode;
440 441 442 443

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

444 445
	mode = sysfs_match_string(zeroing_mode, buf);
	if (mode < 0)
446 447
		return -EINVAL;

448 449
	sdkp->zeroing_mode = mode;

450 451 452 453
	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

454
static ssize_t
455 456
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
457 458 459
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

460
	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
461 462 463
}

static ssize_t
464 465 466
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
467 468 469 470 471 472 473 474 475 476 477
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
478
static DEVICE_ATTR_RW(max_medium_access_timeouts);
479

480
/* Report the current WRITE SAME limit in logical blocks. */
static ssize_t
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
}

/*
 * Tune the WRITE SAME limit. Writing 0 disables WRITE SAME for the
 * device; any value up to SD_MAX_WS16_BLOCKS re-enables it and sets
 * the new cap. Queue limits are refreshed via sd_config_write_same().
 */
static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}
	/* NOTE: values above SD_MAX_WS16_BLOCKS are silently ignored. */

	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

522 523 524 525 526 527 528 529 530 531 532 533 534 535 536
/*
 * Report the disk's zoned model: "host-managed" for ZBC devices,
 * otherwise based on the ZONED field cached in sdkp->zoned.
 */
static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	if (sdkp->device->type == TYPE_ZBC)
		return sprintf(buf, "host-managed\n");

	switch (sdkp->zoned) {
	case 1:
		return sprintf(buf, "host-aware\n");
	case 2:
		return sprintf(buf, "drive-managed\n");
	default:
		return sprintf(buf, "none\n");
	}
}
static DEVICE_ATTR_RO(zoned_cap);

537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569
static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries == SCSI_CMD_RETRIES_NO_LIMIT || retries <= SD_MAX_RETRIES) {
		sdkp->max_retries = retries;
		return count;
	}

	sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
		    SD_MAX_RETRIES);
	return -EINVAL;
}

static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	return sprintf(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

570 571 572 573 574 575 576 577 578 579
/* sysfs attributes attached to each scsi_disk device. */
static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
588 589 590 591

/*
 * The "scsi_disk" device class; one device per sd disk, exposing the
 * sysfs attributes in sd_disk_groups and released via
 * scsi_disk_release().
 */
static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};
L
Linus Torvalds 已提交
595

596
/*
 * Power management callbacks. System suspend/poweroff and runtime
 * suspend share the same pair of handlers for entering and leaving
 * low-power states.
 */
static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

L
Linus Torvalds 已提交
605 606 607
/*
 * SCSI upper-level driver registration for disk-type devices.
 * Probing prefers asynchronous execution; init_command/uninit_command
 * and done hook the request setup and completion paths, and the
 * eh_* callbacks participate in SCSI error handling.
 */
static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

623
/*
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
	/* Intentionally empty: a no-op probe callback. */
}

L
Linus Torvalds 已提交
631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659
/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major we have 16k disks, mapped non-contiguously:
 * the first 16 disks belong to major 0, the next 16 to major 1,
 * and so on; disk 256 wraps back to major 0, disk 272 to major 1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-known SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	if (major_idx == 0)
		return SCSI_DISK0_MAJOR;
	if (major_idx >= 1 && major_idx <= 7)
		return SCSI_DISK1_MAJOR + major_idx - 1;
	if (major_idx >= 8 && major_idx <= 15)
		return SCSI_DISK8_MAJOR + major_idx - 8;
	BUG();
	return 0;	/* shut up gcc */
}

660 661 662 663
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
664 665
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
666 667 668 669 670 671 672 673
	u8 cdb[12] = { 0, };
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

B
Bart Van Assche 已提交
674 675 676
	ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
		buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
		RQF_PM, NULL);
677 678 679 680
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742
/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Table indexed by bit 2 (write), bit 1 (dix), bit 0 (dif). */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};
	unsigned int idx = (write << 2) | (dix << 1) | dif;

	return ops[idx];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

743 744
/*
 * Set up the protection-information flags on @scmd and return the
 * RDPROTECT/WRPROTECT value to place in the CDB (0 when no target
 * checking is requested).
 */
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	/* Keep only the flags valid for this protection operation. */
	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

782 783 784 785 786 787
/*
 * Record the chosen provisioning mode and configure the queue's
 * discard limits accordingly: alignment and granularity come from the
 * device's UNMAP parameters, and the maximum discard size depends on
 * which SCSI command will be used to service discards.
 */
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		/* No discard support: zero the limit and clear the flag. */
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
}

836
/*
 * Translate a discard request into an UNMAP command. The UNMAP
 * parameter list (8-byte header plus one 16-byte block descriptor) is
 * carried in a mempool page attached as the request's special payload;
 * the page is released in sd_uninit_command().
 */
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;	/* PARAMETER LIST LENGTH */

	buf = bvec_virt(&rq->special_vec);
	put_unaligned_be16(6 + 16, &buf[0]);	/* UNMAP DATA LENGTH */
	put_unaligned_be16(16, &buf[2]);	/* BLOCK DESCRIPTOR DATA LENGTH */
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
870

871 872
/*
 * Build a WRITE SAME(16) command that writes one zeroed logical block
 * (kept in a mempool page attached as the special payload) across the
 * request's LBA range. With @unmap set, the UNMAP bit lets the device
 * deallocate instead of writing.
 */
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
902

903 904
/*
 * Build a WRITE SAME(10) command; same payload scheme as the (16)
 * variant but limited to 32-bit LBAs and 16-bit block counts.
 */
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
934

935
/*
 * Service a REQ_OP_WRITE_ZEROES request. Prefer an unmapping WRITE
 * SAME variant when allowed and configured; otherwise fall back to a
 * plain WRITE SAME, choosing (16) when required by the LBA range or
 * the device's ws16 preference.
 */
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	/* WRITE SAME(10) cannot address this range: use (16). */
	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
/*
 * Configure the queue's write-zeroes limit and pick the zeroing
 * strategy for this disk based on its reported WRITE SAME / logical
 * block provisioning capabilities.
 */
static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	/* Choose the zeroing strategy from the LBP capability bits. */
	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * functions took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
}

1023
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1024
{
1025
	struct request *rq = scsi_cmd_to_rq(cmd);
1026
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1027 1028 1029

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1030

1031 1032 1033
	cmd->cmnd[0] = SYNCHRONIZE_CACHE;
	cmd->cmd_len = 10;
	cmd->transfersize = 0;
1034
	cmd->allowed = sdkp->max_retries;
1035

1036
	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1037
	return BLK_STS_OK;
1038 1039
}

1040 1041 1042
/*
 * sd_setup_rw32_cmnd - build a READ(32)/WRITE(32) variable-length CDB.
 * Used by sd_setup_read_write_cmnd() for Type 2 protection information,
 * where the expected initial reference tag (low 32 bits of the LBA) is
 * carried inside the CDB.
 */
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = SD_EXT_CDB_SIZE;
	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

/* Build a 16-byte READ(16)/WRITE(16) CDB for a read or write request. */
static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	unsigned char *cdb = cmd->cmnd;

	cmd->cmd_len  = 16;
	cdb[0]  = write ? WRITE_16 : READ_16;
	cdb[1]  = flags;
	put_unaligned_be64(lba, &cdb[2]);
	put_unaligned_be32(nr_blocks, &cdb[10]);
	cdb[14] = 0;
	cdb[15] = 0;

	return BLK_STS_OK;
}

/* Build a 10-byte READ(10)/WRITE(10) CDB for a read or write request. */
static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	unsigned char *cdb = cmd->cmnd;

	cmd->cmd_len = 10;
	cdb[0] = write ? WRITE_10 : READ_10;
	cdb[1] = flags;
	put_unaligned_be32(lba, &cdb[2]);
	cdb[6] = 0;
	put_unaligned_be16(nr_blocks, &cdb[7]);
	cdb[9] = 0;

	return BLK_STS_OK;
}

/*
 * sd_setup_rw6_cmnd - build a 6-byte READ(6)/WRITE(6) CDB.
 * READ/WRITE(6) has a 21-bit LBA and an 8-bit block count where 0 means
 * 256 blocks, and it has no FUA bit; both quirks are guarded below.
 */
static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

1115
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
L
Linus Torvalds 已提交
1116
{
1117
	struct request *rq = scsi_cmd_to_rq(cmd);
1118
	struct scsi_device *sdp = cmd->device;
1119
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1120
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1121
	sector_t threshold;
1122 1123
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1124 1125
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
1126
	blk_status_t ret;
1127 1128
	unsigned int dif;
	bool dix;
1129

1130
	ret = scsi_alloc_sgtables(cmd);
1131
	if (ret != BLK_STS_OK)
1132
		return ret;
1133

1134
	ret = BLK_STS_IOERR;
1135
	if (!scsi_device_online(sdp) || sdp->changed) {
1136
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1137
		goto fail;
L
Linus Torvalds 已提交
1138 1139
	}

1140
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
1141
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1142
		goto fail;
1143 1144 1145
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1146
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1147
		goto fail;
L
Linus Torvalds 已提交
1148
	}
1149

1150
	/*
1151 1152
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
1153
	 */
1154
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1155

1156 1157
	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
1158
			/* Access up to the threshold but not beyond */
1159
			nr_blocks = threshold - lba;
1160
		} else {
1161 1162
			/* Access only a single logical block */
			nr_blocks = 1;
1163 1164
		}
	}
1165

1166 1167 1168
	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
		if (ret)
1169
			goto fail;
1170 1171
	}

1172
	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1173 1174
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
L
Linus Torvalds 已提交
1175

1176
	if (dif || dix)
1177
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
1178
	else
1179 1180
		protect = 0;

1181
	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1182
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1183
					 protect | fua);
1184
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1185
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1186
					 protect | fua);
1187
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1188
		   sdp->use_10_for_rw || protect) {
1189
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1190
					 protect | fua);
L
Linus Torvalds 已提交
1191
	} else {
1192
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1193
					protect | fua);
L
Linus Torvalds 已提交
1194
	}
1195 1196

	if (unlikely(ret != BLK_STS_OK))
1197
		goto fail;
L
Linus Torvalds 已提交
1198 1199 1200 1201 1202 1203

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
1204 1205
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
1206
	cmd->allowed = sdkp->max_retries;
1207
	cmd->sdb.length = nr_blocks * sdp->sector_size;
1208 1209

	SCSI_LOG_HLQUEUE(1,
1210
			 scmd_printk(KERN_INFO, cmd,
1211 1212 1213 1214
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
1215
			 scmd_printk(KERN_INFO, cmd,
1216 1217 1218
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));
L
Linus Torvalds 已提交
1219 1220

	/*
1221
	 * This indicates that the command is ready from our end to be queued.
L
Linus Torvalds 已提交
1222
	 */
1223
	return BLK_STS_OK;
1224 1225 1226
fail:
	scsi_free_sgtables(cmd);
	return ret;
L
Linus Torvalds 已提交
1227 1228
}

1229
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
C
Christoph Hellwig 已提交
1230
{
1231
	struct request *rq = scsi_cmd_to_rq(cmd);
C
Christoph Hellwig 已提交
1232

M
Mike Christie 已提交
1233 1234
	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
1235
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
1236 1237 1238
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
1239
			return sd_setup_write_same16_cmnd(cmd, true);
1240 1241 1242 1243 1244
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
1245
			return BLK_STS_TARGET;
1246
		}
1247 1248
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
1249
	case REQ_OP_FLUSH:
C
Christoph Hellwig 已提交
1250
		return sd_setup_flush_cmnd(cmd);
M
Mike Christie 已提交
1251 1252
	case REQ_OP_READ:
	case REQ_OP_WRITE:
1253
	case REQ_OP_ZONE_APPEND:
C
Christoph Hellwig 已提交
1254
		return sd_setup_read_write_cmnd(cmd);
1255
	case REQ_OP_ZONE_RESET:
1256 1257
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
1258
	case REQ_OP_ZONE_RESET_ALL:
1259 1260 1261 1262 1263 1264 1265 1266
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
M
Mike Christie 已提交
1267
	default:
1268
		WARN_ON_ONCE(1);
1269
		return BLK_STS_NOTSUPP;
M
Mike Christie 已提交
1270
	}
C
Christoph Hellwig 已提交
1271 1272 1273 1274
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
1275
	struct request *rq = scsi_cmd_to_rq(SCpnt);
C
Christoph Hellwig 已提交
1276

1277
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1278
		mempool_free(rq->special_vec.bv_page, sd_page_pool);
C
Christoph Hellwig 已提交
1279 1280
}

1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296
/*
 * Decide whether sd_open() must revalidate the disk: either the medium
 * may have changed (removable or write-protected device) or user space
 * explicitly requested a rescan via ioctl(BLKRRPART).
 */
static bool sd_need_revalidate(struct block_device *bdev,
		struct scsi_disk *sdkp)
{
	bool media_may_change = sdkp->device->removable || sdkp->write_prot;

	if (media_may_change && bdev_check_media_change(bdev))
		return true;

	/*
	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}

L
Linus Torvalds 已提交
1297 1298
/**
 *	sd_open - open a scsi disk device
1299 1300
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1301 1302 1303 1304 1305 1306 1307 1308
 *
 *	Returns 0 if successful. Returns a negated errno value in case 
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case @inode and @filp carry an abridged amount
 *	of information as noted above.
A
Arnd Bergmann 已提交
1309
 *
1310
 *	Locking: called with bdev->bd_disk->open_mutex held.
L
Linus Torvalds 已提交
1311
 **/
A
Al Viro 已提交
1312
static int sd_open(struct block_device *bdev, fmode_t mode)
L
Linus Torvalds 已提交
1313
{
1314 1315
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
L
Linus Torvalds 已提交
1316 1317
	int retval;

1318
	if (scsi_device_get(sdev))
L
Linus Torvalds 已提交
1319 1320
		return -ENXIO;

1321
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
L
Linus Torvalds 已提交
1322 1323 1324 1325 1326 1327 1328 1329 1330

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

1331 1332
	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);
L
Linus Torvalds 已提交
1333 1334 1335 1336 1337

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
A
Al Viro 已提交
1338
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
L
Linus Torvalds 已提交
1339 1340 1341 1342 1343 1344 1345
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
A
Al Viro 已提交
1346
	if (sdkp->write_prot && (mode & FMODE_WRITE))
L
Linus Torvalds 已提交
1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline.  If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

A
Arnd Bergmann 已提交
1359
	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
L
Linus Torvalds 已提交
1360 1361 1362 1363 1364 1365 1366
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
1367
	scsi_device_put(sdev);
L
Linus Torvalds 已提交
1368 1369 1370 1371 1372 1373
	return retval;	
}

/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	scsi disk.
1374 1375
 *	@disk: disk to release
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1376 1377 1378 1379 1380
 *
 *	Returns 0. 
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *	on this disk.
A
Arnd Bergmann 已提交
1381
 *
1382
 *	Locking: called with bdev->bd_disk->open_mutex held.
L
Linus Torvalds 已提交
1383
 **/
1384
static void sd_release(struct gendisk *disk, fmode_t mode)
L
Linus Torvalds 已提交
1385 1386 1387 1388
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

1389
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
L
Linus Torvalds 已提交
1390

A
Alan Stern 已提交
1391
	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
L
Linus Torvalds 已提交
1392 1393 1394 1395
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

1396
	scsi_device_put(sdev);
L
Linus Torvalds 已提交
1397 1398
}

1399
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
L
Linus Torvalds 已提交
1400 1401 1402 1403
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
1404
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
L
Linus Torvalds 已提交
1405 1406 1407
	int diskinfo[4];

	/* default to most commonly used values */
1408 1409 1410 1411
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

L
Linus Torvalds 已提交
1412 1413
	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
1414
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
L
Linus Torvalds 已提交
1415
	else
1416
		scsicam_bios_param(bdev, capacity, diskinfo);
L
Linus Torvalds 已提交
1417

1418 1419 1420
	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
L
Linus Torvalds 已提交
1421 1422 1423 1424
	return 0;
}

/**
1425
 *	sd_ioctl - process an ioctl
1426 1427
 *	@bdev: target block device
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1428
 *	@cmd: ioctl command number
1429
 *	@arg: this is third argument given to ioctl(2) system call.
L
Linus Torvalds 已提交
1430 1431
 *	Often contains a pointer.
 *
L
Lucas De Marchi 已提交
1432
 *	Returns 0 if successful (some ioctls return positive numbers on
L
Linus Torvalds 已提交
1433 1434 1435
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forward onto the block subsystem or further
1436
 *	down in the scsi subsystem.
L
Linus Torvalds 已提交
1437
 **/
1438 1439
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
L
Linus Torvalds 已提交
1440 1441
{
	struct gendisk *disk = bdev->bd_disk;
1442 1443
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
1444
	void __user *p = (void __user *)arg;
L
Linus Torvalds 已提交
1445 1446
	int error;
    
1447 1448
	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));
L
Linus Torvalds 已提交
1449

1450 1451
	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;
1452

L
Linus Torvalds 已提交
1453 1454 1455 1456 1457 1458
	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
1459 1460 1461
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
1462
		return error;
L
Linus Torvalds 已提交
1463

1464 1465
	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
1466
	return scsi_ioctl(sdp, mode, cmd, p);
L
Linus Torvalds 已提交
1467 1468 1469 1470
}

/*
 * set_media_not_present - record that the medium is gone; flags the
 * device as changed and, for removable devices, zeroes the cached
 * presence and capacity state.
 */
static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

/*
 * media_not_present - check sense data for a "medium not present"
 * condition (ASC 0x3A under UNIT ATTENTION or NOT READY).  Returns 1
 * and updates the disk state when the medium is absent, 0 otherwise.
 */
static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
1500 1501 1502
 *	sd_check_events - check media events
 *	@disk: kernel device descriptor
 *	@clearing: disk events currently being cleared
L
Linus Torvalds 已提交
1503
 *
1504
 *	Returns mask of DISK_EVENT_*.
L
Linus Torvalds 已提交
1505 1506 1507
 *
 *	Note: this function is invoked from the block subsystem.
 **/
1508
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
L
Linus Torvalds 已提交
1509
{
1510
	struct scsi_disk *sdkp = disk->private_data;
1511
	struct scsi_device *sdp;
L
Linus Torvalds 已提交
1512
	int retval;
1513
	bool disk_changed;
L
Linus Torvalds 已提交
1514

1515 1516 1517 1518
	if (!sdkp)
		return 0;

	sdp = sdkp->device;
1519
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
L
Linus Torvalds 已提交
1520 1521 1522 1523 1524 1525 1526

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed.  If the device ever comes back online, we
	 * can deal with it then.  It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
1527 1528 1529 1530
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}
L
Linus Torvalds 已提交
1531 1532 1533 1534 1535 1536 1537 1538 1539 1540

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down. eg iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
1541
	if (scsi_block_when_processing_errors(sdp)) {
1542 1543
		struct scsi_sense_hdr sshdr = { 0, };

1544
		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1545
					      &sshdr);
L
Linus Torvalds 已提交
1546

1547
		/* failed to execute TUR, assume media not present */
1548
		if (retval < 0 || host_byte(retval)) {
1549 1550 1551
			set_media_not_present(sdkp);
			goto out;
		}
L
Linus Torvalds 已提交
1552

1553 1554 1555
		if (media_not_present(sdkp, &sshdr))
			goto out;
	}
1556

L
Linus Torvalds 已提交
1557 1558
	/*
	 * For removable scsi disk we have to recognise the presence
1559
	 * of a disk in the drive.
L
Linus Torvalds 已提交
1560
	 */
1561 1562
	if (!sdkp->media_present)
		sdp->changed = 1;
L
Linus Torvalds 已提交
1563
	sdkp->media_present = 1;
1564
out:
1565
	/*
1566
	 * sdp->changed is set under the following conditions:
1567
	 *
1568 1569
	 *	Medium present state has changed in either direction.
	 *	Device has indicated UNIT_ATTENTION.
1570
	 */
1571
	disk_changed = sdp->changed;
1572
	sdp->changed = 0;
1573
	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
L
Linus Torvalds 已提交
1574 1575
}

1576
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
L
Linus Torvalds 已提交
1577 1578
{
	int retries, res;
1579
	struct scsi_device *sdp = sdkp->device;
1580 1581
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
1582
	struct scsi_sense_hdr my_sshdr;
L
Linus Torvalds 已提交
1583 1584 1585 1586

	if (!scsi_device_online(sdp))
		return -ENODEV;

1587 1588 1589 1590
	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

L
Linus Torvalds 已提交
1591 1592 1593 1594 1595 1596 1597 1598
	for (retries = 3; retries > 0; --retries) {
		unsigned char cmd[10] = { 0 };

		cmd[0] = SYNCHRONIZE_CACHE;
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
1599
		res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
1600
				timeout, sdkp->max_retries, 0, RQF_PM, NULL);
1601
		if (res == 0)
L
Linus Torvalds 已提交
1602 1603 1604
			break;
	}

1605
	if (res) {
H
Hannes Reinecke 已提交
1606
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1607

1608 1609 1610
		if (res < 0)
			return res;

H
Hannes Reinecke 已提交
1611 1612
		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr)) {
1613 1614
			sd_print_sense_hdr(sdkp, sshdr);

H
Hannes Reinecke 已提交
1615 1616 1617 1618
			/* we need to evaluate the error return  */
			if (sshdr->asc == 0x3a ||	/* medium not present */
			    sshdr->asc == 0x20 ||	/* invalid command */
			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
1619 1620
				/* this is no error here */
				return 0;
H
Hannes Reinecke 已提交
1621
		}
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
L
Linus Torvalds 已提交
1637
	}
1638
	return 0;
L
Linus Torvalds 已提交
1639 1640 1641 1642
}

static void sd_rescan(struct device *dev)
{
1643
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
A
Alan Stern 已提交
1644

1645
	sd_revalidate_disk(sdkp->disk);
L
Linus Torvalds 已提交
1646 1647
}

C
Christoph Hellwig 已提交
1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685
/*
 * sd_get_unique_id - block layer ->get_unique_id callback.  Walks the
 * cached VPD page 0x83 (device identification) looking for a designator
 * of the requested @type with logical-unit association, and copies it
 * into @id.  Returns the designator length (8, 12 or 16), -ENXIO when
 * no VPD page is cached, or -EINVAL when no matching designator exists.
 */
static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;
	const struct scsi_vpd *vpd;
	const unsigned char *d;
	int ret = -ENXIO, len;

	/* vpd_pg83 is RCU-protected; hold the read lock while parsing */
	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (!vpd)
		goto out_unlock;

	ret = -EINVAL;
	/* d[3] is the designator length; each entry is d[3] + 4 bytes */
	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
		/* we only care about designators with LU association */
		if (((d[1] >> 4) & 0x3) != 0x00)
			continue;
		if ((d[1] & 0xf) != type)
			continue;

		/*
		 * Only exit early if a 16-byte descriptor was found.  Otherwise
		 * keep looking as one with more entropy might still show up.
		 */
		len = d[3];
		if (len != 8 && len != 12 && len != 16)
			continue;
		ret = len;
		memcpy(id, d + 4, len);
		if (len == 16)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}

1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708
/*
 * sd_pr_type - map a block layer persistent reservation type to the
 * corresponding SCSI reservation type code.  Returns 0 for types with
 * no SCSI equivalent.  (Also drops the stray semicolon that followed
 * the original function's closing brace.)
 */
static char sd_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 0x01;
	case PR_EXCLUSIVE_ACCESS:
		return 0x03;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 0x05;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 0x06;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 0x07;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 0x08;
	default:
		return 0;
	}
}

/*
 * sd_pr_command - issue a PERSISTENT RESERVE OUT command with service
 * action @sa, carrying @key/@sa_key/@flags in the 24-byte parameter
 * list.  Returns the scsi_execute_req() result; logs sense data on
 * CHECK CONDITION.
 */
static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
	struct scsi_sense_hdr sshdr;
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

	result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
			&sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);

	if (scsi_status_is_check_condition(result) &&
	    scsi_sense_valid(&sshdr)) {
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

	return result;
}

/*
 * sd_pr_register - REGISTER (0x00) or REGISTER AND IGNORE EXISTING KEY
 * (0x06) service action, always requesting APTPL so the registration
 * persists across power loss.
 */
static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		u32 flags)
{
	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
			old_key, new_key, 0,
			(1 << 0) /* APTPL */);
}

/* PERSISTENT RESERVE OUT, RESERVE service action (0x01). */
static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
		u32 flags)
{
	u8 scsi_type = sd_pr_type(type);

	if (flags)
		return -EOPNOTSUPP;

	return sd_pr_command(bdev, 0x01, key, 0, scsi_type, 0);
}

/* PERSISTENT RESERVE OUT, RELEASE service action (0x02). */
static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	u8 scsi_type = sd_pr_type(type);

	return sd_pr_command(bdev, 0x02, key, 0, scsi_type, 0);
}

/* Service action 0x05 is PREEMPT AND ABORT, 0x04 is plain PREEMPT. */
static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
		enum pr_type type, bool abort)
{
	u8 sa = abort ? 0x05 : 0x04;

	return sd_pr_command(bdev, sa, old_key, new_key, sd_pr_type(type), 0);
}

/* PERSISTENT RESERVE OUT, CLEAR service action (0x03). */
static int sd_pr_clear(struct block_device *bdev, u64 key)
{
	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

/* Persistent reservation operations exported to the block layer. */
static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};

1780 1781 1782 1783 1784 1785 1786
static void scsi_disk_free_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	put_device(&sdkp->disk_dev);
}

1787
static const struct block_device_operations sd_fops = {
L
Linus Torvalds 已提交
1788
	.owner			= THIS_MODULE,
A
Al Viro 已提交
1789 1790
	.open			= sd_open,
	.release		= sd_release,
1791
	.ioctl			= sd_ioctl,
1792
	.getgeo			= sd_getgeo,
1793
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
1794
	.check_events		= sd_check_events,
1795
	.unlock_native_capacity	= sd_unlock_native_capacity,
1796
	.report_zones		= sd_zbc_report_zones,
C
Christoph Hellwig 已提交
1797
	.get_unique_id		= sd_get_unique_id,
1798
	.free_disk		= scsi_disk_free_disk,
1799
	.pr_ops			= &sd_pr_ops,
L
Linus Torvalds 已提交
1800 1801
};

1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815
/**
 *	sd_eh_reset - reset error handling callback
 *	@scmd:		sd-issued command that has failed
 *
 *	This function is called by the SCSI midlayer before starting
 *	SCSI EH. When counting medium access failures we have to be
 *	careful to register it only only once per device and SCSI EH run;
 *	there might be several timed out commands which will cause the
 *	'max_medium_access_timeouts' counter to trigger after the first
 *	SCSI EH run already and set the device to offline.
 *	So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
1816
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1817 1818 1819 1820 1821

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

1822 1823 1824 1825 1826
/**
 *	sd_eh_action - error handling callback
 *	@scmd:		sd-issued command that has failed
 *	@eh_disp:	The recovery disposition suggested by the midlayer
 *
1827 1828 1829 1830 1831 1832
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp.  We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so wrongly see the device as having a successful
 *	recovery)
1833
 **/
1834
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1835
{
1836
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1837
	struct scsi_device *sdev = scmd->device;
1838

1839
	if (!scsi_device_online(sdev) ||
1840 1841 1842
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
1843 1844 1845 1846 1847 1848 1849 1850 1851
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or has it suffered an internal failure
	 * that prevents access to the storage medium.
	 */
1852 1853 1854 1855
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}
1856 1857 1858 1859 1860 1861 1862 1863 1864

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
1865 1866 1867
		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);
1868

1869
		return SUCCESS;
1870 1871 1872 1873 1874
	}

	return eh_disp;
}

1875 1876
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
1877
	struct request *req = scsi_cmd_to_rq(scmd);
1878 1879 1880 1881
	struct scsi_device *sdev = scmd->device;
	unsigned int transferred, good_bytes;
	u64 start_lba, end_lba, bad_lba;

1882
	/*
1883 1884
	 * Some commands have a payload smaller than the device logical
	 * block size (e.g. INQUIRY on a 4K disk).
1885
	 */
1886
	if (scsi_bufflen(scmd) <= sdev->sector_size)
1887 1888
		return 0;

1889 1890 1891 1892
	/* Check if we have a 'bad_lba' information */
	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE,
				     &bad_lba))
1893 1894
		return 0;

1895 1896
	/*
	 * If the bad lba was reported incorrectly, we have no idea where
1897 1898
	 * the error is.
	 */
1899 1900 1901
	start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
	end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
	if (bad_lba < start_lba || bad_lba >= end_lba)
1902 1903
		return 0;

1904 1905 1906
	/*
	 * resid is optional but mostly filled in.  When it's unused,
	 * its value is zero, so we assume the whole buffer transferred
1907
	 */
1908 1909 1910 1911
	transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);

	/* This computation should always be done in terms of the
	 * resolution of the device's medium.
1912
	 */
1913 1914
	good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);

1915
	return min(good_bytes, transferred);
1916 1917
}

L
Linus Torvalds 已提交
1918
/**
 *	sd_done - bottom half handler: called when the lower level
 *	driver has completed (successfully or otherwise) a scsi command.
 *	@SCpnt: mid-level's per command structure.
 *
 *	Returns the number of bytes successfully transferred.
 *
 *	Note: potentially run from within an ISR. Must not block.
 **/
static int sd_done(struct scsi_cmnd *SCpnt)
{
	int result = SCpnt->result;
	/* Optimistic default: a clean result means the full buffer moved. */
	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
	unsigned int sector_size = SCpnt->device->sector_size;
	unsigned int resid;
	struct scsi_sense_hdr sshdr;
	struct request *req = scsi_cmd_to_rq(SCpnt);
	struct scsi_disk *sdkp = scsi_disk(req->q->disk);
	int sense_valid = 0;
	int sense_deferred = 0;

	switch (req_op(req)) {
	/*
	 * Non-read/write ops transfer no user data: completion is
	 * all-or-nothing based on the SCSI result.
	 */
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!result) {
			good_bytes = blk_rq_bytes(req);
			scsi_set_resid(SCpnt, 0);
		} else {
			good_bytes = 0;
			scsi_set_resid(SCpnt, blk_rq_bytes(req));
		}
		break;
	default:
		/*
		 * In case of bogus fw or device, we could end up having
		 * an unaligned partial completion. Check this here and force
		 * alignment.
		 */
		resid = scsi_get_resid(SCpnt);
		if (resid & (sector_size - 1)) {
			sd_printk(KERN_INFO, sdkp,
				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
				resid, sector_size);
			scsi_print_command(SCpnt);
			/* Round resid up to a sector, capped at the buffer. */
			resid = min(scsi_bufflen(SCpnt),
				    round_up(resid, sector_size));
			scsi_set_resid(SCpnt, resid);
		}
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	/* Any completion resets the medium access timeout accounting. */
	sdkp->medium_access_timed_out = 0;

	if (!scsi_status_is_check_condition(result) &&
	    (!sense_valid || sense_deferred))
		goto out;

	/* CHECK CONDITION with current sense: refine good_bytes per key. */
	switch (sshdr.sense_key) {
	case HARDWARE_ERROR:
	case MEDIUM_ERROR:
		good_bytes = sd_completed_bytes(SCpnt);
		break;
	case RECOVERED_ERROR:
		good_bytes = scsi_bufflen(SCpnt);
		break;
	case NO_SENSE:
		/* This indicates a false check condition, so ignore it.  An
		 * unknown amount of data was transferred so treat it as an
		 * error.
		 */
		SCpnt->result = 0;
		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		break;
	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
		break;
	case ILLEGAL_REQUEST:
		switch (sshdr.asc) {
		case 0x10:	/* DIX: Host detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
			break;
		case 0x20:	/* INVALID COMMAND OPCODE */
		case 0x24:	/* INVALID FIELD IN CDB */
			/* The device rejected a provisioning command:
			 * disable the corresponding feature so we stop
			 * issuing it. */
			switch (SCpnt->cmnd[0]) {
			case UNMAP:
				sd_config_discard(sdkp, SD_LBP_DISABLE);
				break;
			case WRITE_SAME_16:
			case WRITE_SAME:
				if (SCpnt->cmnd[1] & 8) { /* UNMAP */
					sd_config_discard(sdkp, SD_LBP_DISABLE);
				} else {
					sdkp->device->no_write_same = 1;
					sd_config_write_same(sdkp);
					req->rq_flags |= RQF_QUIET;
				}
				break;
			}
		}
		break;
	default:
		break;
	}

 out:
	if (sd_is_zoned(sdkp))
		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);

	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
					   "sd_done: completed %d of %d bytes\n",
					   good_bytes, scsi_bufflen(SCpnt)));

	return good_bytes;
}

/*
 * spinup disk - called only in sd_revalidate_disk()
 *
 * Polls the device with TEST UNIT READY and, when it reports NOT READY,
 * issues START STOP UNIT to spin it up, then keeps polling until the
 * drive is ready, the spinup window expires, or the sense data shows
 * the condition cannot be cleared automatically.
 */
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
	unsigned char cmd[10];
	unsigned long spintime_expire = 0;
	int retries, spintime;
	unsigned int the_result;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;

	spintime = 0;

	/* Spin up drives, as required.  Only do this at boot time */
	/* Spinup needs to be done for module loads too. */
	do {
		retries = 0;

		do {
			bool media_was_present = sdkp->media_present;

			cmd[0] = TEST_UNIT_READY;
			memset((void *) &cmd[1], 0, 9);

			the_result = scsi_execute_req(sdkp->device, cmd,
						      DMA_NONE, NULL, 0,
						      &sshdr, SD_TIMEOUT,
						      sdkp->max_retries, NULL);

			/*
			 * If the drive has indicated to us that it
			 * doesn't have any media in it, don't bother
			 * with any more polling.
			 */
			if (media_not_present(sdkp, &sshdr)) {
				if (media_was_present)
					sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
				return;
			}

			if (the_result)
				sense_valid = scsi_sense_valid(&sshdr);
			retries++;
		/* Retry a few times on UNIT ATTENTION or bad status. */
		} while (retries < 3 &&
			 (!scsi_status_is_good(the_result) ||
			  (scsi_status_is_check_condition(the_result) &&
			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));

		if (!scsi_status_is_check_condition(the_result)) {
			/* no sense, TUR either succeeded or failed
			 * with a status error */
			if(!spintime && !scsi_status_is_good(the_result)) {
				sd_print_result(sdkp, "Test Unit Ready failed",
						the_result);
			}
			break;
		}

		/*
		 * The device does not want the automatic start to be issued.
		 */
		if (sdkp->device->no_start_on_add)
			break;

		if (sense_valid && sshdr.sense_key == NOT_READY) {
			if (sshdr.asc == 4 && sshdr.ascq == 3)
				break;	/* manual intervention required */
			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
				break;	/* standby */
			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
				break;	/* unavailable */
			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
				break;	/* sanitize in progress */
			/*
			 * Issue command to spin up drive when not ready
			 */
			if (!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
				cmd[0] = START_STOP;
				cmd[1] = 1;	/* Return immediately */
				memset((void *) &cmd[2], 0, 8);
				cmd[4] = 1;	/* Start spin cycle */
				if (sdkp->device->start_stop_pwr_cond)
					cmd[4] |= 1 << 4;
				scsi_execute_req(sdkp->device, cmd, DMA_NONE,
						 NULL, 0, &sshdr,
						 SD_TIMEOUT, sdkp->max_retries,
						 NULL);
				/* Allow up to 100 seconds for spinup. */
				spintime_expire = jiffies + 100 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
			printk(KERN_CONT ".");

		/*
		 * Wait for USB flash devices with slow firmware.
		 * Yes, this sense key/ASC combination shouldn't
		 * occur here.  It's characteristic of these devices.
		 */
		} else if (sense_valid &&
				sshdr.sense_key == UNIT_ATTENTION &&
				sshdr.asc == 0x28) {
			if (!spintime) {
				spintime_expire = jiffies + 5 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
		} else {
			/* we don't understand the sense code, so it's
			 * probably pointless to loop */
			if(!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
				sd_print_sense_hdr(sdkp, &sshdr);
			}
			break;
		}

	} while (spintime && time_before_eq(jiffies, spintime_expire));

	if (spintime) {
		if (scsi_status_is_good(the_result))
			printk(KERN_CONT "ready\n");
		else
			printk(KERN_CONT "not responding...\n");
	}
}

2172 2173 2174
/*
 * Determine whether disk supports Data Integrity Field.
 *
 * @buffer holds a READ CAPACITY(16) response; byte 12 carries the
 * PROT_EN and P_TYPE fields.  Returns 1 if the HBA supports the
 * reported DIF type, 0 if protection is absent/unsupported by the
 * host, and -ENODEV for an unknown protection type.
 */
static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdp = sdkp->device;
	u8 type;
	int ret = 0;

	/* PROT_EN clear (or no protection support at all): plain disk. */
	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
		sdkp->protection_type = 0;
		return ret;
	}

	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */

	if (type > T10_PI_TYPE3_PROTECTION)
		ret = -ENODEV;
	else if (scsi_host_dif_capable(sdp->host, type))
		ret = 1;

	/* Only log on first scan or when the type actually changed. */
	if (sdkp->first_scan || type != sdkp->protection_type)
		switch (ret) {
		case -ENODEV:
			sd_printk(KERN_ERR, sdkp, "formatted with unsupported" \
				  " protection type %u. Disabling disk!\n",
				  type);
			break;
		case 1:
			sd_printk(KERN_NOTICE, sdkp,
				  "Enabling DIF Type %u protection\n", type);
			break;
		case 0:
			sd_printk(KERN_NOTICE, sdkp,
				  "Disabling DIF Type %u protection\n", type);
			break;
		}

	sdkp->protection_type = type;

	return ret;
}

2215 2216 2217 2218
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
			struct scsi_sense_hdr *sshdr, int sense_valid,
			int the_result)
{
H
Hannes Reinecke 已提交
2219
	if (sense_valid)
2220 2221 2222 2223 2224 2225 2226 2227 2228 2229
		sd_print_sense_hdr(sdkp, sshdr);
	else
		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");

	/*
	 * Set dirty bit for removable devices if not ready -
	 * sometimes drives will not report this properly.
	 */
	if (sdp->removable &&
	    sense_valid && sshdr->sense_key == NOT_READY)
2230
		set_media_not_present(sdkp);
2231 2232 2233 2234 2235 2236 2237 2238 2239 2240 2241 2242 2243 2244

	/*
	 * We used to set media_present to 0 here to indicate no media
	 * in the drive, but some drives fail read capacity even with
	 * media present, so we can't do that.
	 */
	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
}

#define RC16_LEN 32
#if RC16_LEN > SD_BUF_SIZE
#error RC16_LEN must not be more than SD_BUF_SIZE
#endif

2245 2246
#define READ_CAPACITY_RETRIES_ON_RESET	10

2247 2248
/*
 * Issue READ CAPACITY(16) and parse the response into @sdkp.
 * Returns the logical sector size on success, or a negative errno:
 * -EINVAL when the command is unsupported/failed (caller falls back to
 * READ CAPACITY(10)) and -ENODEV when no media is present or the
 * protection type is unusable.
 */
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	unsigned int alignment;
	unsigned long long lba;
	unsigned sector_size;

	if (sdp->no_read_capacity_16)
		return -EINVAL;

	do {
		memset(cmd, 0, 16);
		cmd[0] = SERVICE_ACTION_IN_16;
		cmd[1] = SAI_READ_CAPACITY_16;
		cmd[13] = RC16_LEN;
		memset(buffer, 0, RC16_LEN);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					buffer, RC16_LEN, &sshdr,
					SD_TIMEOUT, sdkp->max_retries, NULL);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == ILLEGAL_REQUEST &&
			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
			    sshdr.ascq == 0x00)
				/* Invalid Command Operation Code or
				 * Invalid Field in CDB, just retry
				 * silently with RC10 */
				return -EINVAL;
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	sector_size = get_unaligned_be32(&buffer[8]);
	lba = get_unaligned_be64(&buffer[0]);

	if (sd_read_protection_type(sdkp, buffer) < 0) {
		sdkp->capacity = 0;
		return -ENODEV;
	}

	/* Logical blocks per physical block exponent */
	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;

	/* RC basis */
	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;

	/* Lowest aligned logical block */
	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
	blk_queue_alignment_offset(sdp->request_queue, alignment);
	if (alignment && sdkp->first_scan)
		sd_printk(KERN_NOTICE, sdkp,
			  "physical block alignment offset: %u\n", alignment);

	if (buffer[14] & 0x80) { /* LBPME */
		sdkp->lbpme = 1;

		if (buffer[14] & 0x40) /* LBPRZ */
			sdkp->lbprz = 1;

		sd_config_discard(sdkp, SD_LBP_WS16);
	}

	sdkp->capacity = lba + 1;
	return sector_size;
}

/*
 * Issue READ CAPACITY(10) and parse the response into @sdkp.
 * Returns the logical sector size on success, -ENODEV when no media is
 * present, or -EINVAL when the command failed.
 */
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	sector_t lba;
	unsigned sector_size;

	do {
		cmd[0] = READ_CAPACITY;
		memset(&cmd[1], 0, 9);
		memset(buffer, 0, 8);

		the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
					buffer, 8, &sshdr,
					SD_TIMEOUT, sdkp->max_retries, NULL);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	sector_size = get_unaligned_be32(&buffer[4]);
	lba = get_unaligned_be32(&buffer[0]);

	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
		/* Some buggy (usb cardreader) devices return an lba of
		   0xffffffff when they want to report a size of 0 (with
		   which they really mean no media is present) */
		sdkp->capacity = 0;
		sdkp->physical_block_size = sector_size;
		return sector_size;
	}

	sdkp->capacity = lba + 1;
	sdkp->physical_block_size = sector_size;
	return sector_size;
}

2398 2399
static int sd_try_rc16_first(struct scsi_device *sdp)
{
2400 2401
	if (sdp->host->max_cmd_len < 16)
		return 0;
2402 2403
	if (sdp->try_rc_10_first)
		return 0;
2404 2405 2406 2407 2408 2409 2410
	if (sdp->scsi_level > SCSI_SPC_2)
		return 1;
	if (scsi_device_protection(sdp))
		return 1;
	return 0;
}

2411 2412 2413 2414 2415 2416 2417 2418 2419
/*
 * read disk capacity
 *
 * Chooses between READ CAPACITY(16) and (10), handling fallbacks,
 * vendor quirks, and sector-size sanity checks, then programs the
 * request queue's logical/physical block sizes.
 */
static void
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int sector_size;
	struct scsi_device *sdp = sdkp->device;

	if (sd_try_rc16_first(sdp)) {
		sector_size = read_capacity_16(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size == -ENODEV)
			return;
		/* RC16 rejected: silently fall back to RC10. */
		if (sector_size < 0)
			sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size < 0)
			return;
	} else {
		sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size < 0)
			return;
		/* RC10 saturates at 32 bits; retry with RC16 if so. */
		if ((sizeof(sdkp->capacity) > 4) &&
		    (sdkp->capacity > 0xffffffffULL)) {
			int old_sector_size = sector_size;
			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
					"Trying to use READ CAPACITY(16).\n");
			sector_size = read_capacity_16(sdkp, sdp, buffer);
			if (sector_size < 0) {
				sd_printk(KERN_NOTICE, sdkp,
					"Using 0xffffffff as device size\n");
				sdkp->capacity = 1 + (sector_t) 0xffffffff;
				sector_size = old_sector_size;
				goto got_data;
			}
			/* Remember that READ CAPACITY(16) succeeded */
			sdp->try_rc_10_first = 0;
		}
	}

	/* Some devices are known to return the total number of blocks,
	 * not the highest block number.  Some devices have versions
	 * which do this and others which do not.  Some devices we might
	 * suspect of doing this but we don't know for certain.
	 *
	 * If we know the reported capacity is wrong, decrement it.  If
	 * we can only guess, then assume the number of blocks is even
	 * (usually true but not always) and err on the side of lowering
	 * the capacity.
	 */
	if (sdp->fix_capacity ||
	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
				"from its reported value: %llu\n",
				(unsigned long long) sdkp->capacity);
		--sdkp->capacity;
	}

got_data:
	if (sector_size == 0) {
		sector_size = 512;
		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
			  "assuming 512.\n");
	}

	if (sector_size != 512 &&
	    sector_size != 1024 &&
	    sector_size != 2048 &&
	    sector_size != 4096) {
		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
			  sector_size);
		/*
		 * The user might want to re-format the drive with
		 * a supported sectorsize.  Once this happens, it
		 * would be relatively trivial to set the thing up.
		 * For this reason, we leave the thing in the table.
		 */
		sdkp->capacity = 0;
		/*
		 * set a bogus sector size so the normal read/write
		 * logic in the block layer will eventually refuse any
		 * request on this device without tripping over power
		 * of two sector size assumptions
		 */
		sector_size = 512;
	}
	blk_queue_logical_block_size(sdp->request_queue, sector_size);
	blk_queue_physical_block_size(sdp->request_queue,
				      sdkp->physical_block_size);
	sdkp->device->sector_size = sector_size;

	/* LBAs beyond 32 bits require the 16-byte read/write CDBs. */
	if (sdkp->capacity > 0xffffffff)
		sdp->use_16_for_rw = 1;

}
L
Linus Torvalds 已提交
2509

2510 2511 2512 2513 2514 2515 2516 2517 2518
/*
 * Print disk capacity
 */
static void
sd_print_capacity(struct scsi_disk *sdkp,
		  sector_t old_capacity)
{
	int block_len = sdkp->device->sector_size;
	char size_base2[10], size_base10[10];

	/* Only announce on first scan or when the capacity changed. */
	if (!sdkp->first_scan && old_capacity == sdkp->capacity)
		return;

	string_get_size(sdkp->capacity, block_len, STRING_UNITS_2,
			size_base2, sizeof(size_base2));
	string_get_size(sdkp->capacity, block_len, STRING_UNITS_10,
			size_base10, sizeof(size_base10));

	sd_printk(KERN_NOTICE, sdkp,
		  "%llu %d-byte logical blocks: (%s/%s)\n",
		  (unsigned long long)sdkp->capacity,
		  block_len, size_base10, size_base2);

	/* Mention the physical block size only when it differs. */
	if (sdkp->physical_block_size != block_len)
		sd_printk(KERN_NOTICE, sdkp,
			  "%u-byte physical blocks\n",
			  sdkp->physical_block_size);
}

/* called with buffer of length 512 */
static inline int
2541
sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2542 2543
		 unsigned char *buffer, int len, struct scsi_mode_data *data,
		 struct scsi_sense_hdr *sshdr)
L
Linus Torvalds 已提交
2544
{
2545 2546 2547 2548 2549 2550 2551
	/*
	 * If we must use MODE SENSE(10), make sure that the buffer length
	 * is at least 8 bytes so that the mode sense header fits.
	 */
	if (sdkp->device->use_10_for_ms && len < 8)
		len = 8;

2552 2553
	return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
			       SD_TIMEOUT, sdkp->max_retries, data,
2554
			       sshdr);
L
Linus Torvalds 已提交
2555 2556 2557 2558
}

/*
 * read write protect setting, if possible - called only in sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 *
 * Probes the WP bit via MODE SENSE and mirrors it into the gendisk's
 * read-only flag.  Devices that cannot answer are assumed writable.
 */
static void
sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int res;
	struct scsi_device *sdp = sdkp->device;
	struct scsi_mode_data data;
	int old_wp = sdkp->write_prot;

	/* Start from "writable"; flip below if the device says otherwise. */
	set_disk_ro(sdkp->disk, 0);
	if (sdp->skip_ms_page_3f) {
		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
		return;
	}

	if (sdp->use_192_bytes_for_3f) {
		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
	} else {
		/*
		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
		 * We have to start carefully: some devices hang if we ask
		 * for more than is available.
		 */
		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);

		/*
		 * Second attempt: ask for page 0 When only page 0 is
		 * implemented, a request for page 3F may return Sense Key
		 * 5: Illegal Request, Sense Code 24: Invalid field in
		 * CDB.
		 */
		if (res < 0)
			res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);

		/*
		 * Third attempt: ask 255 bytes, as we did earlier.
		 */
		if (res < 0)
			res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
					       &data, NULL);
	}

	if (res < 0) {
		sd_first_printk(KERN_WARNING, sdkp,
			  "Test WP failed, assume Write Enabled\n");
	} else {
		/* Bit 7 of the device-specific byte is the WP flag. */
		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
		set_disk_ro(sdkp->disk, sdkp->write_prot);
		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
				  sdkp->write_prot ? "on" : "off");
			sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
		}
	}
}

/*
 * sd_read_cache_type - called only from sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 *
 * Reads the Caching (page 8) or RBC device parameters (page 6) mode
 * page and derives the WCE/RCD/DPOFUA settings.  Falls back to quirky
 * defaults when the device cannot report them.
 */
static void
sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int len = 0, res;
	struct scsi_device *sdp = sdkp->device;

	int dbd;
	int modepage;
	int first_len;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int old_wce = sdkp->WCE;
	int old_rcd = sdkp->RCD;
	int old_dpofua = sdkp->DPOFUA;


	/* Userspace forced the cache settings; do not probe. */
	if (sdkp->cache_override)
		return;

	/* Pick the mode page, DBD bit and initial transfer length. */
	first_len = 4;
	if (sdp->skip_ms_page_8) {
		if (sdp->type == TYPE_RBC)
			goto defaults;
		else {
			if (sdp->skip_ms_page_3f)
				goto defaults;
			modepage = 0x3F;
			if (sdp->use_192_bytes_for_3f)
				first_len = 192;
			dbd = 0;
		}
	} else if (sdp->type == TYPE_RBC) {
		modepage = 6;
		dbd = 8;
	} else {
		modepage = 8;
		dbd = 0;
	}

	/* cautiously ask */
	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
			&data, &sshdr);

	if (res < 0)
		goto bad_sense;

	if (!data.header_length) {
		modepage = 6;
		first_len = 0;
		sd_first_printk(KERN_ERR, sdkp,
				"Missing header in MODE_SENSE response\n");
	}

	/* that went OK, now ask for the proper length */
	len = data.length;

	/*
	 * We're only interested in the first three bytes, actually.
	 * But the data cache page is defined for the first 20.
	 */
	if (len < 3)
		goto bad_sense;
	else if (len > SD_BUF_SIZE) {
		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
		len = SD_BUF_SIZE;
	}
	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
		len = 192;

	/* Get the data */
	if (len > first_len)
		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
				&data, &sshdr);

	if (!res) {
		int offset = data.header_length + data.block_descriptor_length;

		/* Walk the returned pages looking for page 8 (or 6). */
		while (offset < len) {
			u8 page_code = buffer[offset] & 0x3F;
			u8 spf       = buffer[offset] & 0x40;

			if (page_code == 8 || page_code == 6) {
				/* We're interested only in the first 3 bytes.
				 */
				if (len - offset <= 2) {
					sd_first_printk(KERN_ERR, sdkp,
						"Incomplete mode parameter "
							"data\n");
					goto defaults;
				} else {
					modepage = page_code;
					goto Page_found;
				}
			} else {
				/* Go to the next page */
				if (spf && len - offset > 3)
					offset += 4 + (buffer[offset+2] << 8) +
						buffer[offset+3];
				else if (!spf && len - offset > 1)
					offset += 2 + buffer[offset+1];
				else {
					sd_first_printk(KERN_ERR, sdkp,
							"Incomplete mode "
							"parameter data\n");
					goto defaults;
				}
			}
		}

		sd_first_printk(KERN_WARNING, sdkp,
				"No Caching mode page found\n");
		goto defaults;

	Page_found:
		if (modepage == 8) {
			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
		} else {
			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
			sdkp->RCD = 0;
		}

		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
		if (sdp->broken_fua) {
			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
			sdkp->DPOFUA = 0;
		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
			   !sdkp->device->use_16_for_rw) {
			/* READ/WRITE(6) CDBs have no FUA bit. */
			sd_first_printk(KERN_NOTICE, sdkp,
				  "Uses READ/WRITE(6), disabling FUA\n");
			sdkp->DPOFUA = 0;
		}

		/* No cache flush allowed for write protected devices */
		if (sdkp->WCE && sdkp->write_prot)
			sdkp->WCE = 0;

		if (sdkp->first_scan || old_wce != sdkp->WCE ||
		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
			sd_printk(KERN_NOTICE, sdkp,
				  "Write cache: %s, read cache: %s, %s\n",
				  sdkp->WCE ? "enabled" : "disabled",
				  sdkp->RCD ? "disabled" : "enabled",
				  sdkp->DPOFUA ? "supports DPO and FUA"
				  : "doesn't support DPO or FUA");

		return;
	}

bad_sense:
	if (scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
		/* Invalid field in CDB */
		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
	else
		sd_first_printk(KERN_ERR, sdkp,
				"Asking for cache data failed\n");

defaults:
	if (sdp->wce_default_on) {
		sd_first_printk(KERN_NOTICE, sdkp,
				"Assuming drive cache: write back\n");
		sdkp->WCE = 1;
	} else {
		sd_first_printk(KERN_WARNING, sdkp,
				"Assuming drive cache: write through\n");
		sdkp->WCE = 0;
	}
	sdkp->RCD = 0;
	sdkp->DPOFUA = 0;
}

2794 2795 2796 2797
/*
 * The ATO bit indicates whether the DIF application tag is available
 * for use by the operating system.
 *
 * Reads the Control mode page (0x0a) and sets sdkp->ATO when bit 7 of
 * byte 5 of the page is set.  Only meaningful for protected disks.
 */
static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int res, offset;
	struct scsi_device *sdp = sdkp->device;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return;

	/* ATO is only relevant when protection information is in use. */
	if (sdkp->protection_type == 0)
		return;

	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
			      sdkp->max_retries, &data, &sshdr);

	if (res < 0 || !data.header_length ||
	    data.length < 6) {
		sd_first_printk(KERN_WARNING, sdkp,
			  "getting Control mode page failed, assume no ATO\n");

		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);

		return;
	}

	offset = data.header_length + data.block_descriptor_length;

	/* Sanity check: the returned page must really be page 0x0a. */
	if ((buffer[offset] & 0x3f) != 0x0a) {
		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
		return;
	}

	if ((buffer[offset + 5] & 0x80) == 0)
		return;

	sdkp->ATO = 1;

	return;
}

2840 2841
/**
 * sd_read_block_limits - Query disk device for preferred I/O sizes.
 * @sdkp: disk to query
 *
 * Reads the Block Limits VPD page (0xb0) and records the optimal and
 * maximum transfer sizes plus the UNMAP/WRITE SAME parameters used to
 * configure discard support.  Silently returns on any failure.
 */
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
	unsigned int sector_sz = sdkp->device->sector_size;
	const int vpd_len = 64;
	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);

	if (!buffer ||
	    /* Block Limits VPD */
	    scsi_get_vpd_page(sdkp->device, 0xb0, buffer, vpd_len))
		goto out;

	blk_queue_io_min(sdkp->disk->queue,
			 get_unaligned_be16(&buffer[6]) * sector_sz);

	sdkp->max_xfer_blocks = get_unaligned_be32(&buffer[8]);
	sdkp->opt_xfer_blocks = get_unaligned_be32(&buffer[12]);

	/* A 0x3c page length means the full-size page with LBP fields. */
	if (buffer[3] == 0x3c) {
		unsigned int lba_count, desc_count;

		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);

		if (!sdkp->lbpme)
			goto out;

		lba_count = get_unaligned_be32(&buffer[20]);
		desc_count = get_unaligned_be32(&buffer[24]);

		if (lba_count && desc_count)
			sdkp->max_unmap_blocks = lba_count;

		sdkp->unmap_granularity = get_unaligned_be32(&buffer[28]);

		/* Bit 31 of bytes 32-35 flags a valid unmap alignment. */
		if (buffer[32] & 0x80)
			sdkp->unmap_alignment =
				get_unaligned_be32(&buffer[32]) & ~(1 << 31);

		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */

			if (sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else
				sd_config_discard(sdkp, SD_LBP_WS16);

		} else {	/* LBP VPD page tells us what to use */
			if (sdkp->lbpu && sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else if (sdkp->lbpws)
				sd_config_discard(sdkp, SD_LBP_WS16);
			else if (sdkp->lbpws10)
				sd_config_discard(sdkp, SD_LBP_WS10);
			else
				sd_config_discard(sdkp, SD_LBP_DISABLE);
		}
	}

 out:
	kfree(buffer);
}

2904 2905
/**
 * sd_read_block_characteristics - Query block dev. characteristics
 * @sdkp: disk to query
 *
 * Reads the Block Device Characteristics VPD page (0xb1) to detect
 * non-rotational media and the zoned model (host-aware/host-managed),
 * and programs the request queue accordingly.
 */
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned char *buffer;
	u16 rot;
	const int vpd_len = 64;

	buffer = kmalloc(vpd_len, GFP_KERNEL);

	if (!buffer ||
	    /* Block Device Characteristics VPD */
	    scsi_get_vpd_page(sdkp->device, 0xb1, buffer, vpd_len))
		goto out;

	/* Medium rotation rate: 1 means non-rotating (SSD). */
	rot = get_unaligned_be16(&buffer[4]);

	if (rot == 1) {
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	}

	if (sdkp->device->type == TYPE_ZBC) {
		/* Host-managed */
		blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM);
	} else {
		sdkp->zoned = (buffer[8] >> 4) & 3;
		if (sdkp->zoned == 1) {
			/* Host-aware */
			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA);
		} else {
			/* Regular disk or drive managed disk */
			blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE);
		}
	}

	/* Only announce the zoned model once, on the first scan. */
	if (!sdkp->first_scan)
		goto out;

	if (blk_queue_is_zoned(q)) {
		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
		      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
	} else {
		if (sdkp->zoned == 1)
			sd_printk(KERN_NOTICE, sdkp,
				  "Host-aware SMR disk used as regular disk\n");
		else if (sdkp->zoned == 2)
			sd_printk(KERN_NOTICE, sdkp,
				  "Drive-managed SMR disk\n");
	}

 out:
	kfree(buffer);
}

2962
/**
 * sd_read_block_provisioning - Query provisioning VPD page
 * @sdkp: disk to query
 *
 * Reads the Logical Block Provisioning VPD page (0xb2) and caches the
 * supported unmap mechanisms in @sdkp.  Skipped entirely when the device
 * did not advertise provisioning management (lbpme == 0).
 */
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
	unsigned char *buffer;
	const int vpd_len = 8;

	if (sdkp->lbpme == 0)
		return;

	buffer = kmalloc(vpd_len, GFP_KERNEL);

	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb2, buffer, vpd_len))
		goto out;

	/* Remember that the page was provided; sd_read_block_limits keys off this. */
	sdkp->lbpvpd	= 1;
	sdkp->lbpu	= (buffer[5] >> 7) & 1;	/* UNMAP */
	sdkp->lbpws	= (buffer[5] >> 6) & 1;	/* WRITE SAME(16) with UNMAP */
	sdkp->lbpws10	= (buffer[5] >> 5) & 1;	/* WRITE SAME(10) with UNMAP */

 out:
	kfree(buffer);
}

2988 2989
/*
 * sd_read_write_same - probe WRITE SAME support
 * @sdkp:   disk to query
 * @buffer: scratch buffer of at least SD_BUF_SIZE bytes
 *
 * Uses REPORT SUPPORTED OPERATION CODES to detect WRITE SAME(10)/(16)
 * support, honoring host and device quirks that forbid either the
 * probing command or WRITE SAME itself.
 */
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdev = sdkp->device;

	/* Host adapter cannot handle WRITE SAME at all: mark and bail. */
	if (sdev->host->no_write_same) {
		sdev->no_write_same = 1;

		return;
	}

	/* Probe RSOC with a command every device supports (INQUIRY). */
	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
		/* too large values might cause issues with arcmsr */
		int vpd_buf_len = 64;

		sdev->no_report_opcodes = 1;

		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
		 * CODES is unsupported and the device has an ATA
		 * Information VPD page (SAT).
		 */
		if (!scsi_get_vpd_page(sdev, 0x89, buffer, vpd_buf_len))
			sdev->no_write_same = 1;
	}

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
		sdkp->ws16 = 1;

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
		sdkp->ws10 = 1;
}

3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032
static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdev = sdkp->device;

	if (!sdev->security_supported)
		return;

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
			SECURITY_PROTOCOL_IN) == 1 &&
	    scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
			SECURITY_PROTOCOL_OUT) == 1)
		sdkp->security = 1;
}

3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045 3046 3047 3048 3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060 3061 3062 3063 3064 3065 3066 3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097 3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112
/* Decode a big-endian 64-bit LBA at @buf into 512-byte sector units. */
static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
{
	u64 lba = get_unaligned_be64(buf);

	return logical_to_sectors(sdkp->device, lba);
}

/**
 * sd_read_cpr - Query concurrent positioning ranges
 * @sdkp:	disk to query
 *
 * Parses the Concurrent Positioning Ranges VPD page (0xb9) and exposes
 * each range to the block layer as an independent access range.  On any
 * validation failure the disk's range information is cleared.
 */
static void sd_read_cpr(struct scsi_disk *sdkp)
{
	struct blk_independent_access_ranges *iars = NULL;
	unsigned char *buffer = NULL;
	unsigned int nr_cpr = 0;
	int i, vpd_len, buf_len = SD_BUF_SIZE;
	u8 *desc;

	/*
	 * We need to have the capacity set first for the block layer to be
	 * able to check the ranges.
	 */
	if (sdkp->first_scan)
		return;

	if (!sdkp->capacity)
		goto out;

	/*
	 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
	 * leading to a maximum page size of 64 + 256*32 bytes.
	 */
	buf_len = 64 + 256*32;
	buffer = kmalloc(buf_len, GFP_KERNEL);
	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
		goto out;

	/* We must have at least a 64B header and one 32B range descriptor */
	vpd_len = get_unaligned_be16(&buffer[2]) + 3;
	if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
		sd_printk(KERN_ERR, sdkp,
			  "Invalid Concurrent Positioning Ranges VPD page\n");
		goto out;
	}

	/* A single range carries no useful topology: treat as none. */
	nr_cpr = (vpd_len - 64) / 32;
	if (nr_cpr == 1) {
		nr_cpr = 0;
		goto out;
	}

	iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
	if (!iars) {
		nr_cpr = 0;
		goto out;
	}

	/* Descriptors must be numbered 0..nr_cpr-1 in order. */
	desc = &buffer[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		if (desc[0] != i) {
			sd_printk(KERN_ERR, sdkp,
				"Invalid Concurrent Positioning Range number\n");
			nr_cpr = 0;
			break;
		}

		iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
		iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
	}

 out:
	disk_set_independent_access_ranges(sdkp->disk, iars);
	if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
		sd_printk(KERN_NOTICE, sdkp,
			  "%u concurrent positioning ranges\n", nr_cpr);
		sdkp->nr_actuators = nr_cpr;
	}

	kfree(buffer);
}

3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124
/*
 * Determine the device's preferred I/O size for reads and writes
 * unless the reported value is unreasonably small, large, not a
 * multiple of the physical block size, or simply garbage.
 *
 * @sdkp:    disk being validated
 * @dev_max: upper bound in logical blocks for a single transfer
 *
 * Returns true when sdkp->opt_xfer_blocks may be used as the queue's
 * optimal I/O size; false (with a one-time warning) otherwise.
 */
static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
				      unsigned int dev_max)
{
	struct scsi_device *sdp = sdkp->device;
	unsigned int opt_xfer_bytes =
		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);

	/* Device did not report an optimal transfer size. */
	if (sdkp->opt_xfer_blocks == 0)
		return false;

	if (sdkp->opt_xfer_blocks > dev_max) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> dev_max (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, dev_max);
		return false;
	}

	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> sd driver limit (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
		return false;
	}

	if (opt_xfer_bytes < PAGE_SIZE) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes < " \
				"PAGE_SIZE (%u bytes)\n",
				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
		return false;
	}

	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes not a " \
				"multiple of physical block size (%u bytes)\n",
				opt_xfer_bytes, sdkp->physical_block_size);
		return false;
	}

	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
			opt_xfer_bytes);
	return true;
}

L
Linus Torvalds 已提交
3165 3166 3167 3168 3169 3170 3171 3172 3173
/**
 *	sd_revalidate_disk - called the first time a new disk is seen,
 *	performs disk spin up, read_capacity, etc.
 *	@disk: struct gendisk we care about
 *
 *	Re-reads everything the driver caches about the device (capacity,
 *	VPD pages, cache mode, zoned model) and derives the request-queue
 *	transfer limits from it.  Always returns 0; failures only leave
 *	the previous state in place.
 **/
static int sd_revalidate_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	struct request_queue *q = sdkp->disk->queue;
	sector_t old_capacity = sdkp->capacity;
	unsigned char *buffer;
	unsigned int dev_max, rw_max;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
				      "sd_revalidate_disk\n"));

	/*
	 * If the device is offline, don't try and read capacity or any
	 * of the other niceties.
	 */
	if (!scsi_device_online(sdp))
		goto out;

	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!buffer) {
		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
			  "allocation failure.\n");
		goto out;
	}

	sd_spinup_disk(sdkp);

	/*
	 * Without media there is no reason to ask; moreover, some devices
	 * react badly if we do.
	 */
	if (sdkp->media_present) {
		sd_read_capacity(sdkp, buffer);

		/*
		 * set the default to rotational.  All non-rotational devices
		 * support the block characteristics VPD page, which will
		 * cause this to be updated correctly and any device which
		 * doesn't support it should be treated as rotational.
		 */
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);

		if (scsi_device_supports_vpd(sdp)) {
			sd_read_block_provisioning(sdkp);
			sd_read_block_limits(sdkp);
			sd_read_block_characteristics(sdkp);
			sd_zbc_read_zones(sdkp, buffer);
			sd_read_cpr(sdkp);
		}

		sd_print_capacity(sdkp, old_capacity);

		sd_read_write_protect_flag(sdkp, buffer);
		sd_read_cache_type(sdkp, buffer);
		sd_read_app_tag_own(sdkp, buffer);
		sd_read_write_same(sdkp, buffer);
		sd_read_security(sdkp, buffer);
	}

	/*
	 * We now have all cache related info, determine how we deal
	 * with flush requests.
	 */
	sd_set_flush_flag(sdkp);

	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;

	/* Some devices report a maximum block count for READ/WRITE requests. */
	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);

	/* Prefer the device's reported optimal size when it passes validation. */
	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
	} else {
		q->limits.io_opt = 0;
		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
				      (sector_t)BLK_DEF_MAX_SECTORS);
	}

	/* Do not exceed controller limit */
	rw_max = min(rw_max, queue_max_hw_sectors(q));

	/*
	 * Only update max_sectors if previously unset or if the current value
	 * exceeds the capabilities of the hardware.
	 */
	if (sdkp->first_scan ||
	    q->limits.max_sectors > q->limits.max_dev_sectors ||
	    q->limits.max_sectors > q->limits.max_hw_sectors)
		q->limits.max_sectors = rw_max;

	sdkp->first_scan = 0;

	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
	sd_config_write_same(sdkp);
	kfree(buffer);

	/*
	 * For a zoned drive, revalidating the zones can be done only once
	 * the gendisk capacity is set. So if this fails, set back the gendisk
	 * capacity to 0.
	 */
	if (sd_zbc_revalidate_zones(sdkp))
		set_capacity_and_notify(disk, 0);

 out:
	return 0;
}

3283 3284 3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296 3297 3298 3299 3300 3301 3302
/**
 *	sd_unlock_native_capacity - unlock native capacity
 *	@disk: struct gendisk to set capacity for
 *
 *	Block layer calls this function if it detects that partitions
 *	on @disk reach beyond the end of the device.  If the SCSI host
 *	implements ->unlock_native_capacity() method, it's invoked to
 *	give it a chance to adjust the device capacity.
 *
 *	CONTEXT:
 *	Defined by block layer.  Might sleep.
 */
static void sd_unlock_native_capacity(struct gendisk *disk)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;

	/* Hosts that cannot unlock simply leave the method unset. */
	if (!sdev->host->hostt->unlock_native_capacity)
		return;

	sdev->host->hostt->unlock_native_capacity(sdev);
}

3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314
/**
 *	sd_format_disk_name - format disk name
 *	@prefix: name prefix - ie. "sd" for SCSI disks
 *	@index: index of the disk to format name for
 *	@buf: output buffer
 *	@buflen: length of the output buffer
 *
 *	SCSI disk names starts at sda.  The 26th device is sdz and the
 *	27th is sdaa.  The last one for two lettered suffix is sdzz
 *	which is followed by sdaaa.
 *
 *	This is basically 26 base counting with one extra 'nil' entry
 *	at the beginning from the second digit on and can be
 *	determined using similar method as 26 base conversion with the
 *	index shifted -1 after each digit is computed.
 *
 *	CONTEXT:
 *	Don't care.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *suffix_start = buf + strlen(prefix);
	char *bufend = buf + buflen;
	char *cur = bufend - 1;

	/* Build the letter suffix right-to-left at the end of the buffer. */
	*cur = '\0';
	for (;;) {
		if (cur == suffix_start)
			return -EINVAL;
		*--cur = 'a' + (index % radix);
		/* Shift by -1 so 'a' doubles as both digit 0 and "no digit". */
		index = index / radix - 1;
		if (index < 0)
			break;
	}

	/* Slide the suffix up against the prefix, then write the prefix. */
	memmove(suffix_start, cur, bufend - cur);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

L
Linus Torvalds 已提交
3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363
/**
 *	sd_probe - called during driver initialization and whenever a
 *	new scsi device is attached to the system. It is called once
 *	for each scsi device (not just disks) present.
 *	@dev: pointer to device object
 *
 *	Returns 0 if successful (or not interested in this scsi device 
 *	(e.g. scanner)); 1 when there is an error.
 *
 *	Note: this function is invoked from the scsi mid-level.
 *	This function sets up the mapping between a given 
 *	<host,channel,id,lun> (found in sdp) and new device name 
 *	(e.g. /dev/sda). More precisely it is the block device major 
 *	and minor number that is chosen here.
 *
 *	Assume sd_probe is not re-entrant (for time being)
 *	Also think about sd_probe() and sd_remove() running coincidentally.
 **/
static int sd_probe(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	struct scsi_disk *sdkp;
	struct gendisk *gd;
	int index;
	int error;

	scsi_autopm_get_device(sdp);
	error = -ENODEV;
	/* Only claim device types this driver actually handles. */
	if (sdp->type != TYPE_DISK &&
	    sdp->type != TYPE_ZBC &&
	    sdp->type != TYPE_MOD &&
	    sdp->type != TYPE_RBC)
		goto out;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
		sdev_printk(KERN_WARNING, sdp,
			    "Unsupported ZBC host-managed device.\n");
		goto out;
	}

	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
					"sd_probe\n"));

	error = -ENOMEM;
	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
	if (!sdkp)
		goto out;

	gd = __alloc_disk_node(sdp->request_queue, NUMA_NO_NODE,
			       &sd_bio_compl_lkclass);
	if (!gd)
		goto out_free;

	/* Reserve a unique index; it determines major/minor and the name. */
	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
	if (index < 0) {
		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
		goto out_put;
	}

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error) {
		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
		goto out_free_index;
	}

	sdkp->device = sdp;
	sdkp->disk = gd;
	sdkp->index = index;
	sdkp->max_retries = SD_MAX_RETRIES;
	atomic_set(&sdkp->openers, 0);
	atomic_set(&sdkp->device->ioerr_cnt, 0);

	/* Respect a timeout already configured by the transport/host. */
	if (!sdp->request_queue->rq_timeout) {
		if (sdp->type != TYPE_MOD)
			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
		else
			blk_queue_rq_timeout(sdp->request_queue,
					     SD_MOD_TIMEOUT);
	}

	device_initialize(&sdkp->disk_dev);
	sdkp->disk_dev.parent = get_device(dev);
	sdkp->disk_dev.class = &sd_disk_class;
	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));

	error = device_add(&sdkp->disk_dev);
	if (error) {
		/* device_initialize() took a reference; drop it on failure. */
		put_device(&sdkp->disk_dev);
		goto out;
	}

	dev_set_drvdata(dev, sdkp);

	gd->major = sd_major((index & 0xf0) >> 4);
	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
	gd->minors = SD_MINORS;

	gd->fops = &sd_fops;
	gd->private_data = sdkp;

	/* defaults, until the device tells us otherwise */
	sdp->sector_size = 512;
	sdkp->capacity = 0;
	sdkp->media_present = 1;
	sdkp->write_prot = 0;
	sdkp->cache_override = 0;
	sdkp->WCE = 0;
	sdkp->RCD = 0;
	sdkp->ATO = 0;
	sdkp->first_scan = 1;
	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

	sd_revalidate_disk(gd);

	if (sdp->removable) {
		gd->flags |= GENHD_FL_REMOVABLE;
		gd->events |= DISK_EVENT_MEDIA_CHANGE;
		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
	}

	blk_pm_runtime_init(sdp->request_queue, dev);
	if (sdp->rpm_autosuspend) {
		pm_runtime_set_autosuspend_delay(dev,
			sdp->host->hostt->rpm_autosuspend_delay);
	}

	error = device_add_disk(dev, gd, NULL);
	if (error) {
		put_device(&sdkp->disk_dev);
		blk_cleanup_disk(gd);
		goto out;
	}

	if (sdkp->capacity)
		sd_dif_config_host(sdkp);

	/* Second pass now that protection information is configured. */
	sd_revalidate_disk(gd);

	if (sdkp->security) {
		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
		if (sdkp->opal_dev)
			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
	}

	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
		  sdp->removable ? "removable " : "");
	scsi_autopm_put_device(sdp);

	return 0;

 out_free_index:
	ida_free(&sd_index_ida, index);
 out_put:
	put_disk(gd);
 out_free:
	sd_zbc_release_disk(sdkp);
	kfree(sdkp);
 out:
	scsi_autopm_put_device(sdp);
	return error;
}

/**
 *	sd_remove - called whenever a scsi disk (previously recognized by
 *	sd_probe) is detached from the system. It is called (potentially
 *	multiple times) during sd module unload.
 *	@dev: pointer to device object
 *
 *	Note: this function is invoked from the scsi mid-level.
 *	This function potentially frees up a device name (e.g. /dev/sdc)
 *	that could be re-used by a subsequent sd_probe().
 *	This function is not called when the built-in sd driver is "exit-ed".
 **/
static int sd_remove(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	/* Keep the device resumed while we tear it down. */
	scsi_autopm_get_device(sdkp->device);

	device_del(&sdkp->disk_dev);
	del_gendisk(sdkp->disk);
	sd_shutdown(dev);

	/* Drops the final gendisk reference taken in sd_probe(). */
	put_disk(sdkp->disk);
	return 0;
}

3536
/*
 * scsi_disk_release - release callback for the scsi_disk class device
 * @dev: embedded disk_dev of the scsi_disk being destroyed
 *
 * Runs when the last reference to the disk_dev is dropped: returns the
 * name index to the IDA, releases zoned-disk state, drops the reference
 * on the underlying SCSI device and frees the scsi_disk itself.
 */
static void scsi_disk_release(struct device *dev)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	/* Index may now be reused by a future sd_probe(). */
	ida_free(&sd_index_ida, sdkp->index);
	sd_zbc_release_disk(sdkp);
	put_device(&sdkp->device->sdev_gendev);
	free_opal_dev(sdkp->opal_dev);

	kfree(sdkp);
}

3548
/*
 * sd_start_stop_device - issue START STOP UNIT to spin the disk up or down
 * @sdkp:  target disk
 * @start: non-zero to start (spin up), zero to stop (spin down)
 *
 * Returns 0 on success (including "medium not present", which is not an
 * error for this purpose), -ENODEV if the device is offline, -EIO on any
 * other failure.
 */
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp = sdkp->device;
	int res;

	if (start)
		cmd[4] |= 1;	/* START */

	if (sdp->start_stop_pwr_cond)
		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */

	if (!scsi_device_online(sdp))
		return -ENODEV;

	res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
			SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
	if (res) {
		sd_print_result(sdkp, "Start/Stop Unit failed", res);
		if (res > 0 && scsi_sense_valid(&sshdr)) {
			sd_print_sense_hdr(sdkp, &sshdr);
			/* 0x3a is medium not present */
			if (sshdr.asc == 0x3a)
				res = 0;
		}
	}

	/* SCSI error codes must not go to the generic layer */
	if (res)
		return -EIO;

	return 0;
}

L
Linus Torvalds 已提交
3583 3584 3585 3586 3587 3588 3589
/*
 * Send a SYNCHRONIZE CACHE instruction down to the device through
 * the normal SCSI command structure.  Wait for the command to
 * complete.
 *
 * Called at system shutdown/reboot; also flushes the write cache and,
 * except on restart, spins down disks the driver manages.
 */
static void sd_shutdown(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	if (!sdkp)
		return;         /* this can happen */

	/* An already-suspended device has nothing cached to flush. */
	if (pm_runtime_suspended(dev))
		return;

	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		sd_sync_cache(sdkp, NULL);
	}

	/* Don't spin down on restart: the disk is needed again immediately. */
	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		sd_start_stop_device(sdkp, 0);
	}
}
L
Linus Torvalds 已提交
3608

3609
/*
 * sd_suspend_common - shared suspend path for system and runtime PM
 * @dev:                device being suspended
 * @ignore_stop_errors: when true, a failed spin-down does not fail suspend
 *                      (used for system sleep)
 *
 * Flushes the write cache if enabled, then spins the disk down when the
 * driver manages start/stop.  Returns 0 or a negative errno.
 */
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_sense_hdr sshdr;
	int ret = 0;

	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
		return 0;

	if (sdkp->WCE && sdkp->media_present) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		ret = sd_sync_cache(sdkp, &sshdr);

		if (ret) {
			/* ignore OFFLINE device */
			if (ret == -ENODEV)
				return 0;

			if (!scsi_sense_valid(&sshdr) ||
			    sshdr.sense_key != ILLEGAL_REQUEST)
				return ret;

			/*
			 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
			 * doesn't support sync. There's not much to do and
			 * suspend shouldn't fail.
			 */
			ret = 0;
		}
	}

	if (sdkp->device->manage_start_stop) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		/* an error is not worth aborting a system sleep */
		ret = sd_start_stop_device(sdkp, 0);
		if (ignore_stop_errors)
			ret = 0;
	}

	return ret;
}

3653 3654
static int sd_suspend_system(struct device *dev)
{
3655 3656 3657
	if (pm_runtime_suspended(dev))
		return 0;

3658 3659 3660 3661 3662 3663 3664 3665
	return sd_suspend_common(dev, true);
}

/* Runtime suspend: spin-down failures must be reported, not ignored. */
static int sd_suspend_runtime(struct device *dev)
{
	return sd_suspend_common(dev, false);
}

3666 3667
/*
 * sd_resume - shared resume path
 * @dev: device being resumed
 *
 * Spins the disk back up when the driver manages start/stop and, on
 * success, unlocks any Opal-locked ranges.  Returns 0 or negative errno.
 */
static int sd_resume(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	/* Devices not stopped by sd_suspend_common() need no start. */
	if (!sdkp->device->manage_start_stop)
		return 0;

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
	ret = sd_start_stop_device(sdkp, 1);
	if (!ret)
		opal_unlock_from_suspend(sdkp->opal_dev);
	return ret;
}

3684 3685
/* System-sleep resume: leave runtime-suspended disks suspended. */
static int sd_resume_system(struct device *dev)
{
	return pm_runtime_suspended(dev) ? 0 : sd_resume(dev);
}

3692 3693 3694
/*
 * sd_resume_runtime - runtime-PM resume
 * @dev: device being resumed
 *
 * Optionally clears stale sense data (for devices flagged with
 * ignore_media_change) before performing the common resume.
 */
static int sd_resume_runtime(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_device *sdp;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	sdp = sdkp->device;

	if (sdp->ignore_media_change) {
		/* clear the device's sense data */
		static const u8 cmd[10] = { REQUEST_SENSE };

		if (scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL,
				 NULL, sdp->request_queue->rq_timeout, 1, 0,
				 RQF_PM, NULL))
			sd_printk(KERN_NOTICE, sdkp,
				  "Failed to clear sense data\n");
	}

	return sd_resume(dev);
}

L
Linus Torvalds 已提交
3716 3717 3718 3719 3720 3721 3722 3723
/**
 *	init_sd - entry point for this driver (both when built in or when
 *	a module).
 *
 *	Note: this function registers this driver with the scsi mid-level.
 **/
static int __init init_sd(void)
{
	int majors = 0, i, err;

	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

	/* Grab as many of the sd major numbers as are still available. */
	for (i = 0; i < SD_MAJORS; i++) {
		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
			continue;
		majors++;
	}

	if (!majors)
		return -ENODEV;

	err = class_register(&sd_disk_class);
	if (err)
		goto err_out;

	sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
					 0, 0, NULL);
	if (!sd_cdb_cache) {
		printk(KERN_ERR "sd: can't init extended cdb cache\n");
		err = -ENOMEM;
		goto err_out_class;
	}

	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
	if (!sd_page_pool) {
		printk(KERN_ERR "sd: can't init discard page pool\n");
		err = -ENOMEM;
		goto err_out_cache;
	}

	err = scsi_register_driver(&sd_template.gendrv);
	if (err)
		goto err_out_driver;

	return 0;

	/* Unwind in exact reverse order of the setup above. */
err_out_driver:
	mempool_destroy(sd_page_pool);

err_out_cache:
	kmem_cache_destroy(sd_cdb_cache);

err_out_class:
	class_unregister(&sd_disk_class);
err_out:
	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
	return err;
}

/**
 *	exit_sd - exit point for this driver (when it is a module).
 *
 *	Note: this function unregisters this driver from the scsi mid-level.
 **/
static void __exit exit_sd(void)
{
	int i;

	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

	/* Tear down in reverse order of init_sd(). */
	scsi_unregister_driver(&sd_template.gendrv);
	mempool_destroy(sd_page_pool);
	kmem_cache_destroy(sd_cdb_cache);

	class_unregister(&sd_disk_class);

	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
}

module_init(init_sd);
module_exit(exit_sd);
3799

3800
void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
3801
{
3802 3803
	scsi_print_sense_hdr(sdkp->device,
			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
3804 3805
}

3806
void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3807
{
H
Hannes Reinecke 已提交
3808 3809
	const char *hb_string = scsi_hostbyte_string(result);

3810
	if (hb_string)
H
Hannes Reinecke 已提交
3811 3812 3813
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
			  hb_string ? hb_string : "invalid",
3814
			  "DRIVER_OK");
H
Hannes Reinecke 已提交
3815 3816
	else
		sd_printk(KERN_INFO, sdkp,
3817 3818
			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
			  msg, host_byte(result), "DRIVER_OK");
3819
}