sd.c 101.7 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
L
Linus Torvalds 已提交
2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47
/*
 *      sd.c Copyright (C) 1992 Drew Eckhardt
 *           Copyright (C) 1993, 1994, 1995, 1999 Eric Youngdale
 *
 *      Linux scsi disk driver
 *              Initial versions: Drew Eckhardt
 *              Subsequent revisions: Eric Youngdale
 *	Modification history:
 *       - Drew Eckhardt <drew@colorado.edu> original
 *       - Eric Youngdale <eric@andante.org> add scatter-gather, multiple 
 *         outstanding request, and other enhancements.
 *         Support loadable low-level scsi drivers.
 *       - Jirka Hanika <geo@ff.cuni.cz> support more scsi disks using 
 *         eight major numbers.
 *       - Richard Gooch <rgooch@atnf.csiro.au> support devfs.
 *	 - Torben Mathiasen <tmm@image.dk> Resource allocation fixes in 
 *	   sd_init and cleanups.
 *	 - Alex Davis <letmein@erols.com> Fix problem where partition info
 *	   not being read in sd_open. Fix problem where removable media 
 *	   could be ejected after sd_open.
 *	 - Douglas Gilbert <dgilbert@interlog.com> cleanup for lk 2.5.x
 *	 - Badari Pulavarty <pbadari@us.ibm.com>, Matthew Wilcox 
 *	   <willy@debian.org>, Kurt Garloff <garloff@suse.de>: 
 *	   Support 32k/1M disks.
 *
 *	Logging policy (needs CONFIG_SCSI_LOGGING defined):
 *	 - setting up transfer: SCSI_LOG_HLQUEUE levels 1 and 2
 *	 - end of transfer (bh + scsi_lib): SCSI_LOG_HLCOMPLETE level 1
 *	 - entering sd_ioctl: SCSI_LOG_IOCTL level 1
 *	 - entering other commands: SCSI_LOG_HLQUEUE level 3
 *	Note: when the logging level is set by the user, it must be greater
 *	than the level indicated above to trigger output.	
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bio.h>
#include <linux/hdreg.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/sysfs.h>
48
#include <linux/blk-pm.h>
L
Linus Torvalds 已提交
49
#include <linux/delay.h>
50
#include <linux/major.h>
51
#include <linux/mutex.h>
52
#include <linux/string_helpers.h>
53
#include <linux/slab.h>
54
#include <linux/sed-opal.h>
55
#include <linux/pm_runtime.h>
56
#include <linux/pr.h>
57
#include <linux/t10-pi.h>
58
#include <linux/uaccess.h>
59
#include <asm/unaligned.h>
L
Linus Torvalds 已提交
60 61 62 63 64 65 66 67 68 69 70

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsicam.h>

71
#include "sd.h"
72
#include "scsi_priv.h"
L
Linus Torvalds 已提交
73 74
#include "scsi_logging.h"

75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94
MODULE_AUTHOR("Eric Youngdale");
MODULE_DESCRIPTION("SCSI disk (sd) driver");
MODULE_LICENSE("GPL");

MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK0_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK1_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK2_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK3_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK4_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK5_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK6_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK7_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK8_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK9_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK10_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK11_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK12_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK13_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK14_MAJOR);
MODULE_ALIAS_BLOCKDEV_MAJOR(SCSI_DISK15_MAJOR);
95 96 97
MODULE_ALIAS_SCSI_DEVICE(TYPE_DISK);
MODULE_ALIAS_SCSI_DEVICE(TYPE_MOD);
MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
98
MODULE_ALIAS_SCSI_DEVICE(TYPE_ZBC);
99

100
#define SD_MINORS	16
101

102
static void sd_config_discard(struct scsi_disk *, unsigned int);
103
static void sd_config_write_same(struct scsi_disk *);
104
static int  sd_revalidate_disk(struct gendisk *);
105
static void sd_unlock_native_capacity(struct gendisk *disk);
106 107 108
static int  sd_probe(struct device *);
static int  sd_remove(struct device *);
static void sd_shutdown(struct device *);
109 110
static int sd_suspend_system(struct device *);
static int sd_suspend_runtime(struct device *);
111
static int sd_resume_system(struct device *);
112
static int sd_resume_runtime(struct device *);
113
static void sd_rescan(struct device *);
114
static blk_status_t sd_init_command(struct scsi_cmnd *SCpnt);
115
static void sd_uninit_command(struct scsi_cmnd *SCpnt);
116
static int sd_done(struct scsi_cmnd *);
117
static void sd_eh_reset(struct scsi_cmnd *);
118
static int sd_eh_action(struct scsi_cmnd *, int);
119
static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer);
120
static void scsi_disk_release(struct device *cdev);
121

122
static DEFINE_IDA(sd_index_ida);
L
Linus Torvalds 已提交
123

124
static mempool_t *sd_page_pool;
125
static struct lock_class_key sd_bio_compl_lkclass;
126

127 128 129 130 131
/*
 * Cache type names accepted/reported by the "cache_type" sysfs attribute.
 * Indexed by (WCE << 1) | RCD, matching the decode in cache_type_store().
 */
static const char *sd_cache_types[] = {
	"write through", "none", "write back",
	"write back, no read (daft)"
};

132 133
static void sd_set_flush_flag(struct scsi_disk *sdkp)
{
134
	bool wc = false, fua = false;
135 136

	if (sdkp->WCE) {
137
		wc = true;
138
		if (sdkp->DPOFUA)
139
			fua = true;
140 141
	}

142
	blk_queue_write_cache(sdkp->disk->queue, wc, fua);
143 144
}

145
/*
 * cache_type (sysfs, store): change the drive's caching mode.
 *
 * Accepts one of the sd_cache_types[] strings, optionally prefixed with
 * "temporary " to change only the kernel's view without issuing a MODE
 * SELECT to the device.  Returns @count on success, -EINVAL on parse or
 * command failure.
 */
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	int ct, rcd, wce, sp;
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	char buffer[64];
	char *buffer_data;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	static const char temp[] = "temporary ";
	int len;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		/* no cache control on RBC devices; theoretically they
		 * can do it, but there's probably so many exceptions
		 * it's not worth the risk */
		return -EINVAL;

	/* "temporary " prefix: update cached state only, skip MODE SELECT */
	if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
		buf += sizeof(temp) - 1;
		sdkp->cache_override = 1;
	} else {
		sdkp->cache_override = 0;
	}

	ct = sysfs_match_string(sd_cache_types, buf);
	if (ct < 0)
		return -EINVAL;

	/* ct encodes (WCE << 1) | RCD; WCE is forced off on write-protected media */
	rcd = ct & 0x01 ? 1 : 0;
	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;

	if (sdkp->cache_override) {
		sdkp->WCE = wce;
		sdkp->RCD = rcd;
		sd_set_flush_flag(sdkp);
		return count;
	}

	/* Read the caching mode page (0x08) so we can modify it in place */
	if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
			    sdkp->max_retries, &data, NULL))
		return -EINVAL;
	len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
		  data.block_descriptor_length);
	buffer_data = buffer + data.header_length +
		data.block_descriptor_length;
	/* Rewrite only the WCE (bit 2) and RCD (bit 0) bits */
	buffer_data[2] &= ~0x05;
	buffer_data[2] |= wce << 2 | rcd;
	/* Preserve the Savable Pages bit, then clear PS before MODE SELECT */
	sp = buffer_data[0] & 0x80 ? 1 : 0;
	buffer_data[0] &= ~0x80;

	/*
	 * Ensure WP, DPOFUA, and RESERVED fields are cleared in
	 * received mode parameter buffer before doing MODE SELECT.
	 */
	data.device_specific = 0;

	if (scsi_mode_select(sdp, 1, sp, buffer_data, len, SD_TIMEOUT,
			     sdkp->max_retries, &data, &sshdr)) {
		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);
		return -EINVAL;
	}
	sd_revalidate_disk(sdkp->disk);
	return count;
}

214
static ssize_t
215 216 217 218 219 220
manage_start_stop_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;

221
	return sprintf(buf, "%u\n", sdp->manage_start_stop);
222 223 224 225 226
}

/*
 * manage_start_stop (sysfs, store): enable/disable sd-managed START/STOP
 * UNIT handling.  Requires CAP_SYS_ADMIN; accepts any kstrtobool() value.
 */
static ssize_t
manage_start_stop_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct scsi_device *sdp = to_scsi_disk(dev)->device;
	bool val;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (kstrtobool(buf, &val))
		return -EINVAL;

	sdp->manage_start_stop = val;

	return count;
}
static DEVICE_ATTR_RW(manage_start_stop);
243

244
static ssize_t
245 246 247 248
allow_restart_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

249
	return sprintf(buf, "%u\n", sdkp->device->allow_restart);
250 251 252 253 254
}

/*
 * allow_restart (sysfs, store): allow/forbid restarting a stopped device.
 * Requires CAP_SYS_ADMIN; only valid for disk and host-managed zoned types.
 */
static ssize_t
allow_restart_store(struct device *dev, struct device_attribute *attr,
		    const char *buf, size_t count)
{
	struct scsi_device *sdp = to_scsi_disk(dev)->device;
	bool val;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	if (kstrtobool(buf, &val))
		return -EINVAL;

	sdp->allow_restart = val;

	return count;
}
static DEVICE_ATTR_RW(allow_restart);
274

275
static ssize_t
276
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
277
{
278
	struct scsi_disk *sdkp = to_scsi_disk(dev);
279 280
	int ct = sdkp->RCD + 2*sdkp->WCE;

281
	return sprintf(buf, "%s\n", sd_cache_types[ct]);
282
}
283
static DEVICE_ATTR_RW(cache_type);
284

285
static ssize_t
286
FUA_show(struct device *dev, struct device_attribute *attr, char *buf)
287
{
288
	struct scsi_disk *sdkp = to_scsi_disk(dev);
289

290
	return sprintf(buf, "%u\n", sdkp->DPOFUA);
291
}
292
static DEVICE_ATTR_RO(FUA);
293

294
static ssize_t
295 296
protection_type_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
297 298 299
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

300
	return sprintf(buf, "%u\n", sdkp->protection_type);
301 302
}

303
static ssize_t
304 305
protection_type_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
306 307 308 309 310 311 312 313 314 315 316 317 318
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	unsigned int val;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &val);

	if (err)
		return err;

319
	if (val <= T10_PI_TYPE3_PROTECTION)
320 321 322 323
		sdkp->protection_type = val;

	return count;
}
324
static DEVICE_ATTR_RW(protection_type);
325

326
static ssize_t
327 328
protection_mode_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
329 330 331 332 333 334 335 336
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned int dif, dix;

	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);

337
	if (!dix && scsi_host_dix_capable(sdp->host, T10_PI_TYPE0_PROTECTION)) {
338 339 340 341 342
		dif = 0;
		dix = 1;
	}

	if (!dif && !dix)
343
		return sprintf(buf, "none\n");
344

345
	return sprintf(buf, "%s%u\n", dix ? "dix" : "dif", dif);
346
}
347
static DEVICE_ATTR_RO(protection_mode);
348

349
static ssize_t
350
app_tag_own_show(struct device *dev, struct device_attribute *attr, char *buf)
351 352 353
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

354
	return sprintf(buf, "%u\n", sdkp->ATO);
355
}
356
static DEVICE_ATTR_RO(app_tag_own);
357

358
static ssize_t
359 360
thin_provisioning_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
361 362 363
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

364
	return sprintf(buf, "%u\n", sdkp->lbpme);
365
}
366
static DEVICE_ATTR_RO(thin_provisioning);
367

368
/*
 * Names of the logical block provisioning (discard) modes, as shown by
 * and accepted from the "provisioning_mode" sysfs attribute.
 * sysfs_match_string() requires dense arrays.
 */
static const char *lbp_mode[] = {
	[SD_LBP_FULL]		= "full",
	[SD_LBP_UNMAP]		= "unmap",
	[SD_LBP_WS16]		= "writesame_16",
	[SD_LBP_WS10]		= "writesame_10",
	[SD_LBP_ZERO]		= "writesame_zero",
	[SD_LBP_DISABLE]	= "disabled",
};

static ssize_t
379 380
provisioning_mode_show(struct device *dev, struct device_attribute *attr,
		       char *buf)
381 382 383
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

384
	return sprintf(buf, "%s\n", lbp_mode[sdkp->provisioning_mode]);
385 386 387
}

static ssize_t
388 389
provisioning_mode_store(struct device *dev, struct device_attribute *attr,
			const char *buf, size_t count)
390 391 392
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
393
	int mode;
394 395 396 397

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

398 399 400 401 402
	if (sd_is_zoned(sdkp)) {
		sd_config_discard(sdkp, SD_LBP_DISABLE);
		return count;
	}

403 404 405
	if (sdp->type != TYPE_DISK)
		return -EINVAL;

406 407
	mode = sysfs_match_string(lbp_mode, buf);
	if (mode < 0)
408 409
		return -EINVAL;

410 411
	sd_config_discard(sdkp, mode);

412
	return count;
413
}
414
static DEVICE_ATTR_RW(provisioning_mode);
415

416
/*
 * Names of the write-zeroes strategies, as shown by and accepted from
 * the "zeroing_mode" sysfs attribute.
 * sysfs_match_string() requires dense arrays.
 */
static const char *zeroing_mode[] = {
	[SD_ZERO_WRITE]		= "write",
	[SD_ZERO_WS]		= "writesame",
	[SD_ZERO_WS16_UNMAP]	= "writesame_16_unmap",
	[SD_ZERO_WS10_UNMAP]	= "writesame_10_unmap",
};

static ssize_t
zeroing_mode_show(struct device *dev, struct device_attribute *attr,
		  char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

430
	return sprintf(buf, "%s\n", zeroing_mode[sdkp->zeroing_mode]);
431 432 433 434 435 436 437
}

/*
 * zeroing_mode (sysfs, store): select the write-zeroes strategy.
 * Requires CAP_SYS_ADMIN; the value must name a zeroing_mode[] entry.
 */
static ssize_t
zeroing_mode_store(struct device *dev, struct device_attribute *attr,
		   const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int idx;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	idx = sysfs_match_string(zeroing_mode, buf);
	if (idx < 0)
		return -EINVAL;

	sdkp->zeroing_mode = idx;

	return count;
}
static DEVICE_ATTR_RW(zeroing_mode);

453
static ssize_t
454 455
max_medium_access_timeouts_show(struct device *dev,
				struct device_attribute *attr, char *buf)
456 457 458
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

459
	return sprintf(buf, "%u\n", sdkp->max_medium_access_timeouts);
460 461 462
}

static ssize_t
463 464 465
max_medium_access_timeouts_store(struct device *dev,
				 struct device_attribute *attr, const char *buf,
				 size_t count)
466 467 468 469 470 471 472 473 474 475 476
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	err = kstrtouint(buf, 10, &sdkp->max_medium_access_timeouts);

	return err ? err : count;
}
477
static DEVICE_ATTR_RW(max_medium_access_timeouts);
478

479
static ssize_t
480 481
max_write_same_blocks_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
482 483 484
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

485
	return sprintf(buf, "%u\n", sdkp->max_ws_blocks);
486 487 488
}

/*
 * max_write_same_blocks (sysfs, store): tune the WRITE SAME block limit.
 *
 * 0 disables WRITE SAME entirely; 1..SD_MAX_WS16_BLOCKS re-enables it
 * with the given limit.  Values above SD_MAX_WS16_BLOCKS are silently
 * ignored (the write still succeeds).  Requires CAP_SYS_ADMIN.
 */
static ssize_t
max_write_same_blocks_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdp = sdkp->device;
	unsigned long max;
	int err;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return -EINVAL;

	err = kstrtoul(buf, 10, &max);

	if (err)
		return err;

	if (max == 0)
		sdp->no_write_same = 1;
	else if (max <= SD_MAX_WS16_BLOCKS) {
		sdp->no_write_same = 0;
		sdkp->max_ws_blocks = max;
	}

	/* Re-derive the queue limits and zeroing mode from the new value */
	sd_config_write_same(sdkp);

	return count;
}
static DEVICE_ATTR_RW(max_write_same_blocks);

521 522 523 524 525 526 527 528 529 530 531 532 533 534 535
/*
 * zoned_cap (sysfs, show): report the zoned model — "host-managed" for
 * ZBC devices, "host-aware"/"drive-managed" per the ZONED field, else "none".
 */
static ssize_t
zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	/* sysfs_emit() is PAGE_SIZE-aware; preferred over sprintf() in show() */
	if (sdkp->device->type == TYPE_ZBC)
		return sysfs_emit(buf, "host-managed\n");
	if (sdkp->zoned == 1)
		return sysfs_emit(buf, "host-aware\n");
	if (sdkp->zoned == 2)
		return sysfs_emit(buf, "drive-managed\n");
	return sysfs_emit(buf, "none\n");
}
static DEVICE_ATTR_RO(zoned_cap);

536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
/*
 * max_retries (sysfs, store): set the per-command retry budget.
 * SCSI_CMD_RETRIES_NO_LIMIT (-1) means unlimited; anything above
 * SD_MAX_RETRIES is rejected with -EINVAL.
 */
static ssize_t
max_retries_store(struct device *dev, struct device_attribute *attr,
		  const char *buf, size_t count)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);
	struct scsi_device *sdev = sdkp->device;
	int retries, err;

	err = kstrtoint(buf, 10, &retries);
	if (err)
		return err;

	if (retries != SCSI_CMD_RETRIES_NO_LIMIT && retries > SD_MAX_RETRIES) {
		sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
			    SD_MAX_RETRIES);
		return -EINVAL;
	}

	sdkp->max_retries = retries;
	return count;
}

/* max_retries (sysfs, show): report the per-command retry budget. */
static ssize_t
max_retries_show(struct device *dev, struct device_attribute *attr,
		 char *buf)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	/* sysfs_emit() is PAGE_SIZE-aware; preferred over sprintf() in show() */
	return sysfs_emit(buf, "%d\n", sdkp->max_retries);
}

static DEVICE_ATTR_RW(max_retries);

569 570 571 572 573 574 575 576 577 578
/* All sysfs attributes attached to a scsi_disk device. */
static struct attribute *sd_disk_attrs[] = {
	&dev_attr_cache_type.attr,
	&dev_attr_FUA.attr,
	&dev_attr_allow_restart.attr,
	&dev_attr_manage_start_stop.attr,
	&dev_attr_protection_type.attr,
	&dev_attr_protection_mode.attr,
	&dev_attr_app_tag_own.attr,
	&dev_attr_thin_provisioning.attr,
	&dev_attr_provisioning_mode.attr,
	&dev_attr_zeroing_mode.attr,
	&dev_attr_max_write_same_blocks.attr,
	&dev_attr_max_medium_access_timeouts.attr,
	&dev_attr_zoned_cap.attr,
	&dev_attr_max_retries.attr,
	NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
587 588 589 590

/* "scsi_disk" device class; exposes sd_disk_groups and releases via scsi_disk_release(). */
static struct class sd_disk_class = {
	.name		= "scsi_disk",
	.owner		= THIS_MODULE,
	.dev_release	= scsi_disk_release,
	.dev_groups	= sd_disk_groups,
};
L
Linus Torvalds 已提交
594

595
/*
 * Power-management callbacks: system sleep transitions use the
 * *_system handlers, runtime PM uses the *_runtime handlers.
 */
static const struct dev_pm_ops sd_pm_ops = {
	.suspend		= sd_suspend_system,
	.resume			= sd_resume_system,
	.poweroff		= sd_suspend_system,
	.restore		= sd_resume_system,
	.runtime_suspend	= sd_suspend_runtime,
	.runtime_resume		= sd_resume_runtime,
};

L
Linus Torvalds 已提交
604 605 606
/* Upper-level SCSI driver template registering sd with the midlayer. */
static struct scsi_driver sd_template = {
	.gendrv = {
		.name		= "sd",
		.owner		= THIS_MODULE,
		.probe		= sd_probe,
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
		.remove		= sd_remove,
		.shutdown	= sd_shutdown,
		.pm		= &sd_pm_ops,
	},
	.rescan			= sd_rescan,
	.init_command		= sd_init_command,
	.uninit_command		= sd_uninit_command,
	.done			= sd_done,
	.eh_action		= sd_eh_action,
	.eh_reset		= sd_eh_reset,
};

622
/*
 * Dummy blk_register_region() probe callback.
 * Don't request a new module, as that could deadlock in multipath
 * environment.
 */
static void sd_default_probe(dev_t devt)
{
}

L
Linus Torvalds 已提交
630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658
/*
 * Device no to disk mapping:
 *
 *       major         disc2     disc  p1
 *   |............|.............|....|....| <- dev_t
 *    31        20 19          8 7  4 3  0
 *
 * Inside a major, we have 16k disks, however mapped non-
 * contiguously. The first 16 disks are for major0, the next
 * ones with major1, ... Disk 256 is for major0 again, disk 272
 * for major1, ...
 * As we stay compatible with our numbering scheme, we can reuse
 * the well-know SCSI majors 8, 65--71, 136--143.
 */
static int sd_major(int major_idx)
{
	if (major_idx == 0)
		return SCSI_DISK0_MAJOR;
	if (major_idx >= 1 && major_idx <= 7)
		return SCSI_DISK1_MAJOR + (major_idx - 1);
	if (major_idx >= 8 && major_idx <= 15)
		return SCSI_DISK8_MAJOR + (major_idx - 8);
	/* callers never pass an index outside 0..15 */
	BUG();
	return 0;	/* shut up gcc */
}

659 660 661 662
#ifdef CONFIG_BLK_SED_OPAL
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
		size_t len, bool send)
{
663 664
	struct scsi_disk *sdkp = data;
	struct scsi_device *sdev = sdkp->device;
665
	u8 cdb[12] = { 0, };
666 667 668
	const struct scsi_exec_args exec_args = {
		.req_flags = BLK_MQ_REQ_PM,
	};
669 670 671 672 673 674 675
	int ret;

	cdb[0] = send ? SECURITY_PROTOCOL_OUT : SECURITY_PROTOCOL_IN;
	cdb[1] = secp;
	put_unaligned_be16(spsp, &cdb[2]);
	put_unaligned_be32(len, &cdb[6]);

676 677 678
	ret = scsi_execute_cmd(sdev, cdb, send ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
			       buffer, len, SD_TIMEOUT, sdkp->max_retries,
			       &exec_args);
679 680 681 682
	return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744
/*
 * Look up the DIX operation based on whether the command is read or
 * write and whether dix and dif are enabled.
 *
 * The caller guarantees dix/dif are 0 or 1, so the computed index is
 * always within the 8-entry table.
 */
static unsigned int sd_prot_op(bool write, bool dix, bool dif)
{
	/* Lookup table: bit 2 (write), bit 1 (dix), bit 0 (dif) */
	static const unsigned int ops[] = {	/* wrt dix dif */
		SCSI_PROT_NORMAL,		/*  0	0   0  */
		SCSI_PROT_READ_STRIP,		/*  0	0   1  */
		SCSI_PROT_READ_INSERT,		/*  0	1   0  */
		SCSI_PROT_READ_PASS,		/*  0	1   1  */
		SCSI_PROT_NORMAL,		/*  1	0   0  */
		SCSI_PROT_WRITE_INSERT,		/*  1	0   1  */
		SCSI_PROT_WRITE_STRIP,		/*  1	1   0  */
		SCSI_PROT_WRITE_PASS,		/*  1	1   1  */
	};

	return ops[write << 2 | dix << 1 | dif];
}

/*
 * Returns a mask of the protection flags that are valid for a given DIX
 * operation.  sd_setup_protect_cmnd() ANDs the command's prot_flags
 * with this mask so only flags meaningful for the chosen op survive.
 */
static unsigned int sd_prot_flag_mask(unsigned int prot_op)
{
	static const unsigned int flag_mask[] = {
		[SCSI_PROT_NORMAL]		= 0,

		[SCSI_PROT_READ_STRIP]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_READ_INSERT]		= SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_READ_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_INSERT]	= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_REF_INCREMENT,

		[SCSI_PROT_WRITE_STRIP]		= SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,

		[SCSI_PROT_WRITE_PASS]		= SCSI_PROT_TRANSFER_PI |
						  SCSI_PROT_GUARD_CHECK |
						  SCSI_PROT_REF_CHECK |
						  SCSI_PROT_REF_INCREMENT |
						  SCSI_PROT_IP_CHECKSUM,
	};

	return flag_mask[prot_op];
}

745 746
/*
 * Configure the protection operation, type and flags on @scmd for a
 * DIX/DIF-enabled transfer.  Returns the RDPROTECT/WRPROTECT bits
 * (already shifted into position) to be OR-ed into the CDB.
 */
static unsigned char sd_setup_protect_cmnd(struct scsi_cmnd *scmd,
					   unsigned int dix, unsigned int dif)
{
	struct request *rq = scsi_cmd_to_rq(scmd);
	struct bio *bio = rq->bio;
	unsigned int prot_op = sd_prot_op(rq_data_dir(rq), dix, dif);
	unsigned int protect = 0;

	if (dix) {				/* DIX Type 0, 1, 2, 3 */
		if (bio_integrity_flagged(bio, BIP_IP_CHECKSUM))
			scmd->prot_flags |= SCSI_PROT_IP_CHECKSUM;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_GUARD_CHECK;
	}

	if (dif != T10_PI_TYPE3_PROTECTION) {	/* DIX/DIF Type 0, 1, 2 */
		scmd->prot_flags |= SCSI_PROT_REF_INCREMENT;

		if (bio_integrity_flagged(bio, BIP_CTRL_NOCHECK) == false)
			scmd->prot_flags |= SCSI_PROT_REF_CHECK;
	}

	if (dif) {				/* DIX/DIF Type 1, 2, 3 */
		scmd->prot_flags |= SCSI_PROT_TRANSFER_PI;

		if (bio_integrity_flagged(bio, BIP_DISK_NOCHECK))
			protect = 3 << 5;	/* Disable target PI checking */
		else
			protect = 1 << 5;	/* Enable target PI checking */
	}

	scsi_set_prot_op(scmd, prot_op);
	scsi_set_prot_type(scmd, dif);
	/* Keep only the flags that are valid for the chosen protection op */
	scmd->prot_flags &= sd_prot_flag_mask(prot_op);

	return protect;
}

784 785 786 787 788 789
/*
 * Configure the request queue's discard limits for provisioning mode
 * @mode.  SD_LBP_FULL/SD_LBP_DISABLE turn discards off; the other
 * modes cap the discard size at what the chosen command can express.
 */
static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;
	unsigned int max_blocks = 0;

	q->limits.discard_alignment =
		sdkp->unmap_alignment * logical_block_size;
	q->limits.discard_granularity =
		max(sdkp->physical_block_size,
		    sdkp->unmap_granularity * logical_block_size);
	sdkp->provisioning_mode = mode;

	switch (mode) {

	case SD_LBP_FULL:
	case SD_LBP_DISABLE:
		blk_queue_max_discard_sectors(q, 0);
		return;

	case SD_LBP_UNMAP:
		max_blocks = min_not_zero(sdkp->max_unmap_blocks,
					  (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS16:
		/* some devices want WRITE SAME sized by the UNMAP limit */
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS16_BLOCKS);
		break;

	case SD_LBP_WS10:
		if (sdkp->device->unmap_limit_for_ws)
			max_blocks = sdkp->max_unmap_blocks;
		else
			max_blocks = sdkp->max_ws_blocks;

		max_blocks = min_not_zero(max_blocks, (u32)SD_MAX_WS10_BLOCKS);
		break;

	case SD_LBP_ZERO:
		max_blocks = min_not_zero(sdkp->max_ws_blocks,
					  (u32)SD_MAX_WS10_BLOCKS);
		break;
	}

	/* convert logical blocks to 512-byte sectors for the block layer */
	blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9));
}

836
/*
 * Build an UNMAP command for a discard request.  The 24-byte parameter
 * list (header + one block descriptor) lives in a mempool page attached
 * as the request's special payload.
 */
static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int data_len = 24;
	char *buf;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = UNMAP;
	cmd->cmnd[8] = 24;	/* parameter list length */

	/* UNMAP parameter list: lengths, then one (lba, count) descriptor */
	buf = bvec_virt(&rq->special_vec);
	put_unaligned_be16(6 + 16, &buf[0]);
	put_unaligned_be16(16, &buf[2]);
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	rq->timeout = SD_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
870

871 872
/*
 * Build a WRITE SAME(16) command that writes one zeroed logical block
 * (the mempool special payload) over the request's range.  With @unmap
 * set, the UNMAP bit asks the device to deallocate instead.
 */
static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 16;
	cmd->cmnd[0] = WRITE_SAME_16;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	/* a real WRITE SAME may touch the whole range: use the longer timeout */
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
902

903 904
/*
 * Build a WRITE SAME(10) command — the 32-bit LBA / 16-bit count
 * variant of sd_setup_write_same16_cmnd() for devices/ranges that fit.
 */
static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
		bool unmap)
{
	struct scsi_device *sdp = cmd->device;
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	u32 data_len = sdp->sector_size;

	rq->special_vec.bv_page = mempool_alloc(sd_page_pool, GFP_ATOMIC);
	if (!rq->special_vec.bv_page)
		return BLK_STS_RESOURCE;
	clear_highpage(rq->special_vec.bv_page);
	rq->special_vec.bv_offset = 0;
	rq->special_vec.bv_len = data_len;
	rq->rq_flags |= RQF_SPECIAL_PAYLOAD;

	cmd->cmd_len = 10;
	cmd->cmnd[0] = WRITE_SAME;
	if (unmap)
		cmd->cmnd[1] = 0x8; /* UNMAP */
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	cmd->allowed = sdkp->max_retries;
	cmd->transfersize = data_len;
	/* a real WRITE SAME may touch the whole range: use the longer timeout */
	rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;

	return scsi_alloc_sgtables(cmd);
}
934

935
/*
 * Translate a REQ_OP_WRITE_ZEROES request into the best command the
 * device supports: unmapping WRITE SAME when allowed by the request
 * and zeroing mode, else plain WRITE SAME(16)/(10).
 */
static blk_status_t sd_setup_write_zeroes_cmnd(struct scsi_cmnd *cmd)
{
	struct request *rq = scsi_cmd_to_rq(cmd);
	struct scsi_device *sdp = cmd->device;
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
	u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
	u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));

	/* REQ_NOUNMAP forbids deallocation; otherwise prefer the UNMAP forms */
	if (!(rq->cmd_flags & REQ_NOUNMAP)) {
		switch (sdkp->zeroing_mode) {
		case SD_ZERO_WS16_UNMAP:
			return sd_setup_write_same16_cmnd(cmd, true);
		case SD_ZERO_WS10_UNMAP:
			return sd_setup_write_same10_cmnd(cmd, true);
		}
	}

	if (sdp->no_write_same) {
		rq->rq_flags |= RQF_QUIET;
		return BLK_STS_TARGET;
	}

	/* WRITE SAME(10) cannot address this LBA/length: fall back to (16) */
	if (sdkp->ws16 || lba > 0xffffffff || nr_blocks > 0xffff)
		return sd_setup_write_same16_cmnd(cmd, false);

	return sd_setup_write_same10_cmnd(cmd, false);
}

963 964 965 966 967 968 969 970 971 972 973 974 975 976 977
/*
 * Derive the WRITE SAME block limit and the zeroing strategy from what
 * the device advertises, then program the queue's write-zeroes limit.
 */
static void sd_config_write_same(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	unsigned int logical_block_size = sdkp->device->sector_size;

	if (sdkp->device->no_write_same) {
		sdkp->max_ws_blocks = 0;
		goto out;
	}

	/* Some devices can not handle block counts above 0xffff despite
	 * supporting WRITE SAME(16). Consequently we default to 64k
	 * blocks per I/O unless the device explicitly advertises a
	 * bigger limit.
	 */
	if (sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS16_BLOCKS);
	else if (sdkp->ws16 || sdkp->ws10 || sdkp->device->no_report_opcodes)
		sdkp->max_ws_blocks = min_not_zero(sdkp->max_ws_blocks,
						   (u32)SD_MAX_WS10_BLOCKS);
	else {
		sdkp->device->no_write_same = 1;
		sdkp->max_ws_blocks = 0;
	}

	/* prefer unmapping variants when the device zeroes unmapped blocks (LBPRZ) */
	if (sdkp->lbprz && sdkp->lbpws)
		sdkp->zeroing_mode = SD_ZERO_WS16_UNMAP;
	else if (sdkp->lbprz && sdkp->lbpws10)
		sdkp->zeroing_mode = SD_ZERO_WS10_UNMAP;
	else if (sdkp->max_ws_blocks)
		sdkp->zeroing_mode = SD_ZERO_WS;
	else
		sdkp->zeroing_mode = SD_ZERO_WRITE;

	if (sdkp->max_ws_blocks &&
	    sdkp->physical_block_size > logical_block_size) {
		/*
		 * Reporting a maximum number of blocks that is not aligned
		 * on the device physical size would cause a large write same
		 * request to be split into physically unaligned chunks by
		 * __blkdev_issue_write_zeroes() even if the caller of this
		 * functions took care to align the large request. So make sure
		 * the maximum reported is aligned to the device physical block
		 * size. This is only an optional optimization for regular
		 * disks, but this is mandatory to avoid failure of large write
		 * same requests directed at sequential write required zones of
		 * host-managed ZBC disks.
		 */
		sdkp->max_ws_blocks =
			round_down(sdkp->max_ws_blocks,
				   bytes_to_logical(sdkp->device,
						    sdkp->physical_block_size));
	}

out:
	blk_queue_max_write_zeroes_sectors(q, sdkp->max_ws_blocks *
					 (logical_block_size >> 9));
}

1023
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
1024
{
1025
	struct request *rq = scsi_cmd_to_rq(cmd);
1026
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1027 1028 1029

	/* flush requests don't perform I/O, zero the S/G table */
	memset(&cmd->sdb, 0, sizeof(cmd->sdb));
1030

1031 1032 1033 1034 1035 1036 1037
	if (cmd->device->use_16_for_sync) {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE_16;
		cmd->cmd_len = 16;
	} else {
		cmd->cmnd[0] = SYNCHRONIZE_CACHE;
		cmd->cmd_len = 10;
	}
1038
	cmd->transfersize = 0;
1039
	cmd->allowed = sdkp->max_retries;
1040

1041
	rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
1042
	return BLK_STS_OK;
1043 1044
}

1045 1046 1047
static blk_status_t sd_setup_rw32_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
L
Linus Torvalds 已提交
1048
{
1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094
	cmd->cmd_len = SD_EXT_CDB_SIZE;
	cmd->cmnd[0]  = VARIABLE_LENGTH_CMD;
	cmd->cmnd[7]  = 0x18; /* Additional CDB len */
	cmd->cmnd[9]  = write ? WRITE_32 : READ_32;
	cmd->cmnd[10] = flags;
	put_unaligned_be64(lba, &cmd->cmnd[12]);
	put_unaligned_be32(lba, &cmd->cmnd[20]); /* Expected Indirect LBA */
	put_unaligned_be32(nr_blocks, &cmd->cmnd[28]);

	return BLK_STS_OK;
}

/*
 * sd_setup_rw16_cmnd - build a READ(16)/WRITE(16) CDB.
 *
 * 64-bit LBA and 32-bit block count; bytes 14/15 (group number / control)
 * are explicitly cleared.
 */
static blk_status_t sd_setup_rw16_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len  = 16;
	cmd->cmnd[0]  = write ? WRITE_16 : READ_16;
	cmd->cmnd[1]  = flags;
	cmd->cmnd[14] = 0;
	cmd->cmnd[15] = 0;
	put_unaligned_be64(lba, &cmd->cmnd[2]);
	put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);

	return BLK_STS_OK;
}

/*
 * sd_setup_rw10_cmnd - build a READ(10)/WRITE(10) CDB.
 *
 * 32-bit LBA and 16-bit block count; bytes 6/9 (group number / control)
 * are explicitly cleared.
 */
static blk_status_t sd_setup_rw10_cmnd(struct scsi_cmnd *cmd, bool write,
				       sector_t lba, unsigned int nr_blocks,
				       unsigned char flags)
{
	cmd->cmd_len = 10;
	cmd->cmnd[0] = write ? WRITE_10 : READ_10;
	cmd->cmnd[1] = flags;
	cmd->cmnd[6] = 0;
	cmd->cmnd[9] = 0;
	put_unaligned_be32(lba, &cmd->cmnd[2]);
	put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);

	return BLK_STS_OK;
}

/*
 * sd_setup_rw6_cmnd - build a READ(6)/WRITE(6) CDB.
 *
 * READ/WRITE(6) has a 21-bit LBA and an 8-bit block count where a count
 * of 0 means 256 blocks, so zero-length transfers are rejected.  FUA
 * cannot be expressed in a 6-byte CDB and is rejected as well.
 */
static blk_status_t sd_setup_rw6_cmnd(struct scsi_cmnd *cmd, bool write,
				      sector_t lba, unsigned int nr_blocks,
				      unsigned char flags)
{
	/* Avoid that 0 blocks gets translated into 256 blocks. */
	if (WARN_ON_ONCE(nr_blocks == 0))
		return BLK_STS_IOERR;

	if (unlikely(flags & 0x8)) {
		/*
		 * This happens only if this drive failed 10byte rw
		 * command with ILLEGAL_REQUEST during operation and
		 * thus turned off use_10_for_rw.
		 */
		scmd_printk(KERN_ERR, cmd, "FUA write on READ/WRITE(6) drive\n");
		return BLK_STS_IOERR;
	}

	cmd->cmd_len = 6;
	cmd->cmnd[0] = write ? WRITE_6 : READ_6;
	cmd->cmnd[1] = (lba >> 16) & 0x1f;
	cmd->cmnd[2] = (lba >> 8) & 0xff;
	cmd->cmnd[3] = lba & 0xff;
	cmd->cmnd[4] = nr_blocks;
	cmd->cmnd[5] = 0;

	return BLK_STS_OK;
}

1120
static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
L
Linus Torvalds 已提交
1121
{
1122
	struct request *rq = scsi_cmd_to_rq(cmd);
1123
	struct scsi_device *sdp = cmd->device;
1124
	struct scsi_disk *sdkp = scsi_disk(rq->q->disk);
1125
	sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq));
1126
	sector_t threshold;
1127 1128
	unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
	unsigned int mask = logical_to_sectors(sdp, 1) - 1;
1129 1130
	bool write = rq_data_dir(rq) == WRITE;
	unsigned char protect, fua;
1131
	blk_status_t ret;
1132 1133
	unsigned int dif;
	bool dix;
1134

1135
	ret = scsi_alloc_sgtables(cmd);
1136
	if (ret != BLK_STS_OK)
1137
		return ret;
1138

1139
	ret = BLK_STS_IOERR;
1140
	if (!scsi_device_online(sdp) || sdp->changed) {
1141
		scmd_printk(KERN_ERR, cmd, "device offline or changed\n");
1142
		goto fail;
L
Linus Torvalds 已提交
1143 1144
	}

1145
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) > get_capacity(rq->q->disk)) {
1146
		scmd_printk(KERN_ERR, cmd, "access beyond end of device\n");
1147
		goto fail;
1148 1149 1150
	}

	if ((blk_rq_pos(rq) & mask) || (blk_rq_sectors(rq) & mask)) {
1151
		scmd_printk(KERN_ERR, cmd, "request not aligned to the logical block size\n");
1152
		goto fail;
L
Linus Torvalds 已提交
1153
	}
1154

1155
	/*
1156 1157
	 * Some SD card readers can't handle accesses which touch the
	 * last one or two logical blocks. Split accesses as needed.
1158
	 */
1159
	threshold = sdkp->capacity - SD_LAST_BUGGY_SECTORS;
1160

1161 1162
	if (unlikely(sdp->last_sector_bug && lba + nr_blocks > threshold)) {
		if (lba < threshold) {
1163
			/* Access up to the threshold but not beyond */
1164
			nr_blocks = threshold - lba;
1165
		} else {
1166 1167
			/* Access only a single logical block */
			nr_blocks = 1;
1168 1169
		}
	}
1170

1171 1172 1173
	if (req_op(rq) == REQ_OP_ZONE_APPEND) {
		ret = sd_zbc_prepare_zone_append(cmd, &lba, nr_blocks);
		if (ret)
1174
			goto fail;
1175 1176
	}

1177
	fua = rq->cmd_flags & REQ_FUA ? 0x8 : 0;
1178 1179
	dix = scsi_prot_sg_count(cmd);
	dif = scsi_host_dif_capable(cmd->device->host, sdkp->protection_type);
L
Linus Torvalds 已提交
1180

1181
	if (dif || dix)
1182
		protect = sd_setup_protect_cmnd(cmd, dix, dif);
1183
	else
1184 1185
		protect = 0;

1186
	if (protect && sdkp->protection_type == T10_PI_TYPE2_PROTECTION) {
1187
		ret = sd_setup_rw32_cmnd(cmd, write, lba, nr_blocks,
1188
					 protect | fua);
1189
	} else if (sdp->use_16_for_rw || (nr_blocks > 0xffff)) {
1190
		ret = sd_setup_rw16_cmnd(cmd, write, lba, nr_blocks,
1191
					 protect | fua);
1192
	} else if ((nr_blocks > 0xff) || (lba > 0x1fffff) ||
1193
		   sdp->use_10_for_rw || protect) {
1194
		ret = sd_setup_rw10_cmnd(cmd, write, lba, nr_blocks,
1195
					 protect | fua);
L
Linus Torvalds 已提交
1196
	} else {
1197
		ret = sd_setup_rw6_cmnd(cmd, write, lba, nr_blocks,
1198
					protect | fua);
L
Linus Torvalds 已提交
1199
	}
1200 1201

	if (unlikely(ret != BLK_STS_OK))
1202
		goto fail;
L
Linus Torvalds 已提交
1203 1204 1205 1206 1207 1208

	/*
	 * We shouldn't disconnect in the middle of a sector, so with a dumb
	 * host adapter, it's safe to assume that we can at least transfer
	 * this many bytes between each connect / disconnect.
	 */
1209 1210
	cmd->transfersize = sdp->sector_size;
	cmd->underflow = nr_blocks << 9;
1211
	cmd->allowed = sdkp->max_retries;
1212
	cmd->sdb.length = nr_blocks * sdp->sector_size;
1213 1214

	SCSI_LOG_HLQUEUE(1,
1215
			 scmd_printk(KERN_INFO, cmd,
1216 1217 1218 1219
				     "%s: block=%llu, count=%d\n", __func__,
				     (unsigned long long)blk_rq_pos(rq),
				     blk_rq_sectors(rq)));
	SCSI_LOG_HLQUEUE(2,
1220
			 scmd_printk(KERN_INFO, cmd,
1221 1222 1223
				     "%s %d/%u 512 byte blocks.\n",
				     write ? "writing" : "reading", nr_blocks,
				     blk_rq_sectors(rq)));
L
Linus Torvalds 已提交
1224 1225

	/*
1226
	 * This indicates that the command is ready from our end to be queued.
L
Linus Torvalds 已提交
1227
	 */
1228
	return BLK_STS_OK;
1229 1230 1231
fail:
	scsi_free_sgtables(cmd);
	return ret;
L
Linus Torvalds 已提交
1232 1233
}

1234
static blk_status_t sd_init_command(struct scsi_cmnd *cmd)
C
Christoph Hellwig 已提交
1235
{
1236
	struct request *rq = scsi_cmd_to_rq(cmd);
C
Christoph Hellwig 已提交
1237

M
Mike Christie 已提交
1238 1239
	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
1240
		switch (scsi_disk(rq->q->disk)->provisioning_mode) {
1241 1242 1243
		case SD_LBP_UNMAP:
			return sd_setup_unmap_cmnd(cmd);
		case SD_LBP_WS16:
1244
			return sd_setup_write_same16_cmnd(cmd, true);
1245 1246 1247 1248 1249
		case SD_LBP_WS10:
			return sd_setup_write_same10_cmnd(cmd, true);
		case SD_LBP_ZERO:
			return sd_setup_write_same10_cmnd(cmd, false);
		default:
1250
			return BLK_STS_TARGET;
1251
		}
1252 1253
	case REQ_OP_WRITE_ZEROES:
		return sd_setup_write_zeroes_cmnd(cmd);
1254
	case REQ_OP_FLUSH:
C
Christoph Hellwig 已提交
1255
		return sd_setup_flush_cmnd(cmd);
M
Mike Christie 已提交
1256 1257
	case REQ_OP_READ:
	case REQ_OP_WRITE:
1258
	case REQ_OP_ZONE_APPEND:
C
Christoph Hellwig 已提交
1259
		return sd_setup_read_write_cmnd(cmd);
1260
	case REQ_OP_ZONE_RESET:
1261 1262
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   false);
1263
	case REQ_OP_ZONE_RESET_ALL:
1264 1265 1266 1267 1268 1269 1270 1271
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER,
						   true);
	case REQ_OP_ZONE_OPEN:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false);
	case REQ_OP_ZONE_CLOSE:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false);
	case REQ_OP_ZONE_FINISH:
		return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false);
M
Mike Christie 已提交
1272
	default:
1273
		WARN_ON_ONCE(1);
1274
		return BLK_STS_NOTSUPP;
M
Mike Christie 已提交
1275
	}
C
Christoph Hellwig 已提交
1276 1277 1278 1279
}

static void sd_uninit_command(struct scsi_cmnd *SCpnt)
{
1280
	struct request *rq = scsi_cmd_to_rq(SCpnt);
C
Christoph Hellwig 已提交
1281

1282
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1283
		mempool_free(rq->special_vec.bv_page, sd_page_pool);
C
Christoph Hellwig 已提交
1284 1285
}

1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
static bool sd_need_revalidate(struct block_device *bdev,
		struct scsi_disk *sdkp)
{
	if (sdkp->device->removable || sdkp->write_prot) {
		if (bdev_check_media_change(bdev))
			return true;
	}

	/*
	 * Force a full rescan after ioctl(BLKRRPART).  While the disk state has
	 * nothing to do with partitions, BLKRRPART is used to force a full
	 * revalidate after things like a format for historical reasons.
	 */
	return test_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
}

L
Linus Torvalds 已提交
1302 1303
/**
 *	sd_open - open a scsi disk device
1304 1305
 *	@bdev: Block device of the scsi disk to open
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1306 1307 1308 1309 1310 1311 1312 1313
 *
 *	Returns 0 if successful. Returns a negated errno value in case 
 *	of error.
 *
 *	Note: This can be called from a user context (e.g. fsck(1) )
 *	or from within the kernel (e.g. as a result of a mount(1) ).
 *	In the latter case @inode and @filp carry an abridged amount
 *	of information as noted above.
A
Arnd Bergmann 已提交
1314
 *
1315
 *	Locking: called with bdev->bd_disk->open_mutex held.
L
Linus Torvalds 已提交
1316
 **/
A
Al Viro 已提交
1317
static int sd_open(struct block_device *bdev, fmode_t mode)
L
Linus Torvalds 已提交
1318
{
1319 1320
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
L
Linus Torvalds 已提交
1321 1322
	int retval;

1323
	if (scsi_device_get(sdev))
L
Linus Torvalds 已提交
1324 1325
		return -ENXIO;

1326
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_open\n"));
L
Linus Torvalds 已提交
1327 1328 1329 1330 1331 1332 1333 1334 1335

	/*
	 * If the device is in error recovery, wait until it is done.
	 * If the device is offline, then disallow any access to it.
	 */
	retval = -ENXIO;
	if (!scsi_block_when_processing_errors(sdev))
		goto error_out;

1336 1337
	if (sd_need_revalidate(bdev, sdkp))
		sd_revalidate_disk(bdev->bd_disk);
L
Linus Torvalds 已提交
1338 1339 1340 1341 1342

	/*
	 * If the drive is empty, just let the open fail.
	 */
	retval = -ENOMEDIUM;
A
Al Viro 已提交
1343
	if (sdev->removable && !sdkp->media_present && !(mode & FMODE_NDELAY))
L
Linus Torvalds 已提交
1344 1345 1346 1347 1348 1349 1350
		goto error_out;

	/*
	 * If the device has the write protect tab set, have the open fail
	 * if the user expects to be able to write to the thing.
	 */
	retval = -EROFS;
A
Al Viro 已提交
1351
	if (sdkp->write_prot && (mode & FMODE_WRITE))
L
Linus Torvalds 已提交
1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363
		goto error_out;

	/*
	 * It is possible that the disk changing stuff resulted in
	 * the device being taken offline.  If this is the case,
	 * report this to the user, and don't pretend that the
	 * open actually succeeded.
	 */
	retval = -ENXIO;
	if (!scsi_device_online(sdev))
		goto error_out;

A
Arnd Bergmann 已提交
1364
	if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) {
L
Linus Torvalds 已提交
1365 1366 1367 1368 1369 1370 1371
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT);
	}

	return 0;

error_out:
1372
	scsi_device_put(sdev);
L
Linus Torvalds 已提交
1373 1374 1375 1376 1377 1378
	return retval;	
}

/**
 *	sd_release - invoked when the (last) close(2) is called on this
 *	scsi disk.
1379 1380
 *	@disk: disk to release
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1381 1382 1383 1384 1385
 *
 *	Returns 0. 
 *
 *	Note: may block (uninterruptible) if error recovery is underway
 *	on this disk.
A
Arnd Bergmann 已提交
1386
 *
1387
 *	Locking: called with bdev->bd_disk->open_mutex held.
L
Linus Torvalds 已提交
1388
 **/
1389
static void sd_release(struct gendisk *disk, fmode_t mode)
L
Linus Torvalds 已提交
1390 1391 1392 1393
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdev = sdkp->device;

1394
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n"));
L
Linus Torvalds 已提交
1395

A
Alan Stern 已提交
1396
	if (atomic_dec_return(&sdkp->openers) == 0 && sdev->removable) {
L
Linus Torvalds 已提交
1397 1398 1399 1400
		if (scsi_block_when_processing_errors(sdev))
			scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
	}

1401
	scsi_device_put(sdev);
L
Linus Torvalds 已提交
1402 1403
}

1404
static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
L
Linus Torvalds 已提交
1405 1406 1407 1408
{
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdp = sdkp->device;
	struct Scsi_Host *host = sdp->host;
1409
	sector_t capacity = logical_to_sectors(sdp, sdkp->capacity);
L
Linus Torvalds 已提交
1410 1411 1412
	int diskinfo[4];

	/* default to most commonly used values */
1413 1414 1415 1416
	diskinfo[0] = 0x40;	/* 1 << 6 */
	diskinfo[1] = 0x20;	/* 1 << 5 */
	diskinfo[2] = capacity >> 11;

L
Linus Torvalds 已提交
1417 1418
	/* override with calculated, extended default, or driver values */
	if (host->hostt->bios_param)
1419
		host->hostt->bios_param(sdp, bdev, capacity, diskinfo);
L
Linus Torvalds 已提交
1420
	else
1421
		scsicam_bios_param(bdev, capacity, diskinfo);
L
Linus Torvalds 已提交
1422

1423 1424 1425
	geo->heads = diskinfo[0];
	geo->sectors = diskinfo[1];
	geo->cylinders = diskinfo[2];
L
Linus Torvalds 已提交
1426 1427 1428 1429
	return 0;
}

/**
1430
 *	sd_ioctl - process an ioctl
1431 1432
 *	@bdev: target block device
 *	@mode: FMODE_* mask
L
Linus Torvalds 已提交
1433
 *	@cmd: ioctl command number
1434
 *	@arg: this is third argument given to ioctl(2) system call.
L
Linus Torvalds 已提交
1435 1436
 *	Often contains a pointer.
 *
L
Lucas De Marchi 已提交
1437
 *	Returns 0 if successful (some ioctls return positive numbers on
L
Linus Torvalds 已提交
1438 1439 1440
 *	success as well). Returns a negated errno value in case of error.
 *
 *	Note: most ioctls are forward onto the block subsystem or further
1441
 *	down in the scsi subsystem.
L
Linus Torvalds 已提交
1442
 **/
1443 1444
static int sd_ioctl(struct block_device *bdev, fmode_t mode,
		    unsigned int cmd, unsigned long arg)
L
Linus Torvalds 已提交
1445 1446
{
	struct gendisk *disk = bdev->bd_disk;
1447 1448
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
1449
	void __user *p = (void __user *)arg;
L
Linus Torvalds 已提交
1450 1451
	int error;
    
1452 1453
	SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
				    "cmd=0x%x\n", disk->disk_name, cmd));
L
Linus Torvalds 已提交
1454

1455 1456
	if (bdev_is_partition(bdev) && !capable(CAP_SYS_RAWIO))
		return -ENOIOCTLCMD;
1457

L
Linus Torvalds 已提交
1458 1459 1460 1461 1462 1463
	/*
	 * If we are in the middle of error recovery, don't let anyone
	 * else try and use this device.  Also, if error recovery fails, it
	 * may try and take the device offline, in which case all further
	 * access to the device is prohibited.
	 */
1464 1465 1466
	error = scsi_ioctl_block_when_processing_errors(sdp, cmd,
			(mode & FMODE_NDELAY) != 0);
	if (error)
1467
		return error;
L
Linus Torvalds 已提交
1468

1469 1470
	if (is_sed_ioctl(cmd))
		return sed_ioctl(sdkp->opal_dev, cmd, p);
1471
	return scsi_ioctl(sdp, mode, cmd, p);
L
Linus Torvalds 已提交
1472 1473 1474 1475
}

/*
 * set_media_not_present - record that the medium has gone away.
 *
 * Flags a media change; for removable devices also clears the present
 * flag and the cached capacity.
 */
static void set_media_not_present(struct scsi_disk *sdkp)
{
	if (sdkp->media_present)
		sdkp->device->changed = 1;

	if (sdkp->device->removable) {
		sdkp->media_present = 0;
		sdkp->capacity = 0;
	}
}

/*
 * media_not_present - check sense data for a "medium not present" report.
 *
 * Returns 1 (and updates the disk state) if the sense data indicates
 * ASC 0x3A (medium not present), 0 otherwise.
 */
static int media_not_present(struct scsi_disk *sdkp,
			     struct scsi_sense_hdr *sshdr)
{
	if (!scsi_sense_valid(sshdr))
		return 0;

	/* not invoked for commands that could return deferred errors */
	switch (sshdr->sense_key) {
	case UNIT_ATTENTION:
	case NOT_READY:
		/* medium not present */
		if (sshdr->asc == 0x3A) {
			set_media_not_present(sdkp);
			return 1;
		}
	}
	return 0;
}

/**
1505 1506 1507
 *	sd_check_events - check media events
 *	@disk: kernel device descriptor
 *	@clearing: disk events currently being cleared
L
Linus Torvalds 已提交
1508
 *
1509
 *	Returns mask of DISK_EVENT_*.
L
Linus Torvalds 已提交
1510 1511 1512
 *
 *	Note: this function is invoked from the block subsystem.
 **/
1513
static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
L
Linus Torvalds 已提交
1514
{
1515
	struct scsi_disk *sdkp = disk->private_data;
1516
	struct scsi_device *sdp;
L
Linus Torvalds 已提交
1517
	int retval;
1518
	bool disk_changed;
L
Linus Torvalds 已提交
1519

1520 1521 1522 1523
	if (!sdkp)
		return 0;

	sdp = sdkp->device;
1524
	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
L
Linus Torvalds 已提交
1525 1526 1527 1528 1529 1530 1531

	/*
	 * If the device is offline, don't send any commands - just pretend as
	 * if the command failed.  If the device ever comes back online, we
	 * can deal with it then.  It is only because of unrecoverable errors
	 * that we would ever take a device offline in the first place.
	 */
1532 1533 1534 1535
	if (!scsi_device_online(sdp)) {
		set_media_not_present(sdkp);
		goto out;
	}
L
Linus Torvalds 已提交
1536 1537 1538 1539 1540 1541 1542 1543 1544 1545

	/*
	 * Using TEST_UNIT_READY enables differentiation between drive with
	 * no cartridge loaded - NOT READY, drive with changed cartridge -
	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
	 *
	 * Drives that auto spin down. eg iomega jaz 1G, will be started
	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
	 * sd_revalidate() is called.
	 */
1546
	if (scsi_block_when_processing_errors(sdp)) {
1547 1548
		struct scsi_sense_hdr sshdr = { 0, };

1549
		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
1550
					      &sshdr);
L
Linus Torvalds 已提交
1551

1552
		/* failed to execute TUR, assume media not present */
1553
		if (retval < 0 || host_byte(retval)) {
1554 1555 1556
			set_media_not_present(sdkp);
			goto out;
		}
L
Linus Torvalds 已提交
1557

1558 1559 1560
		if (media_not_present(sdkp, &sshdr))
			goto out;
	}
1561

L
Linus Torvalds 已提交
1562 1563
	/*
	 * For removable scsi disk we have to recognise the presence
1564
	 * of a disk in the drive.
L
Linus Torvalds 已提交
1565
	 */
1566 1567
	if (!sdkp->media_present)
		sdp->changed = 1;
L
Linus Torvalds 已提交
1568
	sdkp->media_present = 1;
1569
out:
1570
	/*
1571
	 * sdp->changed is set under the following conditions:
1572
	 *
1573 1574
	 *	Medium present state has changed in either direction.
	 *	Device has indicated UNIT_ATTENTION.
1575
	 */
1576
	disk_changed = sdp->changed;
1577
	sdp->changed = 0;
1578
	return disk_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
L
Linus Torvalds 已提交
1579 1580
}

1581
static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
L
Linus Torvalds 已提交
1582 1583
{
	int retries, res;
1584
	struct scsi_device *sdp = sdkp->device;
1585 1586
	const int timeout = sdp->request_queue->rq_timeout
		* SD_FLUSH_TIMEOUT_MULTIPLIER;
1587
	struct scsi_sense_hdr my_sshdr;
1588 1589 1590 1591 1592
	const struct scsi_exec_args exec_args = {
		.req_flags = BLK_MQ_REQ_PM,
		/* caller might not be interested in sense, but we need it */
		.sshdr = sshdr ? : &my_sshdr,
	};
L
Linus Torvalds 已提交
1593 1594 1595 1596

	if (!scsi_device_online(sdp))
		return -ENODEV;

1597
	sshdr = exec_args.sshdr;
1598

L
Linus Torvalds 已提交
1599
	for (retries = 3; retries > 0; --retries) {
1600
		unsigned char cmd[16] = { 0 };
L
Linus Torvalds 已提交
1601

1602 1603 1604 1605
		if (sdp->use_16_for_sync)
			cmd[0] = SYNCHRONIZE_CACHE_16;
		else
			cmd[0] = SYNCHRONIZE_CACHE;
L
Linus Torvalds 已提交
1606 1607 1608 1609
		/*
		 * Leave the rest of the command zero to indicate
		 * flush everything.
		 */
1610 1611
		res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
				       timeout, sdkp->max_retries, &exec_args);
1612
		if (res == 0)
L
Linus Torvalds 已提交
1613 1614 1615
			break;
	}

1616
	if (res) {
H
Hannes Reinecke 已提交
1617
		sd_print_result(sdkp, "Synchronize Cache(10) failed", res);
1618

1619 1620 1621
		if (res < 0)
			return res;

H
Hannes Reinecke 已提交
1622 1623
		if (scsi_status_is_check_condition(res) &&
		    scsi_sense_valid(sshdr)) {
1624 1625
			sd_print_sense_hdr(sdkp, sshdr);

H
Hannes Reinecke 已提交
1626 1627 1628 1629
			/* we need to evaluate the error return  */
			if (sshdr->asc == 0x3a ||	/* medium not present */
			    sshdr->asc == 0x20 ||	/* invalid command */
			    (sshdr->asc == 0x74 && sshdr->ascq == 0x71))	/* drive is password locked */
1630 1631
				/* this is no error here */
				return 0;
H
Hannes Reinecke 已提交
1632
		}
1633 1634 1635 1636 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647

		switch (host_byte(res)) {
		/* ignore errors due to racing a disconnection */
		case DID_BAD_TARGET:
		case DID_NO_CONNECT:
			return 0;
		/* signal the upper layer it might try again */
		case DID_BUS_BUSY:
		case DID_IMM_RETRY:
		case DID_REQUEUE:
		case DID_SOFT_ERROR:
			return -EBUSY;
		default:
			return -EIO;
		}
L
Linus Torvalds 已提交
1648
	}
1649
	return 0;
L
Linus Torvalds 已提交
1650 1651 1652 1653
}

static void sd_rescan(struct device *dev)
{
1654
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
A
Alan Stern 已提交
1655

1656
	sd_revalidate_disk(sdkp->disk);
L
Linus Torvalds 已提交
1657 1658
}

C
Christoph Hellwig 已提交
1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
		enum blk_unique_id type)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;
	const struct scsi_vpd *vpd;
	const unsigned char *d;
	int ret = -ENXIO, len;

	rcu_read_lock();
	vpd = rcu_dereference(sdev->vpd_pg83);
	if (!vpd)
		goto out_unlock;

	ret = -EINVAL;
	for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
		/* we only care about designators with LU association */
		if (((d[1] >> 4) & 0x3) != 0x00)
			continue;
		if ((d[1] & 0xf) != type)
			continue;

		/*
		 * Only exit early if a 16-byte descriptor was found.  Otherwise
		 * keep looking as one with more entropy might still show up.
		 */
		len = d[3];
		if (len != 8 && len != 12 && len != 16)
			continue;
		ret = len;
		memcpy(id, d + 4, len);
		if (len == 16)
			break;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}

1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716
static char sd_pr_type(enum pr_type type)
{
	switch (type) {
	case PR_WRITE_EXCLUSIVE:
		return 0x01;
	case PR_EXCLUSIVE_ACCESS:
		return 0x03;
	case PR_WRITE_EXCLUSIVE_REG_ONLY:
		return 0x05;
	case PR_EXCLUSIVE_ACCESS_REG_ONLY:
		return 0x06;
	case PR_WRITE_EXCLUSIVE_ALL_REGS:
		return 0x07;
	case PR_EXCLUSIVE_ACCESS_ALL_REGS:
		return 0x08;
	default:
		return 0;
	}
};

1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746
static int sd_scsi_to_pr_err(struct scsi_sense_hdr *sshdr, int result)
{
	switch (host_byte(result)) {
	case DID_TRANSPORT_MARGINAL:
	case DID_TRANSPORT_DISRUPTED:
	case DID_BUS_BUSY:
		return PR_STS_RETRY_PATH_FAILURE;
	case DID_NO_CONNECT:
		return PR_STS_PATH_FAILED;
	case DID_TRANSPORT_FAILFAST:
		return PR_STS_PATH_FAST_FAILED;
	}

	switch (status_byte(result)) {
	case SAM_STAT_RESERVATION_CONFLICT:
		return PR_STS_RESERVATION_CONFLICT;
	case SAM_STAT_CHECK_CONDITION:
		if (!scsi_sense_valid(sshdr))
			return PR_STS_IOERR;

		if (sshdr->sense_key == ILLEGAL_REQUEST &&
		    (sshdr->asc == 0x26 || sshdr->asc == 0x24))
			return -EINVAL;

		fallthrough;
	default:
		return PR_STS_IOERR;
	}
}

1747 1748 1749
static int sd_pr_command(struct block_device *bdev, u8 sa,
		u64 key, u64 sa_key, u8 type, u8 flags)
{
1750 1751
	struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
	struct scsi_device *sdev = sdkp->device;
1752
	struct scsi_sense_hdr sshdr;
1753 1754 1755
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768
	int result;
	u8 cmd[16] = { 0, };
	u8 data[24] = { 0, };

	cmd[0] = PERSISTENT_RESERVE_OUT;
	cmd[1] = sa;
	cmd[2] = type;
	put_unaligned_be32(sizeof(data), &cmd[5]);

	put_unaligned_be64(key, &data[0]);
	put_unaligned_be64(sa_key, &data[8]);
	data[20] = flags;

1769 1770 1771
	result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, &data,
				  sizeof(data), SD_TIMEOUT, sdkp->max_retries,
				  &exec_args);
1772

H
Hannes Reinecke 已提交
1773
	if (scsi_status_is_check_condition(result) &&
1774
	    scsi_sense_valid(&sshdr)) {
1775 1776 1777 1778
		sdev_printk(KERN_INFO, sdev, "PR command failed: %d\n", result);
		scsi_print_sense_hdr(sdev, NULL, &sshdr);
	}

1779 1780 1781 1782
	if (result <= 0)
		return result;

	return sd_scsi_to_pr_err(&sshdr, result);
1783 1784 1785 1786 1787 1788 1789 1790 1791
}

/* Register (or ignore-key register) a reservation key; APTPL is set so the
 * registration persists across power loss. */
static int sd_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
		u32 flags)
{
	if (flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, (flags & PR_FL_IGNORE_KEY) ? 0x06 : 0x00,
			old_key, new_key, 0,
			(1 << 0) /* APTPL */);
}

/* RESERVE service action (0x01). */
static int sd_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
		u32 flags)
{
	if (flags)
		return -EOPNOTSUPP;
	return sd_pr_command(bdev, 0x01, key, 0, sd_pr_type(type), 0);
}

/* RELEASE service action (0x02). */
static int sd_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	return sd_pr_command(bdev, 0x02, key, 0, sd_pr_type(type), 0);
}

/* PREEMPT (0x04) or PREEMPT AND ABORT (0x05) service action. */
static int sd_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
		enum pr_type type, bool abort)
{
	return sd_pr_command(bdev, abort ? 0x05 : 0x04, old_key, new_key,
			     sd_pr_type(type), 0);
}

/* CLEAR service action (0x03). */
static int sd_pr_clear(struct block_device *bdev, u64 key)
{
	return sd_pr_command(bdev, 0x03, key, 0, 0, 0);
}

static const struct pr_ops sd_pr_ops = {
	.pr_register	= sd_pr_register,
	.pr_reserve	= sd_pr_reserve,
	.pr_release	= sd_pr_release,
	.pr_preempt	= sd_pr_preempt,
	.pr_clear	= sd_pr_clear,
};

1828 1829 1830 1831 1832 1833 1834
static void scsi_disk_free_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);

	put_device(&sdkp->disk_dev);
}

1835
static const struct block_device_operations sd_fops = {
L
Linus Torvalds 已提交
1836
	.owner			= THIS_MODULE,
A
Al Viro 已提交
1837 1838
	.open			= sd_open,
	.release		= sd_release,
1839
	.ioctl			= sd_ioctl,
1840
	.getgeo			= sd_getgeo,
1841
	.compat_ioctl		= blkdev_compat_ptr_ioctl,
1842
	.check_events		= sd_check_events,
1843
	.unlock_native_capacity	= sd_unlock_native_capacity,
1844
	.report_zones		= sd_zbc_report_zones,
C
Christoph Hellwig 已提交
1845
	.get_unique_id		= sd_get_unique_id,
1846
	.free_disk		= scsi_disk_free_disk,
1847
	.pr_ops			= &sd_pr_ops,
L
Linus Torvalds 已提交
1848 1849
};

1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863
/**
 *	sd_eh_reset - reset error handling callback
 *	@scmd:		sd-issued command that has failed
 *
 *	This function is called by the SCSI midlayer before starting
 *	SCSI EH. When counting medium access failures we have to be
 *	careful to register it only only once per device and SCSI EH run;
 *	there might be several timed out commands which will cause the
 *	'max_medium_access_timeouts' counter to trigger after the first
 *	SCSI EH run already and set the device to offline.
 *	So this function resets the internal counter before starting SCSI EH.
 **/
static void sd_eh_reset(struct scsi_cmnd *scmd)
{
1864
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1865 1866 1867 1868 1869

	/* New SCSI EH run, reset gate variable */
	sdkp->ignore_medium_access_errors = false;
}

1870 1871 1872 1873 1874
/**
 *	sd_eh_action - error handling callback
 *	@scmd:		sd-issued command that has failed
 *	@eh_disp:	The recovery disposition suggested by the midlayer
 *
1875 1876 1877 1878 1879 1880
 *	This function is called by the SCSI midlayer upon completion of an
 *	error test command (currently TEST UNIT READY). The result of sending
 *	the eh command is passed in eh_disp.  We're looking for devices that
 *	fail medium access commands but are OK with non access commands like
 *	test unit ready (so wrongly see the device as having a successful
 *	recovery)
1881
 **/
1882
static int sd_eh_action(struct scsi_cmnd *scmd, int eh_disp)
1883
{
1884
	struct scsi_disk *sdkp = scsi_disk(scsi_cmd_to_rq(scmd)->q->disk);
1885
	struct scsi_device *sdev = scmd->device;
1886

1887
	if (!scsi_device_online(sdev) ||
1888 1889 1890
	    !scsi_medium_access_command(scmd) ||
	    host_byte(scmd->result) != DID_TIME_OUT ||
	    eh_disp != SUCCESS)
1891 1892 1893 1894 1895 1896 1897 1898 1899
		return eh_disp;

	/*
	 * The device has timed out executing a medium access command.
	 * However, the TEST UNIT READY command sent during error
	 * handling completed successfully. Either the device is in the
	 * process of recovering or has it suffered an internal failure
	 * that prevents access to the storage medium.
	 */
1900 1901 1902 1903
	if (!sdkp->ignore_medium_access_errors) {
		sdkp->medium_access_timed_out++;
		sdkp->ignore_medium_access_errors = true;
	}
1904 1905 1906 1907 1908 1909 1910 1911 1912

	/*
	 * If the device keeps failing read/write commands but TEST UNIT
	 * READY always completes successfully we assume that medium
	 * access is no longer possible and take the device offline.
	 */
	if (sdkp->medium_access_timed_out >= sdkp->max_medium_access_timeouts) {
		scmd_printk(KERN_ERR, scmd,
			    "Medium access timeout failure. Offlining disk!\n");
1913 1914 1915
		mutex_lock(&sdev->state_mutex);
		scsi_device_set_state(sdev, SDEV_OFFLINE);
		mutex_unlock(&sdev->state_mutex);
1916

1917
		return SUCCESS;
1918 1919 1920 1921 1922
	}

	return eh_disp;
}

1923 1924
/*
 * Compute how many bytes at the start of the request completed
 * successfully, using the bad-LBA information from the sense data.
 * Returns 0 whenever a reliable figure cannot be derived.
 */
static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
{
	struct request *req = scsi_cmd_to_rq(scmd);
	struct scsi_device *sdev = scmd->device;
	unsigned int transferred, good_bytes;
	u64 start_lba, end_lba, bad_lba;

	/*
	 * Some commands have a payload smaller than the device logical
	 * block size (e.g. INQUIRY on a 4K disk).
	 */
	if (scsi_bufflen(scmd) <= sdev->sector_size)
		return 0;

	/* Check if we have a 'bad_lba' information */
	if (!scsi_get_sense_info_fld(scmd->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE,
				     &bad_lba))
		return 0;

	/*
	 * If the bad lba was reported incorrectly, we have no idea where
	 * the error is.
	 */
	start_lba = sectors_to_logical(sdev, blk_rq_pos(req));
	end_lba = start_lba + bytes_to_logical(sdev, scsi_bufflen(scmd));
	if (bad_lba < start_lba || bad_lba >= end_lba)
		return 0;

	/*
	 * resid is optional but mostly filled in.  When it's unused,
	 * its value is zero, so we assume the whole buffer transferred
	 */
	transferred = scsi_bufflen(scmd) - scsi_get_resid(scmd);

	/* This computation should always be done in terms of the
	 * resolution of the device's medium.
	 */
	good_bytes = logical_to_bytes(sdev, bad_lba - start_lba);

	/* Never report more than the device actually transferred. */
	return min(good_bytes, transferred);
}

L
Linus Torvalds 已提交
1966
/**
 *	sd_done - bottom half handler: called when the lower level
 *	driver has completed (successfully or otherwise) a scsi command.
 *	@SCpnt: mid-level's per command structure.
 *
 *	Note: potentially run from within an ISR. Must not block.
 *
 *	Returns the number of bytes that completed successfully.
 **/
static int sd_done(struct scsi_cmnd *SCpnt)
{
	int result = SCpnt->result;
	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
	unsigned int sector_size = SCpnt->device->sector_size;
	unsigned int resid;
	struct scsi_sense_hdr sshdr;
	struct request *req = scsi_cmd_to_rq(SCpnt);
	struct scsi_disk *sdkp = scsi_disk(req->q->disk);
	int sense_valid = 0;
	int sense_deferred = 0;

	switch (req_op(req)) {
	/*
	 * These operations carry no data payload in the usual sense;
	 * they either complete fully or not at all.
	 */
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		if (!result) {
			good_bytes = blk_rq_bytes(req);
			scsi_set_resid(SCpnt, 0);
		} else {
			good_bytes = 0;
			scsi_set_resid(SCpnt, blk_rq_bytes(req));
		}
		break;
	default:
		/*
		 * In case of bogus fw or device, we could end up having
		 * an unaligned partial completion. Check this here and force
		 * alignment.
		 */
		resid = scsi_get_resid(SCpnt);
		if (resid & (sector_size - 1)) {
			sd_printk(KERN_INFO, sdkp,
				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
				resid, sector_size);
			scsi_print_command(SCpnt);
			resid = min(scsi_bufflen(SCpnt),
				    round_up(resid, sector_size));
			scsi_set_resid(SCpnt, resid);
		}
	}

	if (result) {
		sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}
	/* Any completion resets the medium-access timeout accounting. */
	sdkp->medium_access_timed_out = 0;

	if (!scsi_status_is_check_condition(result) &&
	    (!sense_valid || sense_deferred))
		goto out;

	switch (sshdr.sense_key) {
	case HARDWARE_ERROR:
	case MEDIUM_ERROR:
		good_bytes = sd_completed_bytes(SCpnt);
		break;
	case RECOVERED_ERROR:
		good_bytes = scsi_bufflen(SCpnt);
		break;
	case NO_SENSE:
		/* This indicates a false check condition, so ignore it.  An
		 * unknown amount of data was transferred so treat it as an
		 * error.
		 */
		SCpnt->result = 0;
		memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		break;
	case ABORTED_COMMAND:
		if (sshdr.asc == 0x10)  /* DIF: Target detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
		break;
	case ILLEGAL_REQUEST:
		switch (sshdr.asc) {
		case 0x10:	/* DIX: Host detected corruption */
			good_bytes = sd_completed_bytes(SCpnt);
			break;
		case 0x20:	/* INVALID COMMAND OPCODE */
		case 0x24:	/* INVALID FIELD IN CDB */
			/*
			 * The device rejected a provisioning command;
			 * disable the corresponding feature so we stop
			 * issuing it.
			 */
			switch (SCpnt->cmnd[0]) {
			case UNMAP:
				sd_config_discard(sdkp, SD_LBP_DISABLE);
				break;
			case WRITE_SAME_16:
			case WRITE_SAME:
				if (SCpnt->cmnd[1] & 8) { /* UNMAP */
					sd_config_discard(sdkp, SD_LBP_DISABLE);
				} else {
					sdkp->device->no_write_same = 1;
					sd_config_write_same(sdkp);
					req->rq_flags |= RQF_QUIET;
				}
				break;
			}
		}
		break;
	default:
		break;
	}

 out:
	if (sd_is_zoned(sdkp))
		good_bytes = sd_zbc_complete(SCpnt, good_bytes, &sshdr);

	SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, SCpnt,
					   "sd_done: completed %d of %d bytes\n",
					   good_bytes, scsi_bufflen(SCpnt)));

	return good_bytes;
}

/*
 * spinup disk - called only in sd_revalidate_disk()
 *
 * Polls the device with TEST UNIT READY and, when it reports NOT READY,
 * issues START STOP UNIT and keeps polling until the drive is ready,
 * a terminal condition is seen, or the spin-up deadline expires.
 */
static void
sd_spinup_disk(struct scsi_disk *sdkp)
{
	unsigned char cmd[10];
	unsigned long spintime_expire = 0;
	int retries, spintime;
	unsigned int the_result;
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	int sense_valid = 0;

	spintime = 0;

	/* Spin up drives, as required.  Only do this at boot time */
	/* Spinup needs to be done for module loads too. */
	do {
		retries = 0;

		do {
			bool media_was_present = sdkp->media_present;

			cmd[0] = TEST_UNIT_READY;
			memset((void *) &cmd[1], 0, 9);

			the_result = scsi_execute_cmd(sdkp->device, cmd,
						      REQ_OP_DRV_IN, NULL, 0,
						      SD_TIMEOUT,
						      sdkp->max_retries,
						      &exec_args);

			/*
			 * If the drive has indicated to us that it
			 * doesn't have any media in it, don't bother
			 * with any more polling.
			 */
			if (media_not_present(sdkp, &sshdr)) {
				if (media_was_present)
					sd_printk(KERN_NOTICE, sdkp, "Media removed, stopped polling\n");
				return;
			}

			if (the_result)
				sense_valid = scsi_sense_valid(&sshdr);
			retries++;
		/* Retry the TUR a few times over transient UNIT ATTENTIONs. */
		} while (retries < 3 &&
			 (!scsi_status_is_good(the_result) ||
			  (scsi_status_is_check_condition(the_result) &&
			  sense_valid && sshdr.sense_key == UNIT_ATTENTION)));

		if (!scsi_status_is_check_condition(the_result)) {
			/* no sense, TUR either succeeded or failed
			 * with a status error */
			if(!spintime && !scsi_status_is_good(the_result)) {
				sd_print_result(sdkp, "Test Unit Ready failed",
						the_result);
			}
			break;
		}

		/*
		 * The device does not want the automatic start to be issued.
		 */
		if (sdkp->device->no_start_on_add)
			break;

		if (sense_valid && sshdr.sense_key == NOT_READY) {
			if (sshdr.asc == 4 && sshdr.ascq == 3)
				break;	/* manual intervention required */
			if (sshdr.asc == 4 && sshdr.ascq == 0xb)
				break;	/* standby */
			if (sshdr.asc == 4 && sshdr.ascq == 0xc)
				break;	/* unavailable */
			if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
				break;	/* sanitize in progress */
			/*
			 * Issue command to spin up drive when not ready
			 */
			if (!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Spinning up disk...");
				cmd[0] = START_STOP;
				cmd[1] = 1;	/* Return immediately */
				memset((void *) &cmd[2], 0, 8);
				cmd[4] = 1;	/* Start spin cycle */
				if (sdkp->device->start_stop_pwr_cond)
					cmd[4] |= 1 << 4;
				scsi_execute_cmd(sdkp->device, cmd,
						 REQ_OP_DRV_IN, NULL, 0,
						 SD_TIMEOUT, sdkp->max_retries,
						 &exec_args);
				/* Allow up to 100 seconds for spin-up. */
				spintime_expire = jiffies + 100 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
			printk(KERN_CONT ".");

		/*
		 * Wait for USB flash devices with slow firmware.
		 * Yes, this sense key/ASC combination shouldn't
		 * occur here.  It's characteristic of these devices.
		 */
		} else if (sense_valid &&
				sshdr.sense_key == UNIT_ATTENTION &&
				sshdr.asc == 0x28) {
			if (!spintime) {
				spintime_expire = jiffies + 5 * HZ;
				spintime = 1;
			}
			/* Wait 1 second for next try */
			msleep(1000);
		} else {
			/* we don't understand the sense code, so it's
			 * probably pointless to loop */
			if(!spintime) {
				sd_printk(KERN_NOTICE, sdkp, "Unit Not Ready\n");
				sd_print_sense_hdr(sdkp, &sshdr);
			}
			break;
		}

	} while (spintime && time_before_eq(jiffies, spintime_expire));

	if (spintime) {
		if (scsi_status_is_good(the_result))
			printk(KERN_CONT "ready\n");
		else
			printk(KERN_CONT "not responding...\n");
	}
}

2224 2225 2226
/*
 * Determine whether disk supports Data Integrity Field.
 *
 * @buffer holds the READ CAPACITY(16) parameter data (see the caller,
 * read_capacity_16()).  Returns 0 on success, -ENODEV when the disk is
 * formatted with a protection type the kernel does not support.
 */
static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdp = sdkp->device;
	u8 type;

	/* PROT_EN is bit 0 of byte 12 of the RC16 parameter data. */
	if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) {
		sdkp->protection_type = 0;
		return 0;
	}

	type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */

	if (type > T10_PI_TYPE3_PROTECTION) {
		sd_printk(KERN_ERR, sdkp, "formatted with unsupported"	\
			  " protection type %u. Disabling disk!\n",
			  type);
		sdkp->protection_type = 0;
		return -ENODEV;
	}

	sdkp->protection_type = type;

	return 0;
}

/*
 * Configure host-side DIF/DIX for this disk on first scan, based on the
 * protection type previously read from the device.
 */
static void sd_config_protection(struct scsi_disk *sdkp)
{
	struct scsi_device *sdp = sdkp->device;

	if (!sdkp->first_scan)
		return;

	sd_dif_config_host(sdkp);

	if (!sdkp->protection_type)
		return;

	if (!scsi_host_dif_capable(sdp->host, sdkp->protection_type)) {
		sd_printk(KERN_NOTICE, sdkp,
			  "Disabling DIF Type %u protection\n",
			  sdkp->protection_type);
		sdkp->protection_type = 0;
	}

	/*
	 * NOTE(review): when the host lacks DIF capability, the branch
	 * above zeroes protection_type, so this then logs "Enabling DIF
	 * Type 0 protection" right after the "Disabling" message —
	 * confirm whether an early return after disabling is intended.
	 */
	sd_printk(KERN_NOTICE, sdkp, "Enabling DIF Type %u protection\n",
		  sdkp->protection_type);
}

2275 2276 2277 2278
static void read_capacity_error(struct scsi_disk *sdkp, struct scsi_device *sdp,
			struct scsi_sense_hdr *sshdr, int sense_valid,
			int the_result)
{
H
Hannes Reinecke 已提交
2279
	if (sense_valid)
2280 2281 2282 2283 2284 2285 2286 2287 2288 2289
		sd_print_sense_hdr(sdkp, sshdr);
	else
		sd_printk(KERN_NOTICE, sdkp, "Sense not available.\n");

	/*
	 * Set dirty bit for removable devices if not ready -
	 * sometimes drives will not report this properly.
	 */
	if (sdp->removable &&
	    sense_valid && sshdr->sense_key == NOT_READY)
2290
		set_media_not_present(sdkp);
2291 2292 2293 2294 2295 2296 2297 2298 2299 2300 2301 2302 2303 2304

	/*
	 * We used to set media_present to 0 here to indicate no media
	 * in the drive, but some drives fail read capacity even with
	 * media present, so we can't do that.
	 */
	sdkp->capacity = 0; /* unknown mapped to zero - as usual */
}

#define RC16_LEN 32
#if RC16_LEN > SD_BUF_SIZE
#error RC16_LEN must not be more than SD_BUF_SIZE
#endif

#define READ_CAPACITY_RETRIES_ON_RESET	10

/*
 * Issue READ CAPACITY(16) and parse the parameter data into sdkp.
 *
 * Returns the logical sector size on success, -EINVAL when the command
 * failed or is unsupported (caller then falls back to READ CAPACITY(10)),
 * and -ENODEV when no media is present or the protection type is unusable.
 */
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	unsigned int alignment;
	unsigned long long lba;
	unsigned sector_size;

	if (sdp->no_read_capacity_16)
		return -EINVAL;

	do {
		memset(cmd, 0, 16);
		cmd[0] = SERVICE_ACTION_IN_16;
		cmd[1] = SAI_READ_CAPACITY_16;
		cmd[13] = RC16_LEN;
		memset(buffer, 0, RC16_LEN);

		the_result = scsi_execute_cmd(sdkp->device, cmd,
					      REQ_OP_DRV_IN, buffer, RC16_LEN,
					      SD_TIMEOUT,
					      sdkp->max_retries,
					      &exec_args);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == ILLEGAL_REQUEST &&
			    (sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
			    sshdr.ascq == 0x00)
				/* Invalid Command Operation Code or
				 * Invalid Field in CDB, just retry
				 * silently with RC10 */
				return -EINVAL;
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(16) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	sector_size = get_unaligned_be32(&buffer[8]);
	lba = get_unaligned_be64(&buffer[0]);

	if (sd_read_protection_type(sdkp, buffer) < 0) {
		sdkp->capacity = 0;
		return -ENODEV;
	}

	/* Logical blocks per physical block exponent */
	sdkp->physical_block_size = (1 << (buffer[13] & 0xf)) * sector_size;

	/* RC basis */
	sdkp->rc_basis = (buffer[12] >> 4) & 0x3;

	/* Lowest aligned logical block */
	alignment = ((buffer[14] & 0x3f) << 8 | buffer[15]) * sector_size;
	blk_queue_alignment_offset(sdp->request_queue, alignment);
	if (alignment && sdkp->first_scan)
		sd_printk(KERN_NOTICE, sdkp,
			  "physical block alignment offset: %u\n", alignment);

	if (buffer[14] & 0x80) { /* LBPME */
		sdkp->lbpme = 1;

		if (buffer[14] & 0x40) /* LBPRZ */
			sdkp->lbprz = 1;

		sd_config_discard(sdkp, SD_LBP_WS16);
	}

	sdkp->capacity = lba + 1;
	return sector_size;
}

/*
 * Issue READ CAPACITY(10) and parse the parameter data into sdkp.
 *
 * Returns the logical sector size on success, -EINVAL on failure and
 * -ENODEV when no media is present.
 */
static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
						unsigned char *buffer)
{
	unsigned char cmd[16];
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
	};
	int sense_valid = 0;
	int the_result;
	int retries = 3, reset_retries = READ_CAPACITY_RETRIES_ON_RESET;
	sector_t lba;
	unsigned sector_size;

	do {
		cmd[0] = READ_CAPACITY;
		memset(&cmd[1], 0, 9);
		memset(buffer, 0, 8);

		the_result = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, buffer,
					      8, SD_TIMEOUT, sdkp->max_retries,
					      &exec_args);

		if (media_not_present(sdkp, &sshdr))
			return -ENODEV;

		if (the_result > 0) {
			sense_valid = scsi_sense_valid(&sshdr);
			if (sense_valid &&
			    sshdr.sense_key == UNIT_ATTENTION &&
			    sshdr.asc == 0x29 && sshdr.ascq == 0x00)
				/* Device reset might occur several times,
				 * give it one more chance */
				if (--reset_retries > 0)
					continue;
		}
		retries--;

	} while (the_result && retries);

	if (the_result) {
		sd_print_result(sdkp, "Read Capacity(10) failed", the_result);
		read_capacity_error(sdkp, sdp, &sshdr, sense_valid, the_result);
		return -EINVAL;
	}

	sector_size = get_unaligned_be32(&buffer[4]);
	lba = get_unaligned_be32(&buffer[0]);

	if (sdp->no_read_capacity_16 && (lba == 0xffffffff)) {
		/* Some buggy (usb cardreader) devices return an lba of
		   0xffffffff when they want to report a size of 0 (with
		   which they really mean no media is present) */
		sdkp->capacity = 0;
		sdkp->physical_block_size = sector_size;
		return sector_size;
	}

	sdkp->capacity = lba + 1;
	sdkp->physical_block_size = sector_size;
	return sector_size;
}

2464 2465
static int sd_try_rc16_first(struct scsi_device *sdp)
{
2466 2467
	if (sdp->host->max_cmd_len < 16)
		return 0;
2468 2469
	if (sdp->try_rc_10_first)
		return 0;
2470 2471 2472 2473 2474 2475 2476
	if (sdp->scsi_level > SCSI_SPC_2)
		return 1;
	if (scsi_device_protection(sdp))
		return 1;
	return 0;
}

2477 2478 2479 2480 2481 2482 2483 2484 2485
/*
 * read disk capacity
 *
 * Tries READ CAPACITY(16) first when appropriate, falling back to
 * READ CAPACITY(10); applies capacity quirks and programs the block
 * layer's logical/physical block sizes.
 */
static void
sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int sector_size;
	struct scsi_device *sdp = sdkp->device;

	if (sd_try_rc16_first(sdp)) {
		sector_size = read_capacity_16(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size == -ENODEV)
			return;
		if (sector_size < 0)
			sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size < 0)
			return;
	} else {
		sector_size = read_capacity_10(sdkp, sdp, buffer);
		if (sector_size == -EOVERFLOW)
			goto got_data;
		if (sector_size < 0)
			return;
		/* RC10 caps out at 32 bits; retry with RC16 for big disks. */
		if ((sizeof(sdkp->capacity) > 4) &&
		    (sdkp->capacity > 0xffffffffULL)) {
			int old_sector_size = sector_size;
			sd_printk(KERN_NOTICE, sdkp, "Very big device. "
					"Trying to use READ CAPACITY(16).\n");
			sector_size = read_capacity_16(sdkp, sdp, buffer);
			if (sector_size < 0) {
				sd_printk(KERN_NOTICE, sdkp,
					"Using 0xffffffff as device size\n");
				sdkp->capacity = 1 + (sector_t) 0xffffffff;
				sector_size = old_sector_size;
				goto got_data;
			}
			/* Remember that READ CAPACITY(16) succeeded */
			sdp->try_rc_10_first = 0;
		}
	}

	/* Some devices are known to return the total number of blocks,
	 * not the highest block number.  Some devices have versions
	 * which do this and others which do not.  Some devices we might
	 * suspect of doing this but we don't know for certain.
	 *
	 * If we know the reported capacity is wrong, decrement it.  If
	 * we can only guess, then assume the number of blocks is even
	 * (usually true but not always) and err on the side of lowering
	 * the capacity.
	 */
	if (sdp->fix_capacity ||
	    (sdp->guess_capacity && (sdkp->capacity & 0x01))) {
		sd_printk(KERN_INFO, sdkp, "Adjusting the sector count "
				"from its reported value: %llu\n",
				(unsigned long long) sdkp->capacity);
		--sdkp->capacity;
	}

got_data:
	if (sector_size == 0) {
		sector_size = 512;
		sd_printk(KERN_NOTICE, sdkp, "Sector size 0 reported, "
			  "assuming 512.\n");
	}

	if (sector_size != 512 &&
	    sector_size != 1024 &&
	    sector_size != 2048 &&
	    sector_size != 4096) {
		sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
			  sector_size);
		/*
		 * The user might want to re-format the drive with
		 * a supported sectorsize.  Once this happens, it
		 * would be relatively trivial to set the thing up.
		 * For this reason, we leave the thing in the table.
		 */
		sdkp->capacity = 0;
		/*
		 * set a bogus sector size so the normal read/write
		 * logic in the block layer will eventually refuse any
		 * request on this device without tripping over power
		 * of two sector size assumptions
		 */
		sector_size = 512;
	}
	blk_queue_logical_block_size(sdp->request_queue, sector_size);
	blk_queue_physical_block_size(sdp->request_queue,
				      sdkp->physical_block_size);
	sdkp->device->sector_size = sector_size;

	/* LBAs beyond 32 bits need the 16-byte READ/WRITE CDBs. */
	if (sdkp->capacity > 0xffffffff)
		sdp->use_16_for_rw = 1;

}
L
Linus Torvalds 已提交
2575

2576 2577 2578 2579 2580 2581 2582 2583 2584
/*
 * Log the disk capacity in both SI and binary units, plus the physical
 * block size when it differs from the logical one.  Skipped on rescans
 * where the capacity is unchanged.
 */
static void
sd_print_capacity(struct scsi_disk *sdkp,
		  sector_t old_capacity)
{
	int lb_size = sdkp->device->sector_size;
	char size_base2[10];
	char size_base10[10];

	if (!sdkp->first_scan && old_capacity == sdkp->capacity)
		return;

	string_get_size(sdkp->capacity, lb_size, STRING_UNITS_2,
			size_base2, sizeof(size_base2));
	string_get_size(sdkp->capacity, lb_size, STRING_UNITS_10,
			size_base10, sizeof(size_base10));

	sd_printk(KERN_NOTICE, sdkp,
		  "%llu %d-byte logical blocks: (%s/%s)\n",
		  (unsigned long long)sdkp->capacity,
		  lb_size, size_base10, size_base2);

	if (sdkp->physical_block_size != lb_size)
		sd_printk(KERN_NOTICE, sdkp,
			  "%u-byte physical blocks\n",
			  sdkp->physical_block_size);
}

/* called with buffer of length 512 */
static inline int
2607
sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
2608 2609
		 unsigned char *buffer, int len, struct scsi_mode_data *data,
		 struct scsi_sense_hdr *sshdr)
L
Linus Torvalds 已提交
2610
{
2611 2612 2613 2614 2615 2616 2617
	/*
	 * If we must use MODE SENSE(10), make sure that the buffer length
	 * is at least 8 bytes so that the mode sense header fits.
	 */
	if (sdkp->device->use_10_for_ms && len < 8)
		len = 8;

2618 2619
	return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
			       SD_TIMEOUT, sdkp->max_retries, data,
2620
			       sshdr);
L
Linus Torvalds 已提交
2621 2622 2623 2624
}

/*
 * read write protect setting, if possible - called only in sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 *
 * Probes the device with MODE SENSE (up to three increasingly generous
 * attempts) and mirrors the WP bit into sdkp->write_prot and the gendisk
 * read-only flag.  On probe failure the disk is assumed write-enabled.
 */
static void
sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int res;
	struct scsi_device *sdp = sdkp->device;
	struct scsi_mode_data data;
	int old_wp = sdkp->write_prot;

	set_disk_ro(sdkp->disk, 0);
	if (sdp->skip_ms_page_3f) {
		sd_first_printk(KERN_NOTICE, sdkp, "Assuming Write Enabled\n");
		return;
	}

	if (sdp->use_192_bytes_for_3f) {
		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
	} else {
		/*
		 * First attempt: ask for all pages (0x3F), but only 4 bytes.
		 * We have to start carefully: some devices hang if we ask
		 * for more than is available.
		 */
		res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);

		/*
		 * Second attempt: ask for page 0 When only page 0 is
		 * implemented, a request for page 3F may return Sense Key
		 * 5: Illegal Request, Sense Code 24: Invalid field in
		 * CDB.
		 */
		if (res < 0)
			res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);

		/*
		 * Third attempt: ask 255 bytes, as we did earlier.
		 */
		if (res < 0)
			res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
					       &data, NULL);
	}

	if (res < 0) {
		sd_first_printk(KERN_WARNING, sdkp,
			  "Test WP failed, assume Write Enabled\n");
	} else {
		/* WP is bit 7 of the device-specific parameter byte. */
		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
		set_disk_ro(sdkp->disk, sdkp->write_prot);
		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
				  sdkp->write_prot ? "on" : "off");
			sd_printk(KERN_DEBUG, sdkp, "Mode Sense: %4ph\n", buffer);
		}
	}
}

/*
 * sd_read_cache_type - called only from sd_revalidate_disk()
 * called with buffer of length SD_BUF_SIZE
 *
 * Reads the Caching mode page (8) or the RBC device parameters page (6)
 * and sets sdkp->WCE, sdkp->RCD and sdkp->DPOFUA accordingly; falls back
 * to per-device defaults when the page cannot be obtained or parsed.
 */
static void
sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int len = 0, res;
	struct scsi_device *sdp = sdkp->device;

	int dbd;
	int modepage;
	int first_len;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int old_wce = sdkp->WCE;
	int old_rcd = sdkp->RCD;
	int old_dpofua = sdkp->DPOFUA;


	/* User forced a cache setting via sysfs; don't override it. */
	if (sdkp->cache_override)
		return;

	/* Pick the mode page and transfer length by device type/quirks. */
	first_len = 4;
	if (sdp->skip_ms_page_8) {
		if (sdp->type == TYPE_RBC)
			goto defaults;
		else {
			if (sdp->skip_ms_page_3f)
				goto defaults;
			modepage = 0x3F;
			if (sdp->use_192_bytes_for_3f)
				first_len = 192;
			dbd = 0;
		}
	} else if (sdp->type == TYPE_RBC) {
		modepage = 6;
		dbd = 8;
	} else {
		modepage = 8;
		dbd = 0;
	}

	/* cautiously ask */
	res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
			&data, &sshdr);

	if (res < 0)
		goto bad_sense;

	if (!data.header_length) {
		modepage = 6;
		first_len = 0;
		sd_first_printk(KERN_ERR, sdkp,
				"Missing header in MODE_SENSE response\n");
	}

	/* that went OK, now ask for the proper length */
	len = data.length;

	/*
	 * We're only interested in the first three bytes, actually.
	 * But the data cache page is defined for the first 20.
	 */
	if (len < 3)
		goto bad_sense;
	else if (len > SD_BUF_SIZE) {
		sd_first_printk(KERN_NOTICE, sdkp, "Truncating mode parameter "
			  "data from %d to %d bytes\n", len, SD_BUF_SIZE);
		len = SD_BUF_SIZE;
	}
	if (modepage == 0x3F && sdp->use_192_bytes_for_3f)
		len = 192;

	/* Get the data */
	if (len > first_len)
		res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
				&data, &sshdr);

	if (!res) {
		int offset = data.header_length + data.block_descriptor_length;

		/* Walk the returned mode pages looking for page 8 or 6. */
		while (offset < len) {
			u8 page_code = buffer[offset] & 0x3F;
			u8 spf       = buffer[offset] & 0x40;

			if (page_code == 8 || page_code == 6) {
				/* We're interested only in the first 3 bytes.
				 */
				if (len - offset <= 2) {
					sd_first_printk(KERN_ERR, sdkp,
						"Incomplete mode parameter "
							"data\n");
					goto defaults;
				} else {
					modepage = page_code;
					goto Page_found;
				}
			} else {
				/* Go to the next page */
				if (spf && len - offset > 3)
					offset += 4 + (buffer[offset+2] << 8) +
						buffer[offset+3];
				else if (!spf && len - offset > 1)
					offset += 2 + buffer[offset+1];
				else {
					sd_first_printk(KERN_ERR, sdkp,
							"Incomplete mode "
							"parameter data\n");
					goto defaults;
				}
			}
		}

		sd_first_printk(KERN_WARNING, sdkp,
				"No Caching mode page found\n");
		goto defaults;

	Page_found:
		if (modepage == 8) {
			sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
			sdkp->RCD = ((buffer[offset + 2] & 0x01) != 0);
		} else {
			sdkp->WCE = ((buffer[offset + 2] & 0x01) == 0);
			sdkp->RCD = 0;
		}

		sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
		if (sdp->broken_fua) {
			sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
			sdkp->DPOFUA = 0;
		} else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
			   !sdkp->device->use_16_for_rw) {
			sd_first_printk(KERN_NOTICE, sdkp,
				  "Uses READ/WRITE(6), disabling FUA\n");
			sdkp->DPOFUA = 0;
		}

		/* No cache flush allowed for write protected devices */
		if (sdkp->WCE && sdkp->write_prot)
			sdkp->WCE = 0;

		if (sdkp->first_scan || old_wce != sdkp->WCE ||
		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
			sd_printk(KERN_NOTICE, sdkp,
				  "Write cache: %s, read cache: %s, %s\n",
				  sdkp->WCE ? "enabled" : "disabled",
				  sdkp->RCD ? "disabled" : "enabled",
				  sdkp->DPOFUA ? "supports DPO and FUA"
				  : "doesn't support DPO or FUA");

		return;
	}

bad_sense:
	if (scsi_sense_valid(&sshdr) &&
	    sshdr.sense_key == ILLEGAL_REQUEST &&
	    sshdr.asc == 0x24 && sshdr.ascq == 0x0)
		/* Invalid field in CDB */
		sd_first_printk(KERN_NOTICE, sdkp, "Cache data unavailable\n");
	else
		sd_first_printk(KERN_ERR, sdkp,
				"Asking for cache data failed\n");

defaults:
	if (sdp->wce_default_on) {
		sd_first_printk(KERN_NOTICE, sdkp,
				"Assuming drive cache: write back\n");
		sdkp->WCE = 1;
	} else {
		sd_first_printk(KERN_WARNING, sdkp,
				"Assuming drive cache: write through\n");
		sdkp->WCE = 0;
	}
	sdkp->RCD = 0;
	sdkp->DPOFUA = 0;
}

2860 2861 2862 2863
/*
 * The ATO bit indicates whether the DIF application tag is available
 * for use by the operating system.
 *
 * Reads the Control mode page (0x0a) and sets sdkp->ATO when the bit
 * is reported; silently returns for non-disk devices or when no
 * protection type is in effect.
 */
static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
{
	int res, offset;
	struct scsi_device *sdp = sdkp->device;
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;

	if (sdp->type != TYPE_DISK && sdp->type != TYPE_ZBC)
		return;

	if (sdkp->protection_type == 0)
		return;

	res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
			      sdkp->max_retries, &data, &sshdr);

	if (res < 0 || !data.header_length ||
	    data.length < 6) {
		sd_first_printk(KERN_WARNING, sdkp,
			  "getting Control mode page failed, assume no ATO\n");

		if (scsi_sense_valid(&sshdr))
			sd_print_sense_hdr(sdkp, &sshdr);

		return;
	}

	offset = data.header_length + data.block_descriptor_length;

	/* Verify we actually got the Control mode page back. */
	if ((buffer[offset] & 0x3f) != 0x0a) {
		sd_first_printk(KERN_ERR, sdkp, "ATO Got wrong page\n");
		return;
	}

	/* ATO is bit 7 of byte 5 of the Control mode page. */
	if ((buffer[offset + 5] & 0x80) == 0)
		return;

	sdkp->ATO = 1;

	return;
}

2906 2907
/**
 * sd_read_block_limits - Query disk device for preferred I/O sizes.
 * @sdkp: disk to query
 *
 * Parses the Block Limits VPD page (0xb0) cached on the scsi_device and
 * records the transfer-size hints and (if present) the unmap/write-same
 * limits used to configure discard support.
 */
static void sd_read_block_limits(struct scsi_disk *sdkp)
{
	struct scsi_vpd *vpd;

	/* The cached VPD page is RCU-protected on the scsi_device */
	rcu_read_lock();

	vpd = rcu_dereference(sdkp->device->vpd_pgb0);
	if (!vpd || vpd->len < 16)
		goto out;

	/* Transfer length hints (in logical blocks) */
	sdkp->min_xfer_blocks = get_unaligned_be16(&vpd->data[6]);
	sdkp->max_xfer_blocks = get_unaligned_be32(&vpd->data[8]);
	sdkp->opt_xfer_blocks = get_unaligned_be32(&vpd->data[12]);

	/* The extended part of the page carries thin-provisioning limits */
	if (vpd->len >= 64) {
		unsigned int lba_count, desc_count;

		sdkp->max_ws_blocks = (u32)get_unaligned_be64(&vpd->data[36]);

		/* Without LBPME (logical block provisioning) we are done */
		if (!sdkp->lbpme)
			goto out;

		lba_count = get_unaligned_be32(&vpd->data[20]);
		desc_count = get_unaligned_be32(&vpd->data[24]);

		if (lba_count && desc_count)
			sdkp->max_unmap_blocks = lba_count;

		sdkp->unmap_granularity = get_unaligned_be32(&vpd->data[28]);

		/* Bit 7 of byte 32 marks the alignment field as valid */
		if (vpd->data[32] & 0x80)
			sdkp->unmap_alignment =
				get_unaligned_be32(&vpd->data[32]) & ~(1 << 31);

		if (!sdkp->lbpvpd) { /* LBP VPD page not provided */

			if (sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else
				sd_config_discard(sdkp, SD_LBP_WS16);

		} else {	/* LBP VPD page tells us what to use */

			if (sdkp->lbpu && sdkp->max_unmap_blocks)
				sd_config_discard(sdkp, SD_LBP_UNMAP);
			else if (sdkp->lbpws)
				sd_config_discard(sdkp, SD_LBP_WS16);
			else if (sdkp->lbpws10)
				sd_config_discard(sdkp, SD_LBP_WS10);
			else
				sd_config_discard(sdkp, SD_LBP_DISABLE);
		}
	}

 out:
	rcu_read_unlock();
}

2967 2968
/**
 * sd_read_block_characteristics - Query block dev. characteristics
 * @sdkp: disk to query
 *
 * Reads the Block Device Characteristics VPD page (0xb1) to learn the
 * rotation rate and the zoned model, then configures the request queue
 * and gendisk accordingly.
 */
static void sd_read_block_characteristics(struct scsi_disk *sdkp)
{
	struct request_queue *q = sdkp->disk->queue;
	struct scsi_vpd *vpd;
	u16 rot;
	u8 zoned;

	rcu_read_lock();
	vpd = rcu_dereference(sdkp->device->vpd_pgb1);

	if (!vpd || vpd->len < 8) {
		rcu_read_unlock();
	        return;
	}

	/* Copy the fields we need, then drop the RCU read lock */
	rot = get_unaligned_be16(&vpd->data[4]);
	zoned = (vpd->data[8] >> 4) & 3;
	rcu_read_unlock();

	/* Rotation rate of 1 means non-rotational (SSD) per SBC */
	if (rot == 1) {
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q);
	}

	if (sdkp->device->type == TYPE_ZBC) {
		/* Host-managed */
		disk_set_zoned(sdkp->disk, BLK_ZONED_HM);
	} else {
		sdkp->zoned = zoned;
		if (sdkp->zoned == 1) {
			/* Host-aware */
			disk_set_zoned(sdkp->disk, BLK_ZONED_HA);
		} else {
			/* Regular disk or drive managed disk */
			disk_set_zoned(sdkp->disk, BLK_ZONED_NONE);
		}
	}

	/* Only announce the zoned model on the first scan */
	if (!sdkp->first_scan)
		return;

	if (blk_queue_is_zoned(q)) {
		sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
		      q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
	} else {
		if (sdkp->zoned == 1)
			sd_printk(KERN_NOTICE, sdkp,
				  "Host-aware SMR disk used as regular disk\n");
		else if (sdkp->zoned == 2)
			sd_printk(KERN_NOTICE, sdkp,
				  "Drive-managed SMR disk\n");
	}
}

3025
/**
 * sd_read_block_provisioning - Query provisioning VPD page
 * @sdkp: disk to query
 *
 * Reads the Logical Block Provisioning VPD page (0xb2) and records which
 * unmap mechanisms (UNMAP, WRITE SAME(16)/(10) with UNMAP) the device
 * advertises.  Only consulted when LBPME was set in READ CAPACITY(16).
 */
static void sd_read_block_provisioning(struct scsi_disk *sdkp)
{
	struct scsi_vpd *vpd;

	if (sdkp->lbpme == 0)
		return;

	rcu_read_lock();
	vpd = rcu_dereference(sdkp->device->vpd_pgb2);

	if (!vpd || vpd->len < 8) {
		rcu_read_unlock();
		return;
	}

	/* Remember that the LBP page itself was provided */
	sdkp->lbpvpd	= 1;
	sdkp->lbpu	= (vpd->data[5] >> 7) & 1; /* UNMAP */
	sdkp->lbpws	= (vpd->data[5] >> 6) & 1; /* WRITE SAME(16) w/ UNMAP */
	sdkp->lbpws10	= (vpd->data[5] >> 5) & 1; /* WRITE SAME(10) w/ UNMAP */
	rcu_read_unlock();
}

3051 3052
/*
 * sd_read_write_same - probe WRITE SAME support via REPORT SUPPORTED
 * OPERATION CODES and record the result in sdkp->ws10/ws16.
 */
static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdev = sdkp->device;

	/* Host-wide opt-out propagates to the device */
	if (sdev->host->no_write_same) {
		sdev->no_write_same = 1;

		return;
	}

	/* Probe with INQUIRY first; a failure means RSOC is unsupported */
	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, INQUIRY) < 0) {
		struct scsi_vpd *vpd;

		sdev->no_report_opcodes = 1;

		/* Disable WRITE SAME if REPORT SUPPORTED OPERATION
		 * CODES is unsupported and the device has an ATA
		 * Information VPD page (SAT).
		 */
		rcu_read_lock();
		vpd = rcu_dereference(sdev->vpd_pg89);
		if (vpd)
			sdev->no_write_same = 1;
		rcu_read_unlock();
	}

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME_16) == 1)
		sdkp->ws16 = 1;

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE, WRITE_SAME) == 1)
		sdkp->ws10 = 1;
}

3084 3085 3086 3087 3088 3089 3090 3091 3092 3093 3094 3095 3096 3097
/*
 * sd_read_security - check whether the device supports the SECURITY
 * PROTOCOL IN and OUT commands and, if both are supported, mark the
 * disk as security-capable (enables TCG Opal setup in sd_probe()).
 */
static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
{
	struct scsi_device *sdev = sdkp->device;

	if (!sdev->security_supported)
		return;

	/* Probe SECURITY PROTOCOL IN first; skip OUT if IN is absent */
	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
			       SECURITY_PROTOCOL_IN) != 1)
		return;

	if (scsi_report_opcode(sdev, buffer, SD_BUF_SIZE,
			       SECURITY_PROTOCOL_OUT) == 1)
		sdkp->security = 1;
}

3098 3099 3100 3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121 3122 3123 3124 3125 3126 3127 3128 3129 3130 3131 3132 3133 3134
/* Decode a big-endian 64-bit logical block value at @buf and convert it
 * to kernel sector units for @sdkp's device. */
static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
{
	return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
}

/**
 * sd_read_cpr - Query concurrent positioning ranges
 * @sdkp:	disk to query
 *
 * Reads the Concurrent Positioning Ranges VPD page (0xb9) and exposes the
 * ranges (multi-actuator layout) to the block layer as independent access
 * ranges.  Owns @buffer locally; always freed on exit.
 */
static void sd_read_cpr(struct scsi_disk *sdkp)
{
	struct blk_independent_access_ranges *iars = NULL;
	unsigned char *buffer = NULL;
	unsigned int nr_cpr = 0;
	int i, vpd_len, buf_len = SD_BUF_SIZE;
	u8 *desc;

	/*
	 * We need to have the capacity set first for the block layer to be
	 * able to check the ranges.
	 */
	if (sdkp->first_scan)
		return;

	if (!sdkp->capacity)
		goto out;

	/*
	 * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
	 * leading to a maximum page size of 64 + 256*32 bytes.
	 */
	buf_len = 64 + 256*32;
	buffer = kmalloc(buf_len, GFP_KERNEL);
	if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
		goto out;

	/* We must have at least a 64B header and one 32B range descriptor */
	vpd_len = get_unaligned_be16(&buffer[2]) + 4;
	if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
		sd_printk(KERN_ERR, sdkp,
			  "Invalid Concurrent Positioning Ranges VPD page\n");
		goto out;
	}

	/* A single range is equivalent to no ranges at all */
	nr_cpr = (vpd_len - 64) / 32;
	if (nr_cpr == 1) {
		nr_cpr = 0;
		goto out;
	}

	iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
	if (!iars) {
		nr_cpr = 0;
		goto out;
	}

	/* Descriptors must be numbered 0..n-1 in order */
	desc = &buffer[64];
	for (i = 0; i < nr_cpr; i++, desc += 32) {
		if (desc[0] != i) {
			sd_printk(KERN_ERR, sdkp,
				"Invalid Concurrent Positioning Range number\n");
			nr_cpr = 0;
			break;
		}

		iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
		iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
	}

out:
	/* iars (possibly NULL) ownership passes to the block layer here */
	disk_set_independent_access_ranges(sdkp->disk, iars);
	if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
		sd_printk(KERN_NOTICE, sdkp,
			  "%u concurrent positioning ranges\n", nr_cpr);
		sdkp->nr_actuators = nr_cpr;
	}

	kfree(buffer);
}

3178 3179 3180 3181 3182 3183 3184 3185 3186 3187 3188 3189 3190 3191 3192 3193 3194 3195 3196 3197 3198 3199 3200
/*
 * Validate the device-reported preferred minimum transfer size.  Returns
 * true if the value is usable; on a physical-block-size mismatch the
 * stored value is cleared and false is returned.
 */
static bool sd_validate_min_xfer_size(struct scsi_disk *sdkp)
{
	struct scsi_device *sdev = sdkp->device;
	unsigned int bytes = logical_to_bytes(sdev, sdkp->min_xfer_blocks);

	if (!sdkp->min_xfer_blocks)
		return false;

	/* physical_block_size is a power of two, so mask tests alignment */
	if (bytes & (sdkp->physical_block_size - 1)) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Preferred minimum I/O size %u bytes not a " \
				"multiple of physical block size (%u bytes)\n",
				bytes, sdkp->physical_block_size);
		sdkp->min_xfer_blocks = 0;
		return false;
	}

	sd_first_printk(KERN_INFO, sdkp, "Preferred minimum I/O size %u bytes\n",
			bytes);
	return true;
}

3201 3202 3203 3204 3205 3206 3207 3208 3209 3210 3211
/*
 * Determine the device's preferred I/O size for reads and writes
 * unless the reported value is unreasonably small, large, not a
 * multiple of the physical block size, or simply garbage.
 *
 * Returns true when sdkp->opt_xfer_blocks can be used to set io_opt and
 * the default max_sectors; false to fall back to driver defaults.
 */
static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
				      unsigned int dev_max)
{
	struct scsi_device *sdp = sdkp->device;
	unsigned int opt_xfer_bytes =
		logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
	unsigned int min_xfer_bytes =
		logical_to_bytes(sdp, sdkp->min_xfer_blocks);

	if (sdkp->opt_xfer_blocks == 0)
		return false;

	/* Must not exceed the device's own maximum transfer length */
	if (sdkp->opt_xfer_blocks > dev_max) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> dev_max (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, dev_max);
		return false;
	}

	/* ... nor the sd driver's own cap */
	if (sdkp->opt_xfer_blocks > SD_DEF_XFER_BLOCKS) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u logical blocks " \
				"> sd driver limit (%u logical blocks)\n",
				sdkp->opt_xfer_blocks, SD_DEF_XFER_BLOCKS);
		return false;
	}

	/* Anything below a page is considered garbage */
	if (opt_xfer_bytes < PAGE_SIZE) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes < " \
				"PAGE_SIZE (%u bytes)\n",
				opt_xfer_bytes, (unsigned int)PAGE_SIZE);
		return false;
	}

	if (min_xfer_bytes && opt_xfer_bytes % min_xfer_bytes) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes not a " \
				"multiple of preferred minimum block " \
				"size (%u bytes)\n",
				opt_xfer_bytes, min_xfer_bytes);
		return false;
	}

	if (opt_xfer_bytes & (sdkp->physical_block_size - 1)) {
		sd_first_printk(KERN_WARNING, sdkp,
				"Optimal transfer size %u bytes not a " \
				"multiple of physical block size (%u bytes)\n",
				opt_xfer_bytes, sdkp->physical_block_size);
		return false;
	}

	sd_first_printk(KERN_INFO, sdkp, "Optimal transfer size %u bytes\n",
			opt_xfer_bytes);
	return true;
}

L
Linus Torvalds 已提交
3264 3265 3266 3267 3268 3269 3270 3271 3272
/**
 *	sd_revalidate_disk - called the first time a new disk is seen,
 *	performs disk spin up, read_capacity, etc.
 *	@disk: struct gendisk we care about
 *
 *	Also re-run on media change / rescan.  Always returns 0; errors are
 *	reflected in the resulting device state instead.
 **/
static int sd_revalidate_disk(struct gendisk *disk)
{
	struct scsi_disk *sdkp = scsi_disk(disk);
	struct scsi_device *sdp = sdkp->device;
	struct request_queue *q = sdkp->disk->queue;
	sector_t old_capacity = sdkp->capacity;
	unsigned char *buffer;
	unsigned int dev_max, rw_max;

	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp,
				      "sd_revalidate_disk\n"));

	/*
	 * If the device is offline, don't try and read capacity or any
	 * of the other niceties.
	 */
	if (!scsi_device_online(sdp))
		goto out;

	/* Scratch buffer shared by all the sd_read_* helpers below */
	buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
	if (!buffer) {
		sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
			  "allocation failure.\n");
		goto out;
	}

	sd_spinup_disk(sdkp);

	/*
	 * Without media there is no reason to ask; moreover, some devices
	 * react badly if we do.
	 */
	if (sdkp->media_present) {
		sd_read_capacity(sdkp, buffer);

		/*
		 * set the default to rotational.  All non-rotational devices
		 * support the block characteristics VPD page, which will
		 * cause this to be updated correctly and any device which
		 * doesn't support it should be treated as rotational.
		 */
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
		blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q);

		if (scsi_device_supports_vpd(sdp)) {
			sd_read_block_provisioning(sdkp);
			sd_read_block_limits(sdkp);
			sd_read_block_characteristics(sdkp);
			sd_zbc_read_zones(sdkp, buffer);
			sd_read_cpr(sdkp);
		}

		sd_print_capacity(sdkp, old_capacity);

		sd_read_write_protect_flag(sdkp, buffer);
		sd_read_cache_type(sdkp, buffer);
		sd_read_app_tag_own(sdkp, buffer);
		sd_read_write_same(sdkp, buffer);
		sd_read_security(sdkp, buffer);
		sd_config_protection(sdkp);
	}

	/*
	 * We now have all cache related info, determine how we deal
	 * with flush requests.
	 */
	sd_set_flush_flag(sdkp);

	/* Initial block count limit based on CDB TRANSFER LENGTH field size. */
	dev_max = sdp->use_16_for_rw ? SD_MAX_XFER_BLOCKS : SD_DEF_XFER_BLOCKS;

	/* Some devices report a maximum block count for READ/WRITE requests. */
	dev_max = min_not_zero(dev_max, sdkp->max_xfer_blocks);
	q->limits.max_dev_sectors = logical_to_sectors(sdp, dev_max);

	if (sd_validate_min_xfer_size(sdkp))
		blk_queue_io_min(sdkp->disk->queue,
				 logical_to_bytes(sdp, sdkp->min_xfer_blocks));
	else
		blk_queue_io_min(sdkp->disk->queue, 0);

	if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
		q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
		rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
	} else {
		q->limits.io_opt = 0;
		rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
				      (sector_t)BLK_DEF_MAX_SECTORS);
	}

	/*
	 * Limit default to SCSI host optimal sector limit if set. There may be
	 * an impact on performance for when the size of a request exceeds this
	 * host limit.
	 */
	rw_max = min_not_zero(rw_max, sdp->host->opt_sectors);

	/* Do not exceed controller limit */
	rw_max = min(rw_max, queue_max_hw_sectors(q));

	/*
	 * Only update max_sectors if previously unset or if the current value
	 * exceeds the capabilities of the hardware.
	 */
	if (sdkp->first_scan ||
	    q->limits.max_sectors > q->limits.max_dev_sectors ||
	    q->limits.max_sectors > q->limits.max_hw_sectors)
		q->limits.max_sectors = rw_max;

	sdkp->first_scan = 0;

	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
	sd_config_write_same(sdkp);
	kfree(buffer);

	/*
	 * For a zoned drive, revalidating the zones can be done only once
	 * the gendisk capacity is set. So if this fails, set back the gendisk
	 * capacity to 0.
	 */
	if (sd_zbc_revalidate_zones(sdkp))
		set_capacity_and_notify(disk, 0);

 out:
	return 0;
}

3396 3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410 3411 3412 3413 3414 3415
/**
 *	sd_unlock_native_capacity - unlock native capacity
 *	@disk: struct gendisk to set capacity for
 *
 *	Invoked by the block layer when partitions on @disk extend past the
 *	device's reported end.  Gives the SCSI host a chance to grow the
 *	device capacity via its optional ->unlock_native_capacity() hook.
 *
 *	CONTEXT:
 *	Defined by block layer.  Might sleep.
 */
static void sd_unlock_native_capacity(struct gendisk *disk)
{
	struct scsi_device *sdev = scsi_disk(disk)->device;

	if (!sdev->host->hostt->unlock_native_capacity)
		return;

	sdev->host->hostt->unlock_native_capacity(sdev);
}

3416 3417 3418 3419 3420 3421 3422 3423 3424 3425 3426 3427
/**
 *	sd_format_disk_name - format disk name
 *	@prefix: name prefix - ie. "sd" for SCSI disks
 *	@index: index of the disk to format name for
 *	@buf: output buffer
 *	@buflen: length of the output buffer
 *
 *	SCSI disk names starts at sda.  The 26th device is sdz and the
 *	27th is sdaa.  The last one for two lettered suffix is sdzz
 *	which is followed by sdaaa.
 *
 *	This is basically 26 base counting with one extra 'nil' entry
 *	at the beginning from the second digit on and can be
 *	determined using similar method as 26 base conversion with the
 *	index shifted -1 after each digit is computed.
 *
 *	CONTEXT:
 *	Don't care.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sd_format_disk_name(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	size_t plen = strlen(prefix);
	char *stop = buf + plen;	/* first byte reserved for the suffix */
	char *cur = buf + buflen - 1;	/* build the suffix right-to-left */

	*cur = '\0';
	for (;;) {
		if (cur == stop)
			return -EINVAL;	/* ran out of room for more digits */
		*--cur = 'a' + (index % radix);
		index = index / radix - 1;	/* shift for the 'nil' entry */
		if (index < 0)
			break;
	}

	/* Slide the suffix down next to where the prefix will live */
	memmove(stop, cur, (buf + buflen) - cur);
	memcpy(buf, prefix, plen);

	return 0;
}

L
Linus Torvalds 已提交
3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476
/**
 *	sd_probe - called during driver initialization and whenever a
 *	new scsi device is attached to the system. It is called once
 *	for each scsi device (not just disks) present.
 *	@dev: pointer to device object
 *
 *	Returns 0 if successful (or not interested in this scsi device 
 *	(e.g. scanner)); 1 when there is an error.
 *
 *	Note: this function is invoked from the scsi mid-level.
 *	This function sets up the mapping between a given 
 *	<host,channel,id,lun> (found in sdp) and new device name 
 *	(e.g. /dev/sda). More precisely it is the block device major 
 *	and minor number that is chosen here.
 *
 *	Assume sd_probe is not re-entrant (for time being)
 *	Also think about sd_probe() and sd_remove() running coincidentally.
 **/
static int sd_probe(struct device *dev)
{
	struct scsi_device *sdp = to_scsi_device(dev);
	struct scsi_disk *sdkp;
	struct gendisk *gd;
	int index;
	int error;

	/* Keep the device awake for the duration of probing */
	scsi_autopm_get_device(sdp);
	error = -ENODEV;
	if (sdp->type != TYPE_DISK &&
	    sdp->type != TYPE_ZBC &&
	    sdp->type != TYPE_MOD &&
	    sdp->type != TYPE_RBC)
		goto out;

	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
		sdev_printk(KERN_WARNING, sdp,
			    "Unsupported ZBC host-managed device.\n");
		goto out;
	}

	SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
					"sd_probe\n"));

	error = -ENOMEM;
	sdkp = kzalloc(sizeof(*sdkp), GFP_KERNEL);
	if (!sdkp)
		goto out;

	gd = blk_mq_alloc_disk_for_queue(sdp->request_queue,
					 &sd_bio_compl_lkclass);
	if (!gd)
		goto out_free;

	/* Allocate a global sd index; it determines name and major/minor */
	index = ida_alloc(&sd_index_ida, GFP_KERNEL);
	if (index < 0) {
		sdev_printk(KERN_WARNING, sdp, "sd_probe: memory exhausted.\n");
		goto out_put;
	}

	error = sd_format_disk_name("sd", index, gd->disk_name, DISK_NAME_LEN);
	if (error) {
		sdev_printk(KERN_WARNING, sdp, "SCSI disk (sd) name length exceeded.\n");
		goto out_free_index;
	}

	sdkp->device = sdp;
	sdkp->disk = gd;
	sdkp->index = index;
	sdkp->max_retries = SD_MAX_RETRIES;
	atomic_set(&sdkp->openers, 0);
	atomic_set(&sdkp->device->ioerr_cnt, 0);

	/* Honor any timeout already configured by the LLD */
	if (!sdp->request_queue->rq_timeout) {
		if (sdp->type != TYPE_MOD)
			blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
		else
			blk_queue_rq_timeout(sdp->request_queue,
					     SD_MOD_TIMEOUT);
	}

	device_initialize(&sdkp->disk_dev);
	sdkp->disk_dev.parent = get_device(dev);
	sdkp->disk_dev.class = &sd_disk_class;
	dev_set_name(&sdkp->disk_dev, "%s", dev_name(dev));

	error = device_add(&sdkp->disk_dev);
	if (error) {
		/* put_device() releases sdkp via scsi_disk_release() */
		put_device(&sdkp->disk_dev);
		goto out;
	}

	dev_set_drvdata(dev, sdkp);

	gd->major = sd_major((index & 0xf0) >> 4);
	gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
	gd->minors = SD_MINORS;

	gd->fops = &sd_fops;
	gd->private_data = sdkp;

	/* defaults, until the device tells us otherwise */
	sdp->sector_size = 512;
	sdkp->capacity = 0;
	sdkp->media_present = 1;
	sdkp->write_prot = 0;
	sdkp->cache_override = 0;
	sdkp->WCE = 0;
	sdkp->RCD = 0;
	sdkp->ATO = 0;
	sdkp->first_scan = 1;
	sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS;

	sd_revalidate_disk(gd);

	if (sdp->removable) {
		gd->flags |= GENHD_FL_REMOVABLE;
		gd->events |= DISK_EVENT_MEDIA_CHANGE;
		gd->event_flags = DISK_EVENT_FLAG_POLL | DISK_EVENT_FLAG_UEVENT;
	}

	blk_pm_runtime_init(sdp->request_queue, dev);
	if (sdp->rpm_autosuspend) {
		pm_runtime_set_autosuspend_delay(dev,
			sdp->host->hostt->rpm_autosuspend_delay);
	}

	error = device_add_disk(dev, gd, NULL);
	if (error) {
		put_device(&sdkp->disk_dev);
		put_disk(gd);
		goto out;
	}

	if (sdkp->security) {
		sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
		if (sdkp->opal_dev)
			sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
	}

	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
		  sdp->removable ? "removable " : "");
	scsi_autopm_put_device(sdp);

	return 0;

 out_free_index:
	ida_free(&sd_index_ida, index);
 out_put:
	put_disk(gd);
 out_free:
	kfree(sdkp);
 out:
	scsi_autopm_put_device(sdp);
	return error;
}

/**
 *	sd_remove - called whenever a scsi disk (previously recognized by
 *	sd_probe) is detached from the system. It is called (potentially
 *	multiple times) during sd module unload.
 *	@dev: pointer to device object
 *
 *	Note: this function is invoked from the scsi mid-level.
 *	This function potentially frees up a device name (e.g. /dev/sdc)
 *	that could be re-used by a subsequent sd_probe().
 *	This function is not called when the built-in sd driver is "exit-ed".
 **/
static int sd_remove(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	/* Resume the device so the teardown commands can reach it */
	scsi_autopm_get_device(sdkp->device);

	device_del(&sdkp->disk_dev);
	del_gendisk(sdkp->disk);
	sd_shutdown(dev);

	/* Final put; scsi_disk_release() frees sdkp when the refs drop */
	put_disk(sdkp->disk);
	return 0;
}

3643
/*
 * Release callback for the scsi_disk's embedded struct device; runs when
 * the last reference to sdkp->disk_dev is dropped.  Frees everything
 * sd_probe() allocated for the disk.
 */
static void scsi_disk_release(struct device *dev)
{
	struct scsi_disk *sdkp = to_scsi_disk(dev);

	/* Return the index so the name (e.g. sdc) can be reused */
	ida_free(&sd_index_ida, sdkp->index);
	sd_zbc_free_zone_info(sdkp);
	put_device(&sdkp->device->sdev_gendev);
	free_opal_dev(sdkp->opal_dev);

	kfree(sdkp);
}

3655
/*
 * Issue a START STOP UNIT command to spin the disk up (@start != 0) or
 * down.  Returns 0 on success, -ENODEV if the device is offline, -EIO on
 * command failure ("medium not present" is treated as success).
 */
static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
{
	unsigned char cmd[6] = { START_STOP };	/* START_VALID */
	struct scsi_sense_hdr sshdr;
	const struct scsi_exec_args exec_args = {
		.sshdr = &sshdr,
		.req_flags = BLK_MQ_REQ_PM,
	};
	struct scsi_device *sdp = sdkp->device;
	int res;

	if (start)
		cmd[4] |= 1;	/* START */

	/* Use power conditions when the device prefers them */
	if (sdp->start_stop_pwr_cond)
		cmd[4] |= start ? 1 << 4 : 3 << 4;	/* Active or Standby */

	if (!scsi_device_online(sdp))
		return -ENODEV;

	res = scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0, SD_TIMEOUT,
			       sdkp->max_retries, &exec_args);
	if (res) {
		sd_print_result(sdkp, "Start/Stop Unit failed", res);
		if (res > 0 && scsi_sense_valid(&sshdr)) {
			sd_print_sense_hdr(sdkp, &sshdr);
			/* 0x3a is medium not present */
			if (sshdr.asc == 0x3a)
				res = 0;
		}
	}

	/* SCSI error codes must not go to the generic layer */
	if (res)
		return -EIO;

	return 0;
}

L
Linus Torvalds 已提交
3694 3695 3696 3697 3698 3699 3700
/*
 * Send a SYNCHRONIZE CACHE instruction down to the device through
 * the normal SCSI command structure.  Wait for the command to
 * complete.
 */
static void sd_shutdown(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);

	if (!sdkp)
		return;         /* this can happen */

	/* A runtime-suspended disk has already flushed and stopped */
	if (pm_runtime_suspended(dev))
		return;

	if (sdkp->WCE && sdkp->media_present) {
		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		sd_sync_cache(sdkp, NULL);
	}

	/* Don't spin disks down across a restart; only on real shutdown */
	if (system_state != SYSTEM_RESTART && sdkp->device->manage_start_stop) {
		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		sd_start_stop_device(sdkp, 0);
	}
}
L
Linus Torvalds 已提交
3719

3720
/*
 * Common suspend path: flush the write cache (when enabled) and stop the
 * disk.  @ignore_stop_errors selects system-sleep semantics, where a
 * failed spin-down must not abort the suspend.
 */
static int sd_suspend_common(struct device *dev, bool ignore_stop_errors)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_sense_hdr sshdr;
	int ret = 0;

	if (!sdkp)	/* E.g.: runtime suspend following sd_remove() */
		return 0;

	if (sdkp->WCE && sdkp->media_present) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
		ret = sd_sync_cache(sdkp, &sshdr);

		if (ret) {
			/* ignore OFFLINE device */
			if (ret == -ENODEV)
				return 0;

			if (!scsi_sense_valid(&sshdr) ||
			    sshdr.sense_key != ILLEGAL_REQUEST)
				return ret;

			/*
			 * sshdr.sense_key == ILLEGAL_REQUEST means this drive
			 * doesn't support sync. There's not much to do and
			 * suspend shouldn't fail.
			 */
			ret = 0;
		}
	}

	if (sdkp->device->manage_start_stop) {
		if (!sdkp->device->silence_suspend)
			sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
		/* an error is not worth aborting a system sleep */
		ret = sd_start_stop_device(sdkp, 0);
		if (ignore_stop_errors)
			ret = 0;
	}

	return ret;
}

3764 3765
static int sd_suspend_system(struct device *dev)
{
3766 3767 3768
	if (pm_runtime_suspended(dev))
		return 0;

3769 3770 3771 3772 3773 3774 3775 3776
	return sd_suspend_common(dev, true);
}

/* Runtime-PM suspend: stop errors are reported (not ignored) here. */
static int sd_suspend_runtime(struct device *dev)
{
	return sd_suspend_common(dev, false);
}

3777 3778
/*
 * Common resume path: spin the disk back up if this driver manages
 * start/stop, and unlock any Opal-locked ranges afterwards.
 */
static int sd_resume(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	int ret;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	if (!sdkp->device->manage_start_stop)
		return 0;

	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
	ret = sd_start_stop_device(sdkp, 1);
	if (!ret)
		opal_unlock_from_suspend(sdkp->opal_dev);
	return ret;
}

3795 3796
/* System-sleep resume: a device left runtime-suspended stays stopped;
 * otherwise go through the common resume path. */
static int sd_resume_system(struct device *dev)
{
	return pm_runtime_suspended(dev) ? 0 : sd_resume(dev);
}

3803 3804 3805
/*
 * Runtime-PM resume: optionally clear stale sense data (for devices whose
 * media-change events are ignored), then run the common resume path.
 */
static int sd_resume_runtime(struct device *dev)
{
	struct scsi_disk *sdkp = dev_get_drvdata(dev);
	struct scsi_device *sdp;

	if (!sdkp)	/* E.g.: runtime resume at the start of sd_probe() */
		return 0;

	sdp = sdkp->device;

	if (sdp->ignore_media_change) {
		/* clear the device's sense data */
		static const u8 cmd[10] = { REQUEST_SENSE };
		const struct scsi_exec_args exec_args = {
			.req_flags = BLK_MQ_REQ_PM,
		};

		if (scsi_execute_cmd(sdp, cmd, REQ_OP_DRV_IN, NULL, 0,
				     sdp->request_queue->rq_timeout, 1,
				     &exec_args))
			sd_printk(KERN_NOTICE, sdkp,
				  "Failed to clear sense data\n");
	}

	return sd_resume(dev);
}

L
Linus Torvalds 已提交
3830 3831 3832 3833 3834 3835 3836 3837
/**
 *	init_sd - entry point for this driver (both when built in or when
 *	a module).
 *
 *	Note: this function registers this driver with the scsi mid-level.
 **/
static int __init init_sd(void)
{
	int majors = 0, i, err;

	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));

	/* Claim as many of the sd major numbers as are still free */
	for (i = 0; i < SD_MAJORS; i++) {
		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
			continue;
		majors++;
	}

	if (!majors)
		return -ENODEV;

	err = class_register(&sd_disk_class);
	if (err)
		goto err_out;

	sd_page_pool = mempool_create_page_pool(SD_MEMPOOL_SIZE, 0);
	if (!sd_page_pool) {
		printk(KERN_ERR "sd: can't init discard page pool\n");
		err = -ENOMEM;
		goto err_out_class;
	}

	err = scsi_register_driver(&sd_template.gendrv);
	if (err)
		goto err_out_driver;

	return 0;

err_out_driver:
	mempool_destroy(sd_page_pool);
err_out_class:
	class_unregister(&sd_disk_class);
err_out:
	/* Release every major we managed to register above */
	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
	return err;
}

/**
 *	exit_sd - exit point for this driver (when it is a module).
 *
 *	Note: this function unregisters this driver from the scsi mid-level.
 **/
static void __exit exit_sd(void)
{
	int i;

	SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));

	/* Tear down in reverse order of init_sd() */
	scsi_unregister_driver(&sd_template.gendrv);
	mempool_destroy(sd_page_pool);

	class_unregister(&sd_disk_class);

	for (i = 0; i < SD_MAJORS; i++)
		unregister_blkdev(sd_major(i), "sd");
}

module_init(init_sd);
module_exit(exit_sd);
3900

3901
/* Print a decoded sense header, tagged with the disk name when known. */
void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
{
	scsi_print_sense_hdr(sdkp->device,
			     sdkp->disk ? sdkp->disk->disk_name : NULL, sshdr);
}

3907
void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result)
3908
{
H
Hannes Reinecke 已提交
3909 3910
	const char *hb_string = scsi_hostbyte_string(result);

3911
	if (hb_string)
H
Hannes Reinecke 已提交
3912 3913 3914
		sd_printk(KERN_INFO, sdkp,
			  "%s: Result: hostbyte=%s driverbyte=%s\n", msg,
			  hb_string ? hb_string : "invalid",
3915
			  "DRIVER_OK");
H
Hannes Reinecke 已提交
3916 3917
	else
		sd_printk(KERN_INFO, sdkp,
3918 3919
			  "%s: Result: hostbyte=0x%02x driverbyte=%s\n",
			  msg, host_byte(result), "DRIVER_OK");
3920
}