virtio_blk.c 24.1 KB
Newer Older
R
Rusty Russell 已提交
1 2
//#define DEBUG
#include <linux/spinlock.h>
3
#include <linux/slab.h>
R
Rusty Russell 已提交
4 5
#include <linux/blkdev.h>
#include <linux/hdreg.h>
6
#include <linux/module.h>
7
#include <linux/mutex.h>
8
#include <linux/interrupt.h>
R
Rusty Russell 已提交
9 10
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
11
#include <linux/scatterlist.h>
12
#include <linux/string_helpers.h>
13
#include <scsi/scsi_cmnd.h>
14
#include <linux/idr.h>
J
Jens Axboe 已提交
15
#include <linux/blk-mq.h>
16
#include <linux/blk-mq-virtio.h>
J
Jens Axboe 已提交
17
#include <linux/numa.h>
18

19
#define PART_BITS 4
20
#define VQ_NAME_LEN 16
R
Rusty Russell 已提交
21

22 23 24
static int major;
static DEFINE_IDA(vd_index_ida);

25
static struct workqueue_struct *virtblk_wq;
26

27 28 29 30 31 32
/*
 * Per-virtqueue state: the virtqueue itself, a spinlock serializing access
 * to it, and the name buffer handed to find_vqs().  Aligned to a cacheline
 * to avoid false sharing between queues on SMP.
 */
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

33
/* Per-device driver state, stored in vdev->priv. */
struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

56
/* Per-request driver data, allocated by blk-mq behind each struct request. */
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;
#endif
	struct virtio_blk_outhdr out_hdr;
	/* Status byte written back by the device. */
	u8 status;
	/* Flexible array: sized to sg_elems entries via tag_set.cmd_size. */
	struct scatterlist sg[];
};

67 68 69 70 71 72 73 74 75 76 77 78
/* Translate the status byte the device wrote into @vbr to a Linux errno. */
static inline int virtblk_result(struct virtblk_req *vbr)
{
	if (vbr->status == VIRTIO_BLK_S_OK)
		return 0;
	if (vbr->status == VIRTIO_BLK_S_UNSUPP)
		return -ENOTTY;
	return -EIO;
}

79 80 81 82 83 84 85 86 87
/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	/* Driver-written segments first: request header, then CDB. */
	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	/* Data goes in the out list for writes, the in list for reads. */
	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	/* Device-written segments last: sense buffer, SCSI inhdr, status. */
	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

/*
 * Completion bookkeeping for SCSI passthrough requests: copy the residual,
 * sense length and error status from the device-written inhdr back into the
 * scsi_request / request, converting from virtio byte order.
 */
static inline void virtblk_scsi_reques_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

/* Block-device ioctl entry point: forward generic SCSI ioctls to the host. */
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
/* Stubs used when SCSI passthrough support is compiled out. */
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}
static inline void virtblk_scsi_reques_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

/*
 * Queue a regular (non-SCSI) request on @vq: driver-written out_hdr,
 * optional data scatterlist, and the device-written status byte.
 * Returns the virtqueue_add_sgs() result (e.g. -ENOSPC when the ring is
 * full).  Caller holds the per-vq lock.
 */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	/* Data is device-readable for writes, device-writable for reads. */
	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

175
/*
 * blk-mq ->complete handler: finish @req with the errno derived from the
 * device status byte, after per-op fixups (SCSI inhdr copy-back, GET_ID
 * success flag in req->errors).
 */
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int error = virtblk_result(vbr);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		virtblk_scsi_reques_done(req);
		break;
	case REQ_OP_DRV_IN:
		/* virtblk_get_id() reads this back via blk_execute_rq(). */
		req->errors = (error != 0);
		break;
	}

	blk_mq_end_request(req, error);
}

/*
 * Virtqueue callback (interrupt context): drain all completed buffers from
 * the queue and hand the corresponding requests to blk-mq for completion.
 * The disable_cb/enable_cb loop closes the race where the device adds a
 * buffer between our final get_buf and re-enabling the callback.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req, req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

221 222
/*
 * blk-mq ->queue_rq handler: translate @req into a virtio-blk command and
 * add it to the virtqueue backing @hctx.  Returns BLK_MQ_RQ_QUEUE_OK on
 * success, _BUSY when the ring is temporarily full (queue is stopped and
 * restarted from virtblk_done()), or _ERROR for unsupported ops.
 */
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	/* +2 accounts for the header and status entries around the data. */
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		/* Direction bit is ORed in below once we know if data maps. */
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	/* Sector is only meaningful for plain reads/writes (type == 0). */
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* Kick so in-flight requests complete and free ring space. */
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	/* Notify outside the lock; the host exit can be expensive. */
	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

296 297 298 299 300
/*
 * return id (s/n) string for *disk to *id_str
 *
 * Issues a synchronous VIRTIO_BLK_T_GET_ID request (REQ_OP_DRV_IN) and has
 * the device write up to VIRTIO_BLK_ID_BYTES into @id_str.  @id_str must be
 * at least that large.  Returns 0 on success or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
out:
	blk_put_request(req);
	return err;
}

319 320 321
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		/* cylinders = capacity / (heads * sectors) = cap >> (6+5) */
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

341
/* Block-device operations; .ioctl is NULL without CONFIG_VIRTIO_BLK_SCSI. */
static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

347 348 349 350 351
/* Each disk owns 1 << PART_BITS minors; map an ida index to its first one. */
static int index_to_minor(int index)
{
	return index * (1 << PART_BITS);
}

352 353 354 355 356
/* Inverse of index_to_minor(): recover the ida index from a minor number. */
static int minor_to_index(int minor)
{
	int index = minor >> PART_BITS;

	return index;
}

357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
/* sysfs "serial" attribute: report the device serial from GET_ID. */
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* Terminate in case the device fills all VIRTIO_BLK_ID_BYTES. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
378

379 380 381 382 383 384 385
/*
 * Process-context handler for config-space changes: re-read the capacity,
 * resize the disk, log the new size and emit a RESIZE uevent for udev.
 */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	/* Human-readable sizes in both binary and decimal units for the log. */
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
		  (unsigned long long)capacity,
		  queue_logical_block_size(q),
		  cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

/* Config-change callback (may run in atomic context): defer to a workqueue. */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

422 423
/*
 * Allocate and discover the device's virtqueues.  The queue count comes
 * from the config space when VIRTIO_BLK_F_MQ is negotiated, else 1.
 * On success fills vblk->vqs/num_vqs; on failure everything is freed.
 */
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	/* Temporary parallel arrays for find_vqs(); freed below. */
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names,
			&desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	/* kfree(NULL) is a no-op, so partial allocation failures are fine. */
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Encodes @index as a bijective base-26 suffix after @prefix into @buf:
 * 0 -> "a", 25 -> "z", 26 -> "aa", ...  Returns 0 on success or -EINVAL
 * when @buf (of size @buflen) cannot hold prefix + suffix + NUL.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	/*
	 * Need room for the prefix, at least one letter and the NUL.
	 * Without this check a degenerate buflen would put the starting
	 * cursor at or before @begin and write outside @buf.
	 */
	if (end < begin + 2)
		return -EINVAL;

	/* Build the suffix right-to-left, starting from the terminator. */
	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		/* -1 bias each round makes the encoding bijective ("aa" after "z"). */
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	/* Slide the suffix down next to the prefix, then fill the prefix in. */
	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

506 507 508 509 510
/*
 * Read the current write-cache mode: the config-space "wce" byte when
 * VIRTIO_BLK_F_CONFIG_WCE is negotiated, otherwise inferred from FLUSH.
 * Returns 1 for writeback, 0 for writethrough.
 */
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

/* Propagate the device's cache mode to the block layer and re-read the disk. */
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

/* Indexed by cache mode: 0 = writethrough, 1 = writeback. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

/* sysfs "cache_type" store: write the chosen mode into config space. */
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	/* This attribute is only registered when CONFIG_WCE is negotiated. */
	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	/* The matched table index doubles as the wce value to write. */
	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

/* sysfs "cache_type" show: report the current mode as a string. */
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

/* Read-only variant used when the device's cache mode is not configurable. */
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

579 580 581
/*
 * blk-mq ->init_request: one-time setup of the per-request PDU —
 * point the scsi_request at its sense buffer and initialize the
 * scatterlist tail allocated behind struct virtblk_req.
 */
static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

593 594 595 596 597 598 599
/* blk-mq ->map_queues: map hw queues using the device's MSI-X affinity. */
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}

600
/* blk-mq entry points for virtio-blk. */
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
	.map_queues	= virtblk_map_queues,
};

607 608
/* Module parameter: fixed queue depth; 0 (default) sizes it from the ring. */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
J
Jens Axboe 已提交
609

610
/*
 * Probe: allocate driver state, discover virtqueues, set up the blk-mq
 * tag set and request queue, apply the device's limits/topology from
 * config space, and register the gendisk plus its sysfs attributes.
 * Teardown on failure happens in reverse order via the out_* labels.
 */
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	/* PDU = virtblk_req plus its trailing scatterlist array. */
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	/* cache_type is writable only when the device allows changing wce. */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

812
/* Remove: unwind everything virtblk_probe() set up. */
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Sample the refcount before the final put below can free the disk. */
	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

840
#ifdef CONFIG_PM_SLEEP
841 842 843 844 845 846 847
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

848
	/* Make sure no work handler is accessing the device. */
849 850
	flush_work(&vblk->config_work);

J
Jens Axboe 已提交
851
	blk_mq_stop_hw_queues(vblk->disk->queue);
852 853 854 855 856 857 858 859 860 861 862

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
863 864 865 866
	if (ret)
		return ret;

	virtio_device_ready(vdev);
J
Jens Axboe 已提交
867

868 869
	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
870 871 872
}
#endif

873
static const struct virtio_device_id id_table[] = {
R
Rusty Russell 已提交
874 875 876 877
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

M
Michael S. Tsirkin 已提交
878
/* Feature bits offered to legacy (pre-virtio-1.0) devices. */
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
}
;
/* Feature bits for modern devices; note F_SCSI is legacy-only. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

895
/* Driver registration structure tying the callbacks above together. */
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

/*
 * Module init: create the config-change workqueue, reserve a block major
 * and register the virtio driver; unwind in reverse order on failure.
 */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	/* 0 asks the block layer to pick a free major number. */
	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: tear down in the reverse order of init(). */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");