virtio_blk.c 24.1 KB
Newer Older
R
Rusty Russell 已提交
1 2
//#define DEBUG
#include <linux/spinlock.h>
3
#include <linux/slab.h>
R
Rusty Russell 已提交
4 5
#include <linux/blkdev.h>
#include <linux/hdreg.h>
6
#include <linux/module.h>
7
#include <linux/mutex.h>
8
#include <linux/interrupt.h>
R
Rusty Russell 已提交
9 10
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
11
#include <linux/scatterlist.h>
12
#include <linux/string_helpers.h>
13
#include <scsi/scsi_cmnd.h>
14
#include <linux/idr.h>
J
Jens Axboe 已提交
15
#include <linux/blk-mq.h>
16
#include <linux/blk-mq-virtio.h>
J
Jens Axboe 已提交
17
#include <linux/numa.h>
18

19
#define PART_BITS 4
20
#define VQ_NAME_LEN 16
R
Rusty Russell 已提交
21

22 23 24
static int major;
static DEFINE_IDA(vd_index_ida);

25
static struct workqueue_struct *virtblk_wq;
26

27 28 29 30 31 32
/*
 * Per-virtqueue state: the queue itself, a lock serializing access to it,
 * and a name shown in /proc/interrupts.  Cacheline-aligned so queues used
 * by different CPUs don't false-share.
 */
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;		/* protects vq operations */
	char name[VQ_NAME_LEN];		/* "req.N", set up in init_vq() */
} ____cacheline_aligned_in_smp;

33
/* Per-device state, hung off vdev->priv. */
struct virtio_blk {
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* What host tells us, plus 2 for header & trailer (out_hdr + status). */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;	/* array of num_vqs entries */
};

56
/*
 * Per-request driver data, allocated by blk-mq as the request pdu
 * (see tag_set.cmd_size in virtblk_probe).
 */
struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
	struct scsi_request sreq;	/* for SCSI passthrough, must be first */
	u8 sense[SCSI_SENSE_BUFFERSIZE];
	struct virtio_scsi_inhdr in_hdr;	/* device-written SCSI status */
#endif
	struct virtio_blk_outhdr out_hdr;	/* device-read request header */
	u8 status;		/* device-written VIRTIO_BLK_S_* status byte */
	struct scatterlist sg[];	/* sg_elems entries, flexible array */
};

67 68 69 70 71 72 73 74 75 76 77 78
/* Translate the device's VIRTIO_BLK_S_* status byte into a Linux errno. */
static inline int virtblk_result(struct virtblk_req *vbr)
{
	if (vbr->status == VIRTIO_BLK_S_OK)
		return 0;
	if (vbr->status == VIRTIO_BLK_S_UNSUPP)
		return -ENOTTY;
	/* VIRTIO_BLK_S_IOERR and anything unknown map to a generic I/O error. */
	return -EIO;
}

79 80 81 82 83 84 85 86 87
/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
/*
 * Queue a SCSI passthrough request on @vq.  Descriptor layout (in order):
 * out_hdr, cdb, [data if write], [data if read], sense, inhdr, status.
 * Returns 0 or a negative errno from virtqueue_add_sgs().
 */
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		/* Write data is device-readable (out); read data is device-writable (in). */
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

/*
 * Copy the device-written SCSI inhdr fields back into the scsi_request /
 * request on completion.  (Name keeps the historical "reques" typo since
 * it is referenced from virtblk_request_done.)
 */
static inline void virtblk_scsi_reques_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	struct virtio_blk *vblk = req->q->queuedata;
	struct scsi_request *sreq = &vbr->sreq;

	sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
	sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
	req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

/* Block-device ioctl: forwards generic SCSI ioctls when the host offers them. */
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}
#else
/* Stubs when SCSI passthrough support is compiled out. */
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
		struct virtblk_req *vbr, struct scatterlist *data_sg,
		bool have_data)
{
	return -EIO;
}
static inline void virtblk_scsi_reques_done(struct request *req)
{
}
#define virtblk_ioctl	NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

/*
 * Queue a normal block request on @vq.  Descriptor layout (in order):
 * out_hdr (device-readable), optional data, status byte (device-writable).
 * Returns 0 or a negative errno from virtqueue_add_sgs().
 */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr_sg, status_sg, *sgl[3];
	unsigned int nr_out = 0, nr_in = 0;

	/* The request header always leads the chain. */
	sg_init_one(&hdr_sg, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgl[nr_out++] = &hdr_sg;

	if (have_data) {
		bool is_write = vbr->out_hdr.type &
				cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT);

		/* Writes are device-readable, reads are device-writable. */
		if (is_write)
			sgl[nr_out++] = data_sg;
		else
			sgl[nr_out + nr_in++] = data_sg;
	}

	/* The status byte always trails the chain. */
	sg_init_one(&status_sg, &vbr->status, sizeof(vbr->status));
	sgl[nr_out + nr_in++] = &status_sg;

	return virtqueue_add_sgs(vq, sgl, nr_out, nr_in, vbr, GFP_ATOMIC);
}

175
/*
 * blk-mq ->complete handler: finish a request with the errno derived from
 * the device's status byte, after op-specific post-processing.
 */
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	int error = virtblk_result(vbr);

	switch (req_op(req)) {
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		/* Copy SCSI inhdr (residual/sense_len/errors) back to the request. */
		virtblk_scsi_reques_done(req);
		break;
	case REQ_OP_DRV_IN:
		/* GET_ID path: virtblk_get_id() checks req->errors directly. */
		req->errors = (error != 0);
		break;
	/* REQ_OP_READ/WRITE/FLUSH need no extra handling here. */
	}

	blk_mq_end_request(req, error);
}

/*
 * Virtqueue interrupt callback: drain all completed requests from the vq
 * and hand them to blk-mq.  Runs with the per-vq lock held and interrupts
 * disabled; the disable_cb/enable_cb loop closes the race where the device
 * adds a buffer between our final get_buf and re-enabling the callback.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			blk_mq_complete_request(req, req->errors);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

221 222
/*
 * blk-mq ->queue_rq handler: translate a block-layer request into a
 * virtio-blk command, add it to the hw-queue's virtqueue and (for the
 * last request of a batch) kick the device.  Returns BLK_MQ_RQ_QUEUE_OK,
 * _BUSY when the ring is temporarily full, or _ERROR.
 */
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	u32 type;

	/* +2 accounts for the out_hdr and status descriptors. */
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_SCSI_IN:
	case REQ_OP_SCSI_OUT:
		type = VIRTIO_BLK_T_SCSI_CMD;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	/* Only plain reads/writes (type == 0) carry a meaningful sector. */
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	if (req_op(req) == REQ_OP_SCSI_IN || req_op(req) == REQ_OP_SCSI_OUT)
		err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	else
		err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* Ring full (or broken): kick what's queued and stop the hw queue
		 * until completions free up descriptors (see virtblk_done). */
		virtqueue_kick(vblk->vqs[qid].vq);
		blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
	}

	/* Batch kicks: only notify on the last request, outside the lock. */
	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;
}

296 297 298 299 300
/*
 * Return id (s/n) string for *disk to *id_str.
 *
 * Issues a synchronous VIRTIO_BLK_T_GET_ID command (REQ_OP_DRV_IN, see
 * virtio_queue_rq) that the device answers with up to VIRTIO_BLK_ID_BYTES
 * of identify data; @id_str must have room for that many bytes.
 * Returns 0 on success or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, GFP_KERNEL);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	/* Synchronous execution; virtblk_request_done() sets req->errors. */
	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = req->errors ? -EIO : 0;
out:
	blk_put_request(req);
	return err;
}

320 321 322
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
323 324 325
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
326 327 328 329 330 331 332
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
333 334 335 336 337 338
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
339 340 341
	return 0;
}

342
/* Block-device ops; .ioctl is NULL when SCSI passthrough is compiled out. */
static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

348 349 350 351 352
/* First minor number of device @index: each disk owns 1 << PART_BITS minors. */
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

353 354 355 356 357
/* Inverse of index_to_minor(): device index owning minor number @minor. */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376
/*
 * sysfs "serial" attribute: fetch the device id string via VIRTIO_BLK_T_GET_ID.
 * An -EIO (device doesn't support GET_ID) is reported as an empty attribute.
 */
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* The id need not be NUL-terminated by the device; terminate it here. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
377 378

static DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);
379

380 381 382 383 384 385 386
/*
 * Workqueue handler for config-space changes: re-read the capacity, update
 * the gendisk and notify userspace (udev "RESIZE=1" event).  Runs in process
 * context because config reads may sleep.
 */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	char *envp[] = { "RESIZE=1", NULL };
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	/* Human-readable sizes in both binary and decimal units for the log. */
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(capacity, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
		  (unsigned long long)capacity,
		  queue_logical_block_size(q),
		  cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

/* Config-change interrupt: defer the actual work to process context. */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

423 424
/*
 * Discover and set up the device's virtqueues.  Honours VIRTIO_BLK_F_MQ's
 * num_queues (falling back to a single queue) and fills vblk->vqs/num_vqs.
 * Returns 0 or a negative errno; on failure vblk->vqs is freed.
 */
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;	/* feature not negotiated: single queue */

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	/* Temporary parallel arrays for find_vqs(); freed below either way. */
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names,
			&desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	/* kfree(NULL) is a no-op, so partial allocation failures are fine. */
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Writes "<prefix><suffix>" into @buf, where the suffix encodes @index in
 * the sd-style bijective base-26 scheme: 0->"a", 25->"z", 26->"aa", ...
 * Returns 0, or -EINVAL if @buflen cannot hold the result.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *start = buf + strlen(prefix);
	char *limit = buf + buflen;
	char *cursor = limit - 1;

	/* Build the suffix right-to-left at the end of the buffer. */
	*cursor = '\0';
	do {
		if (cursor == start)
			return -EINVAL;
		*--cursor = 'a' + (index % radix);
		index = index / radix - 1;
	} while (index >= 0);

	/* Slide the suffix down next to the prefix, then fill in the prefix. */
	memmove(start, cursor, limit - cursor);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

507 508 509 510 511
/*
 * Read the current cache mode: 1 = writeback, 0 = writethrough.
 * Uses the wce config field when VIRTIO_BLK_F_CONFIG_WCE is negotiated.
 */
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

/* Propagate the device's cache mode to the block layer and re-read the disk. */
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	/* fua is always false: virtio-blk has no FUA support here. */
	blk_queue_write_cache(vblk->disk->queue, writeback, false);
	revalidate_disk(vblk->disk);
}

/* Index doubles as the wce config value: 0 = writethrough, 1 = writeback. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

/*
 * sysfs "cache_type" store: write the chosen mode into the device's wce
 * config field and refresh the block layer's view.  Only registered when
 * VIRTIO_BLK_F_CONFIG_WCE is negotiated (see virtblk_probe).
 */
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

/* sysfs "cache_type" show: report the current cache mode as a string. */
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

/* Read-only variant when the device doesn't allow changing the cache mode. */
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

580 581 582
/*
 * blk-mq ->init_request: one-time setup of each request's pdu — point the
 * scsi_request at our sense buffer and initialize the scatterlist.
 */
static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
	vbr->sreq.sense = vbr->sense;
#endif
	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

594 595 596 597 598 599 600
/* blk-mq ->map_queues: map hw queues to CPUs following the virtio IRQ affinity. */
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(set, vblk->vdev, 0);
}

601
/* blk-mq operations for virtio-blk. */
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
	.map_queues	= virtblk_map_queues,
};

608 609
/* 0 (default) means "size from the virtqueue" — see virtblk_probe(). */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
J
Jens Axboe 已提交
610

611
/*
 * Device probe: allocate per-device state, set up virtqueues and the blk-mq
 * tag set/queue, read feature-dependent limits from config space, and
 * register the gendisk plus its sysfs attributes.  Unwinds in reverse order
 * via the out_* labels on failure.
 */
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	/* A device without config access can't even report its capacity. */
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	/* Pdu = virtblk_req plus its flexible scatterlist array. */
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	/* Device is live from here on: it may raise interrupts. */
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	/* cache_type is writable only when the device lets us change wce. */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

813
/*
 * Device removal: tear down in the reverse order of probe.  The disk's
 * refcount is sampled before put_disk() so the ida index is released only
 * when no opener still holds a reference.
 */
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;
	int refc;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
	kfree(vblk);

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
}

841
#ifdef CONFIG_PM_SLEEP
842 843 844 845 846 847 848
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

849
	/* Make sure no work handler is accessing the device. */
850 851
	flush_work(&vblk->config_work);

J
Jens Axboe 已提交
852
	blk_mq_stop_hw_queues(vblk->disk->queue);
853 854 855 856 857 858 859 860 861 862 863

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
864 865 866 867
	if (ret)
		return ret;

	virtio_device_ready(vdev);
J
Jens Axboe 已提交
868

869 870
	blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	return 0;
871 872 873
}
#endif

874
static const struct virtio_device_id id_table[] = {
R
Rusty Russell 已提交
875 876 877 878
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

M
Michael S. Tsirkin 已提交
879
/*
 * Feature bits advertised for legacy (pre-1.0) devices.  SCSI passthrough
 * is legacy-only, hence its absence from the modern table below.
 */
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
	VIRTIO_BLK_F_SCSI,
#endif
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

/* Feature bits advertised for modern (virtio 1.0+) devices. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
};

896
/* Driver registration structure tying the callbacks above together. */
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

/*
 * Module init: create the config-change workqueue, grab a dynamic block
 * major, then register the virtio driver.  Unwinds on failure.
 */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	/* 0 asks the block layer for a dynamically allocated major. */
	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: unregister in reverse order of init(). */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");