// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>
19

20
#define PART_BITS 4
21
#define VQ_NAME_LEN 16
22
#define MAX_DISCARD_SEGMENTS 256u
R
Rusty Russell 已提交
23

24 25 26
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

27 28 29
static int major;
static DEFINE_IDA(vd_index_ida);

30
static struct workqueue_struct *virtblk_wq;
31

32 33 34 35 36 37
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

38
struct virtio_blk {
39 40 41 42 43 44 45 46 47
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
R
Rusty Russell 已提交
48 49 50 51 52
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

53 54 55
	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

56 57 58
	/* Process context for config space updates */
	struct work_struct config_work;

59 60 61 62 63 64 65
	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

66 67 68
	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

69 70
	/* Ida index - used to track minor number allocations. */
	int index;
71 72 73 74

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
R
Rusty Russell 已提交
75 76
};

77
struct virtblk_req {
78
	struct virtio_blk_outhdr out_hdr;
79
	u8 status;
80
	struct scatterlist sg[];
R
Rusty Russell 已提交
81 82
};

83
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
84 85 86
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
87
		return BLK_STS_OK;
88
	case VIRTIO_BLK_S_UNSUPP:
89
		return BLK_STS_NOTSUPP;
90
	default:
91
		return BLK_STS_IOERR;
92 93 94
	}
}

95 96 97 98 99 100 101 102
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
103

R
Rusty Russell 已提交
104
	if (have_data) {
M
Michael S. Tsirkin 已提交
105
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
106
			sgs[num_out++] = data_sg;
107
		else
108 109 110
			sgs[num_out + num_in++] = data_sg;
	}

111 112 113 114
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
115 116
}

117 118 119 120 121 122 123 124 125 126 127 128 129 130 131
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152
	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't reply on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
153 154
	}

155 156
	WARN_ON_ONCE(n != segments);

157 158 159 160 161 162 163 164
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

165
static inline void virtblk_request_done(struct request *req)
166
{
167
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
168

C
Christoph Hellwig 已提交
169 170
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
171
	blk_mq_end_request(req, virtblk_result(vbr));
172 173 174
}

static void virtblk_done(struct virtqueue *vq)
R
Rusty Russell 已提交
175 176
{
	struct virtio_blk *vblk = vq->vdev->priv;
J
Jens Axboe 已提交
177
	bool req_done = false;
178
	int qid = vq->index;
R
Rusty Russell 已提交
179 180
	struct virtblk_req *vbr;
	unsigned long flags;
181
	unsigned int len;
R
Rusty Russell 已提交
182

183
	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
184 185
	do {
		virtqueue_disable_cb(vq);
186
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
187 188
			struct request *req = blk_mq_rq_from_pdu(vbr);

189 190
			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
J
Jens Axboe 已提交
191
			req_done = true;
192
		}
193 194
		if (unlikely(virtqueue_is_broken(vq)))
			break;
195
	} while (!virtqueue_enable_cb(vq));
J
Jens Axboe 已提交
196

R
Rusty Russell 已提交
197
	/* In case queue is stopped waiting for more buffers. */
198
	if (req_done)
199
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
200
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
201 202
}

203 204 205 206 207 208 209 210 211 212 213 214 215 216
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

217
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
218
			   const struct blk_mq_queue_data *bd)
R
Rusty Russell 已提交
219
{
J
Jens Axboe 已提交
220
	struct virtio_blk *vblk = hctx->queue->queuedata;
221
	struct request *req = bd->rq;
222
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
J
Jens Axboe 已提交
223
	unsigned long flags;
224
	unsigned int num;
225
	int qid = hctx->queue_num;
226
	int err;
227
	bool notify = false;
228
	bool unmap = false;
229
	u32 type;
R
Rusty Russell 已提交
230

J
Jens Axboe 已提交
231
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
R
Rusty Russell 已提交
232

233 234 235 236 237 238 239 240
	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
241 242 243 244 245 246 247
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
248 249 250 251 252
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
253
		return BLK_STS_IOERR;
R
Rusty Russell 已提交
254 255
	}

256 257 258 259 260
	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

261 262
	blk_mq_start_request(req);

263 264 265 266 267 268
	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

269
	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
270
	if (num) {
271
		if (rq_data_dir(req) == WRITE)
M
Michael S. Tsirkin 已提交
272
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
273
		else
M
Michael S. Tsirkin 已提交
274
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
R
Rusty Russell 已提交
275 276
	}

277
	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
278
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
279
	if (err) {
280
		virtqueue_kick(vblk->vqs[qid].vq);
281 282 283 284 285
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
286
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
287 288
		switch (err) {
		case -ENOSPC:
289
			return BLK_STS_DEV_RESOURCE;
290 291 292 293 294
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
295 296
	}

297
	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
298
		notify = true;
299
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
300 301

	if (notify)
302
		virtqueue_notify(vblk->vqs[qid].vq);
303
	return BLK_STS_OK;
304 305
}

306 307 308 309 310
/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
311
	struct request_queue *q = vblk->disk->queue;
312
	struct request *req;
M
Mike Snitzer 已提交
313
	int err;
314

315
	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
316
	if (IS_ERR(req))
317
		return PTR_ERR(req);
318 319 320 321 322

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

323
	blk_execute_rq(vblk->disk, req, false);
324
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
325
out:
M
Mike Snitzer 已提交
326 327
	blk_put_request(req);
	return err;
328 329
}

330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366
/* Take a reference on @vblk; paired with virtblk_put(). */
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

/* Drop a reference; on the last put, release the index and free @vblk. */
static void virtblk_put(struct virtio_blk *vblk)
{
	if (!refcount_dec_and_test(&vblk->refs))
		return;

	ida_simple_remove(&vd_index_ida, vblk->index);
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}

/* Block device ->open: succeeds only while the virtio device is still bound. */
static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret;

	/* vdev_mutex serializes against virtblk_remove() clearing vdev. */
	mutex_lock(&vblk->vdev_mutex);
	if (vblk->vdev) {
		virtblk_get(vblk);
		ret = 0;
	} else {
		ret = -ENXIO;
	}
	mutex_unlock(&vblk->vdev_mutex);

	return ret;
}

/* Block device ->release: drop the reference taken in virtblk_open(). */
static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

367 368 369
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
370
	struct virtio_blk *vblk = bd->bd_disk->private_data;
371 372 373 374 375 376 377 378
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}
379 380

	/* see if the host passed in geometry config */
381 382 383 384 385 386 387
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
388 389 390 391 392 393
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
394 395 396
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
397 398
}

399
static const struct block_device_operations virtblk_fops = {
400
	.owner  = THIS_MODULE,
401 402
	.open = virtblk_open,
	.release = virtblk_release,
403
	.getgeo = virtblk_getgeo,
R
Rusty Russell 已提交
404 405
};

406 407 408 409 410
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

411 412 413 414 415
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

416 417
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
435

436
static DEVICE_ATTR_RO(serial);
437

438 439
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
440 441 442 443
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
444
	unsigned long long nblocks;
445
	u64 capacity;
446 447

	/* Host must always specify the capacity. */
448
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
449

450 451 452
	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
453
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
454
	string_get_size(nblocks, queue_logical_block_size(q),
455
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
456 457

	dev_notice(&vdev->dev,
458 459 460
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
461 462 463 464
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);
465

466
	set_capacity_and_notify(vblk->disk, capacity);
467 468 469 470 471 472 473 474
}

/* Worker: re-read the capacity after a config-change notification. */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

/* Config-change interrupt: defer handling to process context on virtblk_wq. */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

484 485
static int init_vq(struct virtio_blk *vblk)
{
486
	int err;
487 488 489 490 491 492
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
493
	struct irq_affinity desc = { 0, };
494 495 496 497 498 499 500

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

501 502
	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

503
	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
504 505
	if (!vblk->vqs)
		return -ENOMEM;
506

507 508 509
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
510 511 512 513
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}
514

515 516 517 518 519 520 521
	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
M
Michael S. Tsirkin 已提交
522
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
523
	if (err)
524
		goto out;
525

526 527 528 529 530 531
	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

532
out:
533 534 535 536 537
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
538 539 540
	return err;
}

541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Encodes @index as a base-26 'a'..'z' suffix after @prefix (vda, vdb, ...,
 * vdz, vdaa, ...) into @buf.  Returns 0, or -EINVAL if @buflen is too small.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	char *suffix_start = buf + strlen(prefix);
	char *bufend = buf + buflen;
	char *cursor = bufend - 1;

	/* Build the suffix backwards from the end of the buffer. */
	*cursor = '\0';
	while (1) {
		if (cursor == suffix_start)
			return -EINVAL;
		*--cursor = 'a' + (index % radix);
		index = index / radix - 1;
		if (index < 0)
			break;
	}

	/* Slide the suffix down next to the prefix, then write the prefix. */
	memmove(suffix_start, cursor, bufend - cursor);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

569 570 571 572 573
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

574 575 576
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);
577 578 579 580 581

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
582
	if (err)
583
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
584 585 586 587 588 589 590 591 592

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

593
	blk_queue_write_cache(vblk->disk->queue, writeback, false);
594 595 596 597 598 599 600
}

/* Index matches the WCE config byte: 0 = write through, 1 = write back. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
601 602
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
603 604 605 606 607 608 609
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
610
	i = sysfs_match_string(virtblk_cache_types, buf);
611
	if (i < 0)
612
		return i;
613

614
	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
615 616 617 618 619
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
620
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
621 622 623 624 625 626 627 628 629
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

630 631 632 633 634 635 636 637 638 639 640
static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
641
	struct device *dev = kobj_to_dev(kobj);
642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
662

663 664
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
665
{
666
	struct virtio_blk *vblk = set->driver_data;
667 668 669 670 671 672
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

673 674 675 676
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

677 678
	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
679 680
}

681
static const struct blk_mq_ops virtio_mq_ops = {
J
Jens Axboe 已提交
682
	.queue_rq	= virtio_queue_rq,
683
	.commit_rqs	= virtio_commit_rqs,
684
	.complete	= virtblk_request_done,
685
	.init_request	= virtblk_init_request,
686
	.map_queues	= virtblk_map_queues,
J
Jens Axboe 已提交
687 688
};

/* 0 (default) sizes the queue from the ring; override via queue_depth=. */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
J
Jens Axboe 已提交
691

692
static int virtblk_probe(struct virtio_device *vdev)
R
Rusty Russell 已提交
693 694
{
	struct virtio_blk *vblk;
695
	struct request_queue *q;
696
	int err, index;
697

698
	u32 v, blk_size, max_size, sg_elems, opt_io_size;
699 700
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
701
	unsigned int queue_depth;
R
Rusty Russell 已提交
702

703 704 705 706 707 708
	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

709 710 711 712 713
	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;
714

715
	/* We need to know how many segments before we allocate. */
716 717 718
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);
719 720 721

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
722 723
		sg_elems = 1;

724 725 726 727
	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	/* We need extra sg elements at head and tail. */
728
	sg_elems += 2;
J
Jens Axboe 已提交
729
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
R
Rusty Russell 已提交
730 731
	if (!vblk) {
		err = -ENOMEM;
732
		goto out_free_index;
R
Rusty Russell 已提交
733 734
	}

735 736 737 738
	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

R
Rusty Russell 已提交
739
	vblk->vdev = vdev;
740
	vblk->sg_elems = sg_elems;
741

742
	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
R
Rusty Russell 已提交
743

744 745
	err = init_vq(vblk);
	if (err)
R
Rusty Russell 已提交
746 747
		goto out_free_vblk;

748
	/* Default queue sizing is to fill the ring. */
749
	if (!virtblk_queue_depth) {
750
		queue_depth = vblk->vqs[0].vq->num_free;
751 752
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
753 754 755
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
756
	}
757 758 759

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
760
	vblk->tag_set.queue_depth = queue_depth;
761 762 763
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
J
Jens Axboe 已提交
764 765
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
766
	vblk->tag_set.driver_data = vblk;
767
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
J
Jens Axboe 已提交
768

769 770
	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
771
		goto out_free_vq;
772

773 774 775
	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
776
		goto out_free_tags;
R
Rusty Russell 已提交
777
	}
778
	q = vblk->disk->queue;
779

780
	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
781

R
Rusty Russell 已提交
782
	vblk->disk->major = major;
783
	vblk->disk->first_minor = index_to_minor(index);
784
	vblk->disk->minors = 1 << PART_BITS;
R
Rusty Russell 已提交
785 786
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
787
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
788
	vblk->index = index;
789

790
	/* configure queue flush support */
791
	virtblk_update_cache_mode(vdev);
R
Rusty Russell 已提交
792

793 794 795 796
	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

797
	/* We can handle whatever the host told us to handle. */
798
	blk_queue_max_segments(q, vblk->sg_elems-2);
799

800
	/* No real sector limit. */
801
	blk_queue_max_hw_sectors(q, -1U);
802

803 804
	max_size = virtio_max_dma_size(vdev);

805 806
	/* Host can optionally specify maximum segment size and number of
	 * segments. */
807 808
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
R
Rusty Russell 已提交
809
	if (!err)
810 811 812
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);
R
Rusty Russell 已提交
813

814
	/* Host can optionally specify the block size of the device */
815 816 817
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
818 819 820 821 822 823 824 825 826
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

827
		blk_queue_logical_block_size(q, blk_size);
828
	} else
829 830 831
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
832 833 834
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
835 836 837 838
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

839 840 841
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
842 843 844
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

845 846 847
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
848 849 850
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

851 852 853
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
854 855 856
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
		blk_queue_max_discard_segments(q,
					       min_not_zero(v,
							    MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

883
	virtblk_update_capacity(vblk, false);
M
Michael S. Tsirkin 已提交
884 885
	virtio_device_ready(vdev);

886 887 888 889
	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

R
Rusty Russell 已提交
890 891
	return 0;

892
out_cleanup_disk:
893
	blk_cleanup_disk(vblk->disk);
894 895
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
R
Rusty Russell 已提交
896
out_free_vq:
897
	vdev->config->del_vqs(vdev);
898
	kfree(vblk->vqs);
R
Rusty Russell 已提交
899 900
out_free_vblk:
	kfree(vblk);
901 902
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
R
Rusty Russell 已提交
903 904 905 906
out:
	return err;
}

907
static void virtblk_remove(struct virtio_device *vdev)
R
Rusty Russell 已提交
908 909 910
{
	struct virtio_blk *vblk = vdev->priv;

911 912
	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);
913

914
	del_gendisk(vblk->disk);
915
	blk_cleanup_disk(vblk->disk);
916 917
	blk_mq_free_tag_set(&vblk->tag_set);

918 919
	mutex_lock(&vblk->vdev_mutex);

R
Rusty Russell 已提交
920 921 922
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

923 924 925
	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

926
	vdev->config->del_vqs(vdev);
927
	kfree(vblk->vqs);
928

929 930 931
	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
R
Rusty Russell 已提交
932 933
}

934
#ifdef CONFIG_PM_SLEEP
/* Suspend: quiesce I/O and tear down the virtqueues (rebuilt on restore). */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
959 960 961 962
	if (ret)
		return ret;

	virtio_device_ready(vdev);
J
Jens Axboe 已提交
963

964
	blk_mq_unquiesce_queue(vblk->disk->queue);
965
	return 0;
966 967 968
}
#endif

969
static const struct virtio_device_id id_table[] = {
R
Rusty Russell 已提交
970 971 972 973
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

M
Michael S. Tsirkin 已提交
974
static unsigned int features_legacy[] = {
975
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
976
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
977
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
978
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
M
Michael S. Tsirkin 已提交
979 980 981 982 983
}
;
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
984
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
985
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
986 987
};

988
static struct virtio_driver virtio_blk = {
M
Michael S. Tsirkin 已提交
989 990 991 992 993 994 995 996 997 998
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
999
#ifdef CONFIG_PM_SLEEP
M
Michael S. Tsirkin 已提交
1000 1001
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
1002
#endif
R
Rusty Russell 已提交
1003 1004 1005 1006
};

/* Module init: create the workqueue, reserve a major, register the driver. */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: unwind init() in reverse order. */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

/* Exported for module autoloading by device ID. */
MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");