virtio_blk.c 25.6 KB
Newer Older
1
// SPDX-License-Identifier: GPL-2.0-only
R
Rusty Russell 已提交
2 3
//#define DEBUG
#include <linux/spinlock.h>
4
#include <linux/slab.h>
R
Rusty Russell 已提交
5 6
#include <linux/blkdev.h>
#include <linux/hdreg.h>
7
#include <linux/module.h>
8
#include <linux/mutex.h>
9
#include <linux/interrupt.h>
R
Rusty Russell 已提交
10 11
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
12
#include <linux/scatterlist.h>
13
#include <linux/string_helpers.h>
14
#include <linux/idr.h>
J
Jens Axboe 已提交
15
#include <linux/blk-mq.h>
16
#include <linux/blk-mq-virtio.h>
J
Jens Axboe 已提交
17
#include <linux/numa.h>
18
#include <uapi/linux/virtio_ring.h>
19

20
#define PART_BITS 4
21
#define VQ_NAME_LEN 16
22
#define MAX_DISCARD_SEGMENTS 256u
R
Rusty Russell 已提交
23

24 25 26
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

27 28 29
static int major;
static DEFINE_IDA(vd_index_ida);

30
static struct workqueue_struct *virtblk_wq;
31

32 33 34 35 36 37
/*
 * Per-virtqueue state: the queue itself, a spinlock serializing submissions
 * and completions on it, and a name used when requesting the vq (shows up
 * in /proc/interrupts).  Cacheline-aligned to avoid false sharing between
 * queues on SMP.
 */
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

38
/* Per-device driver state, hung off vdev->priv. */
struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

77
/*
 * Per-request driver data, allocated by blk-mq as the request PDU
 * (see tag_set.cmd_size in virtblk_probe()).
 */
struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;	/* header placed first in the vring chain */
	u8 status;				/* status byte the device writes back */
	struct scatterlist sg[];		/* data scatterlist, sg_elems entries */
};

83
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
84 85 86
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
87
		return BLK_STS_OK;
88
	case VIRTIO_BLK_S_UNSUPP:
89
		return BLK_STS_NOTSUPP;
90
	default:
91
		return BLK_STS_IOERR;
92 93 94
	}
}

95 96 97 98 99 100 101 102
/*
 * Queue one request onto @vq as a three-part descriptor chain:
 * the out_hdr (device-readable), the optional data scatterlist (direction
 * chosen by the VIRTIO_BLK_T_OUT bit already set in out_hdr.type), and the
 * one-byte status (device-writable).
 *
 * Returns 0 or a negative errno from virtqueue_add_sgs() (e.g. -ENOSPC when
 * the ring is full).  Called with the per-vq lock held, hence GFP_ATOMIC.
 */
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		/* Writes go in the out (device-readable) part of the chain. */
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	/* Status byte is always last and always device-writable. */
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

117 118 119 120 121 122 123 124 125 126 127 128 129 130 131
/*
 * Build the payload for a DISCARD or WRITE_ZEROES request: an array of
 * (sector, num_sectors, flags) ranges, attached to @req as a special
 * payload so it is transferred like ordinary data.
 *
 * @unmap sets VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP on every range.
 * Returns 0 on success or -ENOMEM if the range array cannot be allocated.
 * GFP_ATOMIC because this runs from ->queue_rq context.
 */
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't reply on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		/* One range per bio in the (possibly merged) request. */
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	/* Freed in virtblk_request_done() via the special payload page. */
	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

165
/*
 * blk-mq ->complete handler: free the discard/write-zeroes range array
 * (if any) that virtblk_setup_discard_write_zeroes() attached, then end
 * the request with the device-reported status.
 */
static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}

/*
 * Virtqueue interrupt callback: drain completed requests from the ring and
 * hand them to blk-mq.  The disable_cb/get_buf/enable_cb loop closes the
 * race where the device adds a buffer between the final get_buf and
 * re-enabling notifications.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

206 207 208 209 210 211 212 213 214 215 216 217 218 219
/*
 * blk-mq ->commit_rqs hook: kick the device once after a batch of
 * ->queue_rq calls that deferred their own kick (bd->last was false).
 */
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *blk_vq = &vblk->vqs[hctx->queue_num];
	bool need_notify;

	spin_lock_irq(&blk_vq->lock);
	need_notify = virtqueue_kick_prepare(blk_vq->vq);
	spin_unlock_irq(&blk_vq->lock);

	/* Notify outside the lock; it may trap to the hypervisor. */
	if (need_notify)
		virtqueue_notify(blk_vq->vq);
}

220
/*
 * blk-mq ->queue_rq: translate a block layer request into a virtio-blk
 * command, map its data into the preallocated per-request scatterlist and
 * submit it on this hw queue's virtqueue.  Kicks the device only when
 * bd->last is set (batched submissions are kicked by virtio_commit_rqs()).
 *
 * Returns BLK_STS_OK on submission, BLK_STS_DEV_RESOURCE/BLK_STS_RESOURCE
 * for retryable ring/memory exhaustion, BLK_STS_IOERR otherwise.
 */
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	/* sg_elems includes the two header/status slots. */
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;	/* IN/OUT bit is OR-ed in after mapping below */
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		/* Only driver-internal request is the serial-number query. */
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	/* Only plain reads/writes (type 0) carry a sector number. */
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		/* Kick what is already queued before backing off. */
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	/* Notify outside the lock; it may trap to the hypervisor. */
	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

309 310 311 312 313
/*
 * return id (s/n) string for *disk to *id_str
 *
 * Issues a VIRTIO_BLK_T_GET_ID command (REQ_OP_DRV_IN, see virtio_queue_rq)
 * through the normal request path.  @id_str must have room for
 * VIRTIO_BLK_ID_BYTES.  Returns 0 or a negative errno.
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	/* Synchronous execution; result comes from the per-request status. */
	blk_execute_rq(vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369
/* Take a reference on @vblk; paired with virtblk_put(). */
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

/*
 * Drop a reference on @vblk.  The last put releases the ida index and
 * frees the device structure (references come from open/release and
 * probe/remove, see struct virtio_blk::refs).
 */
static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

/*
 * block_device_operations ->open: take a reference while the device is
 * still bound.  Fails with -ENXIO after virtblk_remove() has cleared
 * vblk->vdev (vdev_mutex guards that transition).
 */
static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int err = -ENXIO;

	mutex_lock(&vblk->vdev_mutex);
	if (vblk->vdev) {
		virtblk_get(vblk);
		err = 0;
	}
	mutex_unlock(&vblk->vdev_mutex);

	return err;
}

/* block_device_operations ->release: drop the reference taken in open. */
static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

370 371 372
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	/* Guard against racing with virtblk_remove() clearing vblk->vdev. */
	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

402
/* Block device operations exposed to the block layer. */
static const struct block_device_operations virtblk_fops = {
	.owner  = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

409 410 411 412 413
/* First minor number for the disk with the given ida index (PART_BITS
 * low bits are reserved for partitions). */
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

414 415 416 417 418
/* Inverse of index_to_minor(): recover the ida index from a minor number. */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

419 420
/*
 * sysfs "serial" attribute: report the device serial string obtained via
 * VIRTIO_BLK_T_GET_ID, or an empty string if the device does not support it.
 */
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* Device may fill all VIRTIO_BLK_ID_BYTES without a terminator. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
438

439
static DEVICE_ATTR_RO(serial);
440

441 442
/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* Config capacity is in 512-byte sectors; convert to logical blocks. */
	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}

/* Process-context handler for config-space changes (capacity resize). */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

/*
 * virtio config-change callback (may run in atomic context); defer the
 * actual config read to process context via the driver workqueue.
 */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

487 488
/*
 * Discover and set up the device's virtqueues.  The queue count comes from
 * the VIRTIO_BLK_F_MQ config field (default 1), capped at nr_cpu_ids.
 * On success vblk->vqs/num_vqs are populated; on failure everything
 * allocated here is freed.  Returns 0 or a negative errno.
 */
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	/* No benefit in more queues than CPUs. */
	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	/* Temporary parallel arrays consumed by virtio_find_vqs(). */
	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	/* The temporaries are always freed; vblk->vqs only on failure. */
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Encodes @index as a base-26 letter suffix appended to @prefix in @buf
 * (0 -> "a", 25 -> "z", 26 -> "aa", ...).  Returns 0, or -EINVAL when the
 * result does not fit in @buflen bytes.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;
	const size_t plen = strlen(prefix);
	char *const suffix_start = buf + plen;
	char *cursor = buf + buflen - 1;
	int remaining = index;

	*cursor = '\0';
	/* Emit the least-significant letter first, walking backwards. */
	for (;;) {
		if (cursor == suffix_start)
			return -EINVAL;
		*--cursor = 'a' + (remaining % radix);
		remaining = remaining / radix - 1;
		if (remaining < 0)
			break;
	}

	/* Slide suffix down next to the prefix, then stamp the prefix. */
	memmove(suffix_start, cursor, (buf + buflen) - cursor);
	memcpy(buf, prefix, plen);

	return 0;
}

572 573 574 575 576
/*
 * Read the current cache mode: 1 = write back, 0 = write through.
 * Uses the wce config byte when VIRTIO_BLK_F_CONFIG_WCE is negotiated.
 */
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

/* Propagate the device's cache mode to the block layer (write cache flag). */
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

/* Index matches the wce config byte: 0 = write through, 1 = write back. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

/*
 * sysfs "cache_type" store: write the selected mode into the device's wce
 * config byte and update the block layer accordingly.  Only reachable when
 * VIRTIO_BLK_F_CONFIG_WCE is negotiated (enforced by attribute visibility).
 */
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

/* sysfs "cache_type" show: report the current cache mode as a string. */
static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

633 634 635 636 637 638 639 640 641 642 643
static DEVICE_ATTR_RW(cache_type);

/* sysfs attributes attached to the gendisk at device_add_disk() time. */
static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

/* Make cache_type read-only when the device can't change its cache mode. */
static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};
665

666 667
/*
 * blk-mq ->init_request: initialize the per-request scatterlist that lives
 * in the request PDU (sized via tag_set.cmd_size in virtblk_probe()).
 */
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

676 677 678 679
/*
 * blk-mq ->map_queues: map hw queues to CPUs using the device's interrupt
 * affinity so completions land on the submitting CPU where possible.
 */
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

684
/* blk-mq driver operations. */
static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
	.map_queues	= virtblk_map_queues,
};

692 693
/* Module parameter; 0 (default) sizes the queue from the virtqueue ring. */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
J
Jens Axboe 已提交
694

695
/*
 * Bind a virtio block device: allocate an index/name, set up virtqueues and
 * the blk-mq tag set, create the gendisk, apply all feature-driven queue
 * limits (segments, block size, topology, discard, write-zeroes), read the
 * capacity and register the disk.  Error paths unwind in reverse order.
 */
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (likely(!virtblk_queue_depth)) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	/* PDU = request header/status plus the full scatterlist. */
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		/*
		 * NOTE(review): the virtio spec describes
		 * discard_sector_alignment as the discard granularity; mapping
		 * it to discard_alignment here looks questionable and was
		 * changed upstream later — verify against the current spec.
		 */
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
		blk_queue_max_discard_segments(q,
					       min_not_zero(v,
							    MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

897
/*
 * Unbind the device.  Ordering matters: stop config work, tear down the
 * disk and tag set (quiesces blk-mq), then under vdev_mutex reset the
 * device, clear vblk->vdev (open/getgeo check this) and delete the vqs.
 * The final virtblk_put() frees vblk once the last opener releases it.
 */
static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_disk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

924
#ifdef CONFIG_PM_SLEEP
925 926 927 928 929 930 931
/*
 * PM freeze: reset the device to silence interrupts, flush pending config
 * work, quiesce blk-mq so no new requests are issued, then delete the
 * virtqueues (recreated in virtblk_restore()).
 */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

/*
 * PM restore: recreate the virtqueues, mark the device ready and let
 * blk-mq resume dispatching.  Mirrors virtblk_freeze().
 */
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

959
/* Match any virtio block device, any vendor. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

M
Michael S. Tsirkin 已提交
964
/* Feature bits offered to legacy (pre-virtio-1.0) devices. */
static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
/* Feature bits offered to modern (virtio-1.0+) devices. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

978
/* Virtio driver registration. */
static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

/*
 * Module init: create the config-change workqueue, grab a dynamic block
 * major, register the virtio driver.  Unwinds in reverse order on failure.
 */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: tear down in the reverse order of init(). */
static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");