// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
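
	/*
	 * Buffer layout seen by the device: the request header is always
	 * device-readable, the data scatterlist is device-readable for
	 * writes and device-writable for reads, and the status byte added
	 * below is always the final device-writable descriptor.
	 */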
	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

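/*
 * Build the payload for a DISCARD or WRITE_ZEROES request: one
 * virtio_blk_discard_write_zeroes range per bio (or a single range when
 * the queue supports only one discard segment), attached to the request
 * as a special payload.
 */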
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't rely on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
		kfree(page_address(req->special_vec.bv_page) +
		      req->special_vec.bv_offset);
	}

	blk_mq_end_request(req, virtblk_result(vbr));
}

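/*
 * Virtqueue interrupt callback: drain all completed requests, re-checking
 * after callbacks are re-enabled so completions that race in are not
 * missed, then restart any stopped hardware queues.
 */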
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

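/*
 * Called by blk-mq when requests have been queued without a final
 * bd->last kick, so notify the device once for the whole batch.
 */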
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

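/*
 * .queue_rq handler: translate a block layer request into a virtio-blk
 * command (header, optional data scatterlist, status byte) and post it
 * to the virtqueue backing this hardware queue.
 */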
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.owner  = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

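/* The "serial" sysfs attribute reports the device ID string (VIRTIO_BLK_T_GET_ID). */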
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

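/*
 * Read the number of request queues (VIRTIO_BLK_F_MQ), clamp it to the
 * number of CPUs, and set up one virtqueue per hardware queue.
 */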
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

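/*
 * The "cache_type" sysfs attribute selects write through or write back
 * caching; writing it updates the wce config field and requires
 * VIRTIO_BLK_F_CONFIG_WCE.
 */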
static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

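/*
 * blk-mq request constructor: initialise the scatterlist stored in the
 * per-request PDU behind struct virtblk_req.
 */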
static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.init_request	= virtblk_init_request,
	.map_queues	= virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			virtblk_queue_depth /= 2;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = blk_mq_init_queue(&vblk->tag_set);
	if (IS_ERR(q)) {
		err = -ENOMEM;
		goto out_free_tags;
	}
	vblk->disk->queue = q;

	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
		blk_queue_max_discard_segments(q,
					       min_not_zero(v,
							    MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	return 0;

out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	put_disk(vblk->disk);
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");