//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>

#define PART_BITS 4

static int major, index;

struct virtio_blk
{
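	/* Serializes virtqueue access; also used as the request-queue lock. */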
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* What the host tells us, plus 2 for header & trailer. */
	unsigned int sg_elems;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
};

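/* Virtqueue callback: reap completed requests, translate the virtio
 * status into an errno, complete them, and restart the request queue. */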
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) {
		int error;

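		/* Map the virtio status byte onto a Linux errno. */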
		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			error = -ENOTTY;
			break;
		default:
			error = -EIO;
			break;
		}

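		/* SCSI passthrough requests carry extra completion data. */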
		if (blk_pc_request(vbr->req)) {
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
		}
		if (blk_special_request(vbr->req))
			vbr->req->errors = (error != 0);

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}

static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
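	/* 'out' counts buffers the host reads, 'in' counts buffers the
	 * host writes back to us. */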
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
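	/* Fill in the out header according to the request type. */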
	switch (req->cmd_type) {
	case REQ_TYPE_FS:
		vbr->out_hdr.type = 0;
		vbr->out_hdr.sector = blk_rq_pos(vbr->req);
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_BLOCK_PC:
		vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_SPECIAL:
		vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
		break;
	case REQ_TYPE_LINUX_BLOCK:
		if (req->cmd[0] == REQ_LB_OP_FLUSH) {
			vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		}
		/*FALLTHRU*/
	default:
		/* We don't put anything else in the queue. */
		BUG();
	}

	if (blk_barrier_rq(vbr->req))
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

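	/* The out header is always the first element of the chain. */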
	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (blk_pc_request(vbr->req))
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (blk_pc_request(vbr->req)) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

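	/* Queue the buffers: 'out' readable entries followed by 'in' writable ones. */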
	if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}

static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

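	/* Pull requests off the block layer queue until we run out of
	 * requests or out of resources. */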
	while ((req = blk_peek_request(q)) != NULL) {
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop the queue and wait for a
		   completion before restarting it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blk_start_request(req);
		issued++;
	}

	if (issued)
		vblk->vq->vq_ops->kick(vblk->vq);
}

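/* prepare_flush callback: rewrite the request as a flush so that do_req()
 * turns it into a VIRTIO_BLK_T_FLUSH command. */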
static void virtblk_prepare_flush(struct request_queue *q, struct request *req)
{
	req->cmd_type = REQ_TYPE_LINUX_BLOCK;
	req->cmd[0] = REQ_LB_OP_FLUSH;
}

/* Return the ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_SPECIAL;
	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

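	/* Private ioctl: fetch the disk ID (serial number) string. */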
	if (cmd == 0x56424944) { /* 'VBID' */
		void __user *usr_data = (void __user *)data;
		char id_str[VIRTIO_BLK_ID_BYTES];
		int err;

		err = virtblk_get_id(disk, id_str);
		if (!err && copy_to_user(usr_data, id_str, VIRTIO_BLK_ID_BYTES))
			err = -EFAULT;
		return err;
	}
	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)data);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.locked_ioctl = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err;
	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

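	/* Refuse to probe if we have run out of minor numbers. */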
	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
	if (err)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	q->queuedata = vblk;

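	/* Name the disk vda, vdb, ..., vdz, vdaa, vdab, ... */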
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 =  index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;

	/* If barriers are supported, tell the block layer that the queue is ordered */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH))
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH,
				  virtblk_prepare_flush);
	else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER))
		blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);


	add_disk(vblk->disk);
	return 0;

out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
}

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

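/* Feature bits this driver understands; advertised to the virtio core. */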
static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};

/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtblk_probe,
	.remove =	__devexit_p(virtblk_remove),
};

static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");