//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#define PART_BITS 4
R
Rusty Russell 已提交
12

13
static int major, index;
14

R
Rusty Russell 已提交
15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

30 31 32
	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

R
Rusty Russell 已提交
33
	/* Scatterlist: can be too big for stack. */
34
	struct scatterlist sg[/*sg_elems*/];
R
Rusty Russell 已提交
35 36 37 38 39 40 41
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
42
	struct virtio_scsi_inhdr in_hdr;
43
	u8 status;
R
Rusty Russell 已提交
44 45
};

46
static void blk_done(struct virtqueue *vq)
R
Rusty Russell 已提交
47 48 49 50 51 52 53
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
54
	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
55
		int error;
56

57
		switch (vbr->status) {
R
Rusty Russell 已提交
58
		case VIRTIO_BLK_S_OK:
59
			error = 0;
R
Rusty Russell 已提交
60 61
			break;
		case VIRTIO_BLK_S_UNSUPP:
62
			error = -ENOTTY;
R
Rusty Russell 已提交
63 64
			break;
		default:
65
			error = -EIO;
R
Rusty Russell 已提交
66 67 68
			break;
		}

69 70
		switch (vbr->req->cmd_type) {
		case REQ_TYPE_BLOCK_PC:
71 72 73
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
74 75
			break;
		case REQ_TYPE_SPECIAL:
76
			vbr->req->errors = (error != 0);
77
			break;
78 79
		default:
			break;
80
		}
81

82
		__blk_end_request_all(vbr->req, error);
R
Rusty Russell 已提交
83 84 85 86 87 88 89 90 91 92 93
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}

static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
94
	unsigned long num, out = 0, in = 0;
R
Rusty Russell 已提交
95 96 97 98 99 100 101 102
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
103 104 105

	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
106 107
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
108 109 110 111 112 113 114 115 116
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
117 118 119
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
120 121 122 123 124 125 126 127
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
128
		}
R
Rusty Russell 已提交
129 130
	}

131
	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
R
Rusty Russell 已提交
132 133
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

134
	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
R
Rusty Russell 已提交
135

136 137 138 139 140 141
	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information before the normal inhdr.
	 */
142
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
143 144 145 146
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

147
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
R
Rusty Russell 已提交
164 165
	}

166
	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
R
Rusty Russell 已提交
167 168 169 170 171 172 173 174 175 176
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}

static void do_virtblk_request(struct request_queue *q)
{
177
	struct virtio_blk *vblk = q->queuedata;
R
Rusty Russell 已提交
178 179 180
	struct request *req;
	unsigned int issued = 0;

181
	while ((req = blk_peek_request(q)) != NULL) {
182
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
R
Rusty Russell 已提交
183 184 185 186 187 188 189

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
190
		blk_start_request(req);
R
Rusty Russell 已提交
191 192 193 194
		issued++;
	}

	if (issued)
195
		virtqueue_kick(vblk->vq);
R
Rusty Russell 已提交
196 197
}

198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_SPECIAL;
	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
}

221
static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
R
Rusty Russell 已提交
222 223
			 unsigned cmd, unsigned long data)
{
224 225 226 227 228 229 230
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
231
		return -ENOTTY;
232

233 234
	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)data);
R
Rusty Russell 已提交
235 236
}

237 238 239 240 241 242 243 244 245 246 247 248
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long param)
{
	int ret;

	lock_kernel();
	ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
	unlock_kernel();

	return ret;
}

249 250 251
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
271 272 273
	return 0;
}

274
static const struct block_device_operations virtblk_fops = {
275
	.ioctl  = virtblk_ioctl,
276 277
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
R
Rusty Russell 已提交
278 279
};

280 281 282 283 284
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

306
static int __devinit virtblk_probe(struct virtio_device *vdev)
R
Rusty Russell 已提交
307 308
{
	struct virtio_blk *vblk;
309
	struct request_queue *q;
310
	int err;
R
Rusty Russell 已提交
311
	u64 cap;
312 313 314
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
R
Rusty Russell 已提交
315

316
	if (index_to_minor(index) >= 1 << MINORBITS)
317 318
		return -ENOSPC;

319 320 321 322
	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);
323 324 325

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
326 327 328 329 330 331
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
R
Rusty Russell 已提交
332 333 334 335 336 337 338 339
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
340 341
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);
R
Rusty Russell 已提交
342 343

	/* We expect one virtqueue, for output. */
344
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
R
Rusty Russell 已提交
345 346 347 348 349 350 351 352 353 354 355 356
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
357
	vblk->disk = alloc_disk(1 << PART_BITS);
R
Rusty Russell 已提交
358 359
	if (!vblk->disk) {
		err = -ENOMEM;
360
		goto out_mempool;
R
Rusty Russell 已提交
361 362
	}

363 364
	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
R
Rusty Russell 已提交
365 366 367 368
		err = -ENOMEM;
		goto out_put_disk;
	}

369
	q->queuedata = vblk;
370

371 372 373 374 375 376 377 378 379 380 381 382 383
	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 =  index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

R
Rusty Russell 已提交
384
	vblk->disk->major = major;
385
	vblk->disk->first_minor = index_to_minor(index);
R
Rusty Russell 已提交
386 387
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
388
	vblk->disk->driverfs_dev = &vdev->dev;
389
	index++;
390

391 392 393 394 395 396
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
		/*
		 * If the FLUSH feature is supported we do have support for
		 * flushing a volatile write cache on the host.  Use that
		 * to implement write barrier support.
		 */
397
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
398 399 400 401 402 403 404
	} else {
		/*
		 * If the FLUSH feature is not supported we must assume that
		 * the host does not perform any kind of volatile write
		 * caching. We still need to drain the queue to provider
		 * proper barrier semantics.
		 */
405
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
406
	}
R
Rusty Russell 已提交
407

408 409 410 411
	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

412
	/* Host must always specify the capacity. */
413 414
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));
R
Rusty Russell 已提交
415 416 417 418 419 420 421 422 423

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

424
	/* We can handle whatever the host told us to handle. */
425
	blk_queue_max_segments(q, vblk->sg_elems-2);
426

427
	/* No need to bounce any requests */
428
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
429

430
	/* No real sector limit. */
431
	blk_queue_max_hw_sectors(q, -1U);
432

433 434 435 436 437
	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
R
Rusty Russell 已提交
438
	if (!err)
439
		blk_queue_max_segment_size(q, v);
440
	else
441
		blk_queue_max_segment_size(q, -1U);
R
Rusty Russell 已提交
442

443 444 445 446 447
	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

478

R
Rusty Russell 已提交
479
	add_disk(vblk->disk);
480 481 482 483
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

R
Rusty Russell 已提交
484 485
	return 0;

486 487 488
out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
R
Rusty Russell 已提交
489 490 491 492 493
out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
494
	vdev->config->del_vqs(vdev);
R
Rusty Russell 已提交
495 496 497 498 499 500
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

501
static void __devexit virtblk_remove(struct virtio_device *vdev)
R
Rusty Russell 已提交
502 503 504
{
	struct virtio_blk *vblk = vdev->priv;

R
Rusty Russell 已提交
505
	/* Nothing should be pending. */
R
Rusty Russell 已提交
506
	BUG_ON(!list_empty(&vblk->reqs));
R
Rusty Russell 已提交
507 508 509 510

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

511
	del_gendisk(vblk->disk);
R
Rusty Russell 已提交
512 513 514
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
515
	vdev->config->del_vqs(vdev);
R
Rusty Russell 已提交
516 517 518
	kfree(vblk);
}

519
static const struct virtio_device_id id_table[] = {
R
Rusty Russell 已提交
520 521 522 523
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

524 525
static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
526
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
527
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
528 529
};

530 531 532 533 534 535
/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
536 537
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
R
Rusty Russell 已提交
538 539 540 541 542 543 544 545 546
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtblk_probe,
	.remove =	__devexit_p(virtblk_remove),
};

static int __init init(void)
{
547 548 549
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
R
Rusty Russell 已提交
550 551 552 553 554
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
555
	unregister_blkdev(major, "virtblk");
R
Rusty Russell 已提交
556 557 558 559 560 561 562 563
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");