/* virtio_blk.c - driver for virtio block devices. */
//#define DEBUG
#include <linux/spinlock.h>
3
#include <linux/slab.h>
R
Rusty Russell 已提交
4 5
#include <linux/blkdev.h>
#include <linux/hdreg.h>
6
#include <linux/module.h>
7
#include <linux/mutex.h>
R
Rusty Russell 已提交
8 9
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
10
#include <linux/scatterlist.h>
11
#include <linux/string_helpers.h>
12
#include <scsi/scsi_cmnd.h>
13
#include <linux/idr.h>
14

15
#define PART_BITS 4
R
Rusty Russell 已提交
16

17 18 19
static bool use_bio;
module_param(use_bio, bool, S_IRUGO);

20 21 22
static int major;
static DEFINE_IDA(vd_index_ida);

23
struct workqueue_struct *virtblk_wq;
24

R
Rusty Russell 已提交
25 26 27 28
/*
 * Per-device driver state; one instance per probed virtio block device.
 */
struct virtio_blk
{
	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* Bio submitters sleep here when the virtqueue is full. */
	wait_queue_head_t queue_wait;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Pool of request trackers (struct virtblk_req). */
	mempool_t *pool;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Lock for config space updates */
	struct mutex config_lock;

	/* enable config space updates */
	bool config_enable;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

/* Per-request bookkeeping; carries either a struct request or a bio. */
struct virtblk_req
{
	struct request *req;		/* request-mode I/O; NULL in bio mode */
	struct bio *bio;		/* bio-mode I/O; NULL in request mode */
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	struct work_struct work;	/* deferred flush/data submission */
	struct virtio_blk *vblk;	/* owning device */
	int flags;			/* VBLK_* bits below (bio mode only) */
	u8 status;			/* written by the host on completion */
	struct scatterlist sg[];	/* per-request sg table (bio mode only) */
};

/* Bits for virtblk_req.flags; used only on the bio path. */
enum {
	VBLK_IS_FLUSH		= 1,	/* this submission is a flush */
	VBLK_REQ_FLUSH		= 2,	/* bio requested a pre-flush */
	VBLK_REQ_DATA		= 4,	/* bio carries data */
	VBLK_REQ_FUA		= 8,	/* bio requested a post-flush (FUA) */
};

75 76 77 78 79 80 81 82 83 84 85 86
/*
 * Translate the status byte the host wrote back into a Linux errno:
 * 0 on success, -ENOTTY when the host rejects the op as unsupported,
 * -EIO for any other failure.
 */
static inline int virtblk_result(struct virtblk_req *vbr)
{
	if (vbr->status == VIRTIO_BLK_S_OK)
		return 0;
	if (vbr->status == VIRTIO_BLK_S_UNSUPP)
		return -ENOTTY;
	return -EIO;
}

87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211
/*
 * Allocate a request tracker from the device's mempool.
 *
 * Returns NULL when the pool is exhausted and @gfp_mask does not allow
 * sleeping (GFP_ATOMIC callers); callers treat that as "retry when a
 * request completes".
 *
 * Fix: the original dereferenced vbr (vbr->vblk = vblk) before checking
 * for allocation failure, crashing on a NULL return from mempool_alloc.
 */
static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk,
						    gfp_t gfp_mask)
{
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, gfp_mask);
	if (!vbr)
		return NULL;

	/* Only the bio path uses the per-request scatterlist. */
	if (use_bio)
		sg_init_table(vbr->sg, vblk->sg_elems);

	vbr->vblk = vblk;

	return vbr;
}

/*
 * Slow path for virtblk_add_req(): the virtqueue was full, so sleep
 * uninterruptibly until virtblk_done() frees a slot and wakes us, then
 * retry.  Process context only (bio submission path).
 */
static void virtblk_add_buf_wait(struct virtio_blk *vblk,
				 struct virtblk_req *vbr,
				 unsigned long out,
				 unsigned long in)
{
	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait_exclusive(&vblk->queue_wait, &wait,
					  TASK_UNINTERRUPTIBLE);

		/* The request queue's lock serializes virtqueue access. */
		spin_lock_irq(vblk->disk->queue->queue_lock);
		if (virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
				      GFP_ATOMIC) < 0) {
			/* Still full: drop the lock and wait for a completion. */
			spin_unlock_irq(vblk->disk->queue->queue_lock);
			io_schedule();
		} else {
			virtqueue_kick(vblk->vq);
			spin_unlock_irq(vblk->disk->queue->queue_lock);
			break;
		}

	}

	finish_wait(&vblk->queue_wait, &wait);
}

/*
 * Queue @vbr (with @out readable and @in writable sg entries) on the
 * virtqueue and kick the host.  Falls back to the sleeping slow path
 * when the ring is full, so submission always succeeds eventually.
 */
static inline void virtblk_add_req(struct virtblk_req *vbr,
				   unsigned int out, unsigned int in)
{
	struct virtio_blk *vblk = vbr->vblk;

	spin_lock_irq(vblk->disk->queue->queue_lock);
	if (unlikely(virtqueue_add_buf(vblk->vq, vbr->sg, out, in, vbr,
					GFP_ATOMIC) < 0)) {
		spin_unlock_irq(vblk->disk->queue->queue_lock);
		virtblk_add_buf_wait(vblk, vbr, out, in);
		return;
	}
	virtqueue_kick(vblk->vq);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
}

/*
 * Submit a VIRTIO_BLK_T_FLUSH for a bio-mode request: one "out" sg for
 * the header, one "in" sg for the status byte.  Always returns 0.
 */
static int virtblk_bio_send_flush(struct virtblk_req *vbr)
{
	unsigned int out = 0, in = 0;

	vbr->flags |= VBLK_IS_FLUSH;
	vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
	vbr->out_hdr.sector = 0;
	vbr->out_hdr.ioprio = 0;
	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));
	sg_set_buf(&vbr->sg[out + in++], &vbr->status, sizeof(vbr->status));

	virtblk_add_req(vbr, out, in);

	return 0;
}

/*
 * Submit the data phase of a bio-mode request: header sg, the bio's
 * data segments, then the status byte.  The transfer direction
 * (T_OUT/T_IN) comes from the bio's rw flags.  Always returns 0.
 */
static int virtblk_bio_send_data(struct virtblk_req *vbr)
{
	struct virtio_blk *vblk = vbr->vblk;
	unsigned int num, out = 0, in = 0;
	struct bio *bio = vbr->bio;

	vbr->flags &= ~VBLK_IS_FLUSH;
	vbr->out_hdr.type = 0;
	vbr->out_hdr.sector = bio->bi_sector;
	vbr->out_hdr.ioprio = bio_prio(bio);

	sg_set_buf(&vbr->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	num = blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg + out);

	/* Status byte always follows the data segments. */
	sg_set_buf(&vbr->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (bio->bi_rw & REQ_WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	virtblk_add_req(vbr, out, in);

	return 0;
}

/* Workqueue trampoline: run the data phase from process context. */
static void virtblk_bio_send_data_work(struct work_struct *work)
{
	struct virtblk_req *vbr = container_of(work, struct virtblk_req, work);

	virtblk_bio_send_data(vbr);
}

/* Workqueue trampoline: run the flush phase from process context. */
static void virtblk_bio_send_flush_work(struct work_struct *work)
{
	struct virtblk_req *vbr = container_of(work, struct virtblk_req, work);

	virtblk_bio_send_flush(vbr);
}

/*
 * Complete a request-mode command: copy back SCSI pass-through status
 * where relevant, end the block layer request, and free the tracker.
 * Called from virtblk_done() with the queue lock held.
 */
static inline void virtblk_request_done(struct virtblk_req *vbr)
{
	struct virtio_blk *vblk = vbr->vblk;
	struct request *req = vbr->req;
	int error = virtblk_result(vbr);

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		/* SCSI pass-through: hand residual/sense/errors back. */
		req->resid_len = vbr->in_hdr.residual;
		req->sense_len = vbr->in_hdr.sense_len;
		req->errors = vbr->in_hdr.errors;
	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
		/* GET_ID request: caller only checks pass/fail. */
		req->errors = (error != 0);
	}

	__blk_end_request_all(req, error);
	mempool_free(vbr, vblk->pool);
}

229
/*
 * A flush finished.  If it was the pre-flush of a bio that still has
 * data to send, kick the data phase off to a workqueue (we are in
 * interrupt context here); otherwise complete the bio.
 */
static inline void virtblk_bio_flush_done(struct virtblk_req *vbr)
{
	struct virtio_blk *vblk = vbr->vblk;

	if (vbr->flags & VBLK_REQ_DATA) {
		/* Send out the actual write data */
		INIT_WORK(&vbr->work, virtblk_bio_send_data_work);
		queue_work(virtblk_wq, &vbr->work);
	} else {
		bio_endio(vbr->bio, virtblk_result(vbr));
		mempool_free(vbr, vblk->pool);
	}
}

/*
 * The data phase of a bio finished.  If the bio asked for FUA, issue a
 * post-flush from a workqueue before completing; otherwise end the bio
 * now and release the tracker.
 */
static inline void virtblk_bio_data_done(struct virtblk_req *vbr)
{
	struct virtio_blk *vblk = vbr->vblk;

	if (unlikely(vbr->flags & VBLK_REQ_FUA)) {
		/* Send out a flush before end the bio */
		vbr->flags &= ~VBLK_REQ_DATA;
		INIT_WORK(&vbr->work, virtblk_bio_send_flush_work);
		queue_work(virtblk_wq, &vbr->work);
	} else {
		bio_endio(vbr->bio, virtblk_result(vbr));
		mempool_free(vbr, vblk->pool);
	}
}

/* Completion dispatch for bio-mode requests (interrupt context). */
static inline void virtblk_bio_done(struct virtblk_req *vbr)
{
	if (likely(!(vbr->flags & VBLK_IS_FLUSH)))
		virtblk_bio_data_done(vbr);
	else
		virtblk_bio_flush_done(vbr);
}

/*
 * Virtqueue callback: reap every completed buffer under the queue lock
 * and dispatch it to the bio- or request-mode completion path; then
 * restart the stopped request queue and/or wake sleeping bio
 * submitters as appropriate.
 */
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool bio_done = false, req_done = false;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(vblk->disk->queue->queue_lock, flags);
	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
		if (vbr->bio) {
			virtblk_bio_done(vbr);
			bio_done = true;
		} else {
			virtblk_request_done(vbr);
			req_done = true;
		}
	}
	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags);

	if (bio_done)
		wake_up(&vblk->queue_wait);
}

R
Rusty Russell 已提交
293 294 295
/*
 * Build and queue one block layer request on the virtqueue, using the
 * device-wide scatterlist (callers hold the queue lock, which
 * serializes us).  Returns false when no tracker or ring slot is
 * available; the caller then stops the queue and retries after a
 * completion.
 */
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = virtblk_alloc_req(vblk, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;
	vbr->bio = NULL;
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			/* Only used by virtblk_get_id(). */
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information before the normal inhdr.
	 */
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	/* Status byte is always the final "in" segment. */
	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr,
			      GFP_ATOMIC) < 0) {
		/* Ring full: free the tracker; caller stops the queue. */
		mempool_free(vbr, vblk->pool);
		return false;
	}

	return true;
}

374
/*
 * Request-mode queue handler, called by the block layer with the queue
 * lock held: hand queued requests to do_req(), stopping the queue when
 * the virtqueue fills up, and kick the host once if anything went out.
 */
static void virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

	while ((req = blk_peek_request(q)) != NULL) {
		/* Probe sized the queue so requests always fit our sg table. */
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blk_start_request(req);
		issued++;
	}

	if (issued)
		virtqueue_kick(vblk->vq);
}

397 398 399 400 401 402 403 404 405 406 407 408 409 410
/*
 * Bio-mode entry point (use_bio=1): classify the bio's flush/FUA/data
 * requirements and submit either the pre-flush or the data phase
 * immediately.  May sleep (GFP_NOIO allocation, full-ring wait).
 */
static void virtblk_make_request(struct request_queue *q, struct bio *bio)
{
	struct virtio_blk *vblk = q->queuedata;
	struct virtblk_req *vbr;

	BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems);

	vbr = virtblk_alloc_req(vblk, GFP_NOIO);
	if (!vbr) {
		bio_endio(bio, -ENOMEM);
		return;
	}

	vbr->bio = bio;
	vbr->flags = 0;
	if (bio->bi_rw & REQ_FLUSH)
		vbr->flags |= VBLK_REQ_FLUSH;
	if (bio->bi_rw & REQ_FUA)
		vbr->flags |= VBLK_REQ_FUA;
	if (bio->bi_size)
		vbr->flags |= VBLK_REQ_DATA;

	/* A pre-flush goes out first; the data phase is chained from its
	 * completion handler (virtblk_bio_flush_done). */
	if (unlikely(vbr->flags & VBLK_REQ_FLUSH))
		virtblk_bio_send_flush(vbr);
	else
		virtblk_bio_send_data(vbr);
}

425 426 427 428 429 430 431
/* return id (s/n) string for *disk to *id_str
 * (issues a VIRTIO_BLK_T_GET_ID command synchronously through the
 * block layer; *id_str must have room for VIRTIO_BLK_ID_BYTES).
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;
	int err;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	/* REQ_TYPE_SPECIAL is what do_req() maps to VIRTIO_BLK_T_GET_ID. */
	req->cmd_type = REQ_TYPE_SPECIAL;
	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	blk_put_request(req);

	return err;
}

452 453
/*
 * Forward SCSI ioctls to the common block layer helper, but only when
 * the host advertised VIRTIO_BLK_F_SCSI; everything else is -ENOTTY.
 */
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

468 469 470
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

493
/* Block device operations: only ioctl and getgeo are needed. */
static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

499 500 501 502 503
/* First minor of disk @index: each disk owns 1 << PART_BITS minors. */
static int index_to_minor(int index)
{
	int minor = index << PART_BITS;

	return minor;
}

504 505 506 507 508
/* Strip the per-disk partition bits to recover the ida index. */
static int minor_to_index(int minor)
{
	int index = minor >> PART_BITS;

	return index;
}

509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529
/*
 * sysfs "serial" attribute: fetch the device ID string from the host.
 * Returns the string's length, 0 when the host doesn't implement
 * GET_ID, or a negative errno.
 */
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

530 531 532 533 534 535 536 537 538
/*
 * Process-context handler for config-change interrupts: re-read the
 * capacity from config space, log the new size and update the gendisk.
 * Does nothing once config_enable is cleared (remove/freeze pending).
 */
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	u64 capacity, size;

	mutex_lock(&vblk->config_lock);
	if (!vblk->config_enable)
		goto done;

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &capacity, sizeof(capacity));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	size = capacity * queue_logical_block_size(q);
	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
		  (unsigned long long)capacity,
		  queue_logical_block_size(q),
		  cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
	revalidate_disk(vblk->disk);
done:
	mutex_unlock(&vblk->config_lock);
}

/* Config-change interrupt: punt the work to process context. */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

577 578 579 580 581
static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;

	/* We expect one virtqueue, for output. */
582
	vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests");
583 584 585 586 587 588
	if (IS_ERR(vblk->vq))
		err = PTR_ERR(vblk->vq);

	return err;
}

589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Writes "<prefix><letters>" into @buf (e.g. vda, vdz, vdaa, ...).
 * Returns 0, or -EINVAL when @buflen is too small for the name.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int radix = 'z' - 'a' + 1;	/* 26 letters */
	size_t prefix_len = strlen(prefix);
	char *suffix_start = buf + prefix_len;
	char *tail = buf + buflen;
	char *cursor = tail - 1;

	*cursor = '\0';

	/* Emit the bijective base-26 digits least-significant first,
	 * walking backwards from the end of the buffer. */
	for (;;) {
		if (cursor == suffix_start)
			return -EINVAL;
		*--cursor = 'a' + (index % radix);
		index = index / radix - 1;
		if (index < 0)
			break;
	}

	/* Slide the suffix down next to the prefix, then prepend it. */
	memmove(suffix_start, cursor, tail - cursor);
	memcpy(buf, prefix, prefix_len);

	return 0;
}

617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635
/*
 * Read the writeback flag from config space; fall back to the WCE
 * feature bit when the host doesn't expose CONFIG_WCE.  Returns 0
 * (write through) or 1 (write back).
 */
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				offsetof(struct virtio_blk_config, wce),
				&writeback);
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);

	return writeback;
}

/*
 * Apply the host's current cache mode to the block layer's flush
 * policy and re-read the disk so userspace sees the change.
 */
static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	if (writeback)
		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
	else
		blk_queue_flush(vblk->disk->queue, 0);

	revalidate_disk(vblk->disk);
}

/* Index matches the wce config byte: 0 = write through, 1 = write back. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

/*
 * sysfs cache_type writer: map the string to its index in
 * virtblk_cache_types, push it to config space, and refresh the flush
 * policy.  Only wired up when the host offers CONFIG_WCE.
 */
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;
	u8 writeback;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; )
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	/* i < 0 means the string matched no known cache type. */
	if (i < 0)
		return -EINVAL;

	writeback = i;
	vdev->config->set(vdev,
			  offsetof(struct virtio_blk_config, wce),
			  &writeback, sizeof(writeback));

	virtblk_update_cache_mode(vdev);
	return count;
}

/* sysfs cache_type reader: print the current cache mode's name. */
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

/* cache_type is writable only when the host offers CONFIG_WCE;
 * probe picks one of these two attribute variants accordingly. */
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

694
/*
 * Probe one virtio block device: allocate driver state, set up the
 * virtqueue, create the gendisk and request queue, apply host-provided
 * limits and topology, then register the disk and sysfs attributes.
 * Error paths unwind in strict reverse order of acquisition.
 */
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;
	int pool_size;

	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	/* Reserve a minor-number slot for this disk. */
	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	init_waitqueue_head(&vblk->queue_wait);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);
	mutex_init(&vblk->config_lock);

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
	vblk->config_enable = true;

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Bio-mode trackers carry their own scatterlist after the struct. */
	pool_size = sizeof(struct virtblk_req);
	if (use_bio)
		pool_size += sizeof(struct scatterlist) * sg_elems;
	vblk->pool = mempool_create_kmalloc_pool(1, pool_size);
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	if (use_bio)
		blk_queue_make_request(q, virtblk_make_request);
	q->queuedata = vblk;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems-2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	add_disk(vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	/* cache_type is writable only when the host offers CONFIG_WCE. */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

883
/*
 * Tear down a device: stop config updates, unregister the disk, reset
 * the device, then free everything probe allocated.
 */
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int index = vblk->index;

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Any queued config work sees config_enable == false and bails. */
	flush_work(&vblk->config_work);

	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
	ida_simple_remove(&vd_index_ida, index);
}

908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947
#ifdef CONFIG_PM
/*
 * Suspend: quiesce the device, stop config updates, halt the request
 * queue, and drop the virtqueue so the transport can be frozen.
 */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	flush_work(&vblk->config_work);

	spin_lock_irq(vblk->disk->queue->queue_lock);
	blk_stop_queue(vblk->disk->queue);
	spin_unlock_irq(vblk->disk->queue->queue_lock);
	blk_sync_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

/*
 * Resume: re-enable config updates, re-create the virtqueue, and
 * restart the request queue.
 */
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	vblk->config_enable = true;
	ret = init_vq(vdev->priv);
	if (!ret) {
		spin_lock_irq(vblk->disk->queue->queue_lock);
		blk_start_queue(vblk->disk->queue);
		spin_unlock_irq(vblk->disk->queue->queue_lock);
	}
	return ret;
}
#endif

948
/* Devices we bind to: any virtio block device. */
static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

953
/* Feature bits this driver understands and negotiates with the host. */
static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE
};

959 960 961 962 963 964
/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
/* Driver registration table: ties the feature list, id table and
 * probe/remove/config-change/PM callbacks together. */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= virtblk_probe,
	.remove			= __devexit_p(virtblk_remove),
	.config_changed		= virtblk_config_changed,
#ifdef CONFIG_PM
	.freeze			= virtblk_freeze,
	.restore		= virtblk_restore,
#endif
};

/*
 * Module init: create the config-update workqueue, grab a dynamic
 * block major, and register the virtio driver — unwinding each step
 * on failure.
 */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: undo init() in reverse order. */
static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");