/* virtio_blk.c — virtio block device driver (blk-mq based) */
//#define DEBUG
#include <linux/spinlock.h>
3
#include <linux/slab.h>
R
Rusty Russell 已提交
4 5
#include <linux/blkdev.h>
#include <linux/hdreg.h>
6
#include <linux/module.h>
7
#include <linux/mutex.h>
R
Rusty Russell 已提交
8 9
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
10
#include <linux/scatterlist.h>
11
#include <linux/string_helpers.h>
12
#include <scsi/scsi_cmnd.h>
13
#include <linux/idr.h>
J
Jens Axboe 已提交
14 15
#include <linux/blk-mq.h>
#include <linux/numa.h>
16

17
/* Number of minor-number bits reserved per disk for partitions. */
#define PART_BITS 4
/* Buffer size for a per-virtqueue name ("req.N"). */
#define VQ_NAME_LEN 16

/* Block major number, allocated by register_blkdev() at module init. */
static int major;
/* Allocator for per-device indices (drives minor numbers and vdX names). */
static DEFINE_IDA(vd_index_ida);

/* Workqueue used to run config-space change handlers in process context. */
static struct workqueue_struct *virtblk_wq;

25 26 27 28 29 30
/* Per-virtqueue state: the queue itself, its lock, and its name. */
struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;	/* serializes access to @vq */
	char name[VQ_NAME_LEN];	/* "req.N", handed to find_vqs() */
} ____cacheline_aligned_in_smp;	/* keep queues on separate cache lines */

R
Rusty Russell 已提交
31 32 33 34 35 36 37
struct virtio_blk
{
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

38 39 40
	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

41 42 43
	/* Process context for config space updates */
	struct work_struct config_work;

44 45 46 47 48 49
	/* Lock for config space updates */
	struct mutex config_lock;

	/* enable config space updates */
	bool config_enable;

50 51 52
	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

53 54
	/* Ida index - used to track minor number allocations. */
	int index;
55 56 57 58

	/* num of vqs */
	int num_vqs;
	struct virtio_blk_vq *vqs;
R
Rusty Russell 已提交
59 60 61 62 63 64
};

struct virtblk_req
{
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
65
	struct virtio_scsi_inhdr in_hdr;
66
	u8 status;
67
	struct scatterlist sg[];
R
Rusty Russell 已提交
68 69
};

70 71 72 73 74 75 76 77 78 79 80 81
/* Translate the device's status byte into a Linux errno. */
static inline int virtblk_result(struct virtblk_req *vbr)
{
	if (vbr->status == VIRTIO_BLK_S_OK)
		return 0;
	if (vbr->status == VIRTIO_BLK_S_UNSUPP)
		return -ENOTTY;
	/* Any other status (e.g. VIRTIO_BLK_S_IOERR) maps to -EIO. */
	return -EIO;
}

82
static int __virtblk_add_req(struct virtqueue *vq,
83 84
			     struct virtblk_req *vbr,
			     struct scatterlist *data_sg,
R
Rusty Russell 已提交
85
			     bool have_data)
86
{
87
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
88
	unsigned int num_out = 0, num_in = 0;
89
	int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT;
90 91 92 93

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

94 95 96 97 98 99 100 101 102 103 104
	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len);
		sgs[num_out++] = &cmd;
	}

R
Rusty Russell 已提交
105
	if (have_data) {
106
		if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT)
107
			sgs[num_out++] = data_sg;
108
		else
109 110 111 112 113 114 115 116
			sgs[num_out + num_in++] = data_sg;
	}

	if (type == VIRTIO_BLK_T_SCSI_CMD) {
		sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
		sgs[num_out + num_in++] = &sense;
		sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
		sgs[num_out + num_in++] = &inhdr;
117 118 119 120 121 122
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
123 124
}

125
static inline void virtblk_request_done(struct request *req)
126
{
127
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
128 129 130 131 132 133 134 135 136 137
	int error = virtblk_result(vbr);

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
		req->resid_len = vbr->in_hdr.residual;
		req->sense_len = vbr->in_hdr.sense_len;
		req->errors = vbr->in_hdr.errors;
	} else if (req->cmd_type == REQ_TYPE_SPECIAL) {
		req->errors = (error != 0);
	}

J
Jens Axboe 已提交
138
	blk_mq_end_io(req, error);
139 140 141
}

static void virtblk_done(struct virtqueue *vq)
R
Rusty Russell 已提交
142 143
{
	struct virtio_blk *vblk = vq->vdev->priv;
J
Jens Axboe 已提交
144
	bool req_done = false;
145
	int qid = vq->index;
R
Rusty Russell 已提交
146 147
	struct virtblk_req *vbr;
	unsigned long flags;
148
	unsigned int len;
R
Rusty Russell 已提交
149

150
	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
151 152
	do {
		virtqueue_disable_cb(vq);
153
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
154
			blk_mq_complete_request(vbr->req);
J
Jens Axboe 已提交
155
			req_done = true;
156
		}
157 158
		if (unlikely(virtqueue_is_broken(vq)))
			break;
159
	} while (!virtqueue_enable_cb(vq));
J
Jens Axboe 已提交
160

R
Rusty Russell 已提交
161
	/* In case queue is stopped waiting for more buffers. */
162
	if (req_done)
163
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
164
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
165 166
}

J
Jens Axboe 已提交
167
static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
R
Rusty Russell 已提交
168
{
J
Jens Axboe 已提交
169
	struct virtio_blk *vblk = hctx->queue->queuedata;
170
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
J
Jens Axboe 已提交
171
	unsigned long flags;
172
	unsigned int num;
173
	int qid = hctx->queue_num;
J
Jens Axboe 已提交
174
	const bool last = (req->cmd_flags & REQ_END) != 0;
175
	int err;
176
	bool notify = false;
R
Rusty Russell 已提交
177

J
Jens Axboe 已提交
178
	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
R
Rusty Russell 已提交
179 180

	vbr->req = req;
181 182
	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
183 184
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
185 186 187 188 189 190 191 192 193
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
194 195 196
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
197 198 199 200 201 202 203 204
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
205
		}
R
Rusty Russell 已提交
206 207
	}

J
Jens Axboe 已提交
208
	num = blk_rq_map_sg(hctx->queue, vbr->req, vbr->sg);
209
	if (num) {
210
		if (rq_data_dir(vbr->req) == WRITE)
211
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
212
		else
213
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
R
Rusty Russell 已提交
214 215
	}

216 217
	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
218
	if (err) {
219
		virtqueue_kick(vblk->vqs[qid].vq);
J
Jens Axboe 已提交
220
		blk_mq_stop_hw_queue(hctx);
221
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
222 223 224 225 226
		/* Out of mem doesn't actually happen, since we fall back
		 * to direct descriptors */
		if (err == -ENOMEM || err == -ENOSPC)
			return BLK_MQ_RQ_QUEUE_BUSY;
		return BLK_MQ_RQ_QUEUE_ERROR;
227 228
	}

229
	if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
230
		notify = true;
231
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
232 233

	if (notify)
234
		virtqueue_notify(vblk->vqs[qid].vq);
J
Jens Axboe 已提交
235
	return BLK_MQ_RQ_QUEUE_OK;
236 237
}

238 239 240 241 242 243 244
/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;
M
Mike Snitzer 已提交
245
	int err;
246 247 248 249 250 251 252 253 254 255 256 257 258

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_SPECIAL;
M
Mike Snitzer 已提交
259 260 261 262
	err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
	blk_put_request(req);

	return err;
263 264
}

265 266
/* Block-device ioctl: forwards SG_IO-style commands when the host allows. */
static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned int cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_blk_ioctl(bdev, mode, cmd,
				  (void __user *)data);
}

281 282 283
/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
284 285 286
	struct virtio_blk *vblk = bd->bd_disk->private_data;

	/* see if the host passed in geometry config */
287 288 289 290 291 292 293
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
294 295 296 297 298 299
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
300 301 302
	return 0;
}

303
static const struct block_device_operations virtblk_fops = {
304
	.ioctl  = virtblk_ioctl,
305 306
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
R
Rusty Russell 已提交
307 308
};

309 310 311 312 313
/* First minor number of the disk at @index (PART_BITS minors per disk). */
static int index_to_minor(int index)
{
	return index << PART_BITS;
}

314 315 316 317 318
/* Inverse of index_to_minor(): device index owning @minor. */
static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339
/* sysfs "serial" attribute: reads the device ID string on demand. */
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	/* The ID may not be NUL-terminated by the device; terminate it. */
	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

340 341 342 343 344 345 346
static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
347
	char *envp[] = { "RESIZE=1", NULL };
348 349
	u64 capacity, size;

350 351 352 353
	mutex_lock(&vblk->config_lock);
	if (!vblk->config_enable)
		goto done;

354
	/* Host must always specify the capacity. */
355
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)capacity != capacity) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)capacity);
		capacity = (sector_t)-1;
	}

	size = capacity * queue_logical_block_size(q);
	string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		  "new size: %llu %d-byte logical blocks (%s/%s)\n",
		  (unsigned long long)capacity,
		  queue_logical_block_size(q),
		  cap_str_10, cap_str_2);

	set_capacity(vblk->disk, capacity);
375
	revalidate_disk(vblk->disk);
376
	kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
377 378
done:
	mutex_unlock(&vblk->config_lock);
379 380 381 382 383 384 385 386 387
}

/* Interrupt-context notification: defer the real work to the workqueue. */
static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

388 389 390
static int init_vq(struct virtio_blk *vblk)
{
	int err = 0;
391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	vblk->vqs = kmalloc(sizeof(*vblk->vqs) * num_vqs, GFP_KERNEL);
	if (!vblk->vqs) {
		err = -ENOMEM;
		goto out;
	}

	names = kmalloc(sizeof(*names) * num_vqs, GFP_KERNEL);
	if (!names)
		goto err_names;

	callbacks = kmalloc(sizeof(*callbacks) * num_vqs, GFP_KERNEL);
	if (!callbacks)
		goto err_callbacks;

	vqs = kmalloc(sizeof(*vqs) * num_vqs, GFP_KERNEL);
	if (!vqs)
		goto err_vqs;
421

422 423 424 425 426 427 428 429 430 431
	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration.  */
	err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
	if (err)
		goto err_find_vqs;
432

433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448
	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

 err_find_vqs:
	kfree(vqs);
 err_vqs:
	kfree(callbacks);
 err_callbacks:
	kfree(names);
 err_names:
	if (err)
		kfree(vblk->vqs);
 out:
449 450 451
	return err;
}

452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479
/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 *
 * Encodes @index as a bijective base-26 suffix after @prefix into @buf
 * (vda, vdb, ..., vdz, vdaa, ...).  Returns 0 or -EINVAL if @buflen is
 * too small to hold the result.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p = end - 1;

	/* Build the suffix backwards from the end of the buffer. */
	*p = '\0';
	do {
		if (p == begin)
			return -EINVAL;	/* ran out of room */
		*--p = 'a' + (index % base);
		index = index / base - 1;
	} while (index >= 0);

	/* Slide the suffix up against the prefix, then write the prefix. */
	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

480 481 482 483 484
static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

485 486 487
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);
488 489 490 491 492 493 494 495 496 497 498
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

499
	if (writeback)
500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527
		blk_queue_flush(vblk->disk->queue, REQ_FLUSH);
	else
		blk_queue_flush(vblk->disk->queue, 0);

	revalidate_disk(vblk->disk);
}

/* Indexed by cache mode: 0 = writethrough, 1 = writeback. */
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

/* sysfs "cache_type" store: parse the mode name and push it to the device. */
static ssize_t
virtblk_cache_type_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	/* This attribute is only registered when CONFIG_WCE is negotiated. */
	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	for (i = ARRAY_SIZE(virtblk_cache_types) - 1; i >= 0; i--)
		if (sysfs_streq(buf, virtblk_cache_types[i]))
			break;

	if (i < 0)
		return -EINVAL;

	/* Write the new mode to config space, then refresh queue flags. */
	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

/* sysfs "cache_type" show: report the current mode by name. */
static ssize_t
virtblk_cache_type_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

/* Read-only variant: used when the host cannot change the cache mode. */
static const struct device_attribute dev_attr_cache_type_ro =
	__ATTR(cache_type, S_IRUGO,
	       virtblk_cache_type_show, NULL);
/* Read-write variant: used when VIRTIO_BLK_F_CONFIG_WCE is negotiated. */
static const struct device_attribute dev_attr_cache_type_rw =
	__ATTR(cache_type, S_IRUGO|S_IWUSR,
	       virtblk_cache_type_show, virtblk_cache_type_store);

552 553 554
static int virtblk_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
555 556 557 558 559 560 561 562
{
	struct virtio_blk *vblk = data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

J
Jens Axboe 已提交
563 564 565
static struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.map_queue	= blk_mq_map_queue,
566
	.complete	= virtblk_request_done,
567
	.init_request	= virtblk_init_request,
J
Jens Axboe 已提交
568 569
};

570 571
/* Queue depth override; 0 (default) means "size to the virtqueue ring". */
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
J
Jens Axboe 已提交
572

573
static int virtblk_probe(struct virtio_device *vdev)
R
Rusty Russell 已提交
574 575
{
	struct virtio_blk *vblk;
576
	struct request_queue *q;
577
	int err, index;
578

R
Rusty Russell 已提交
579
	u64 cap;
580 581 582
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
R
Rusty Russell 已提交
583

584 585 586 587 588
	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;
589

590
	/* We need to know how many segments before we allocate. */
591 592 593
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);
594 595 596

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
597 598 599 600
		sg_elems = 1;

	/* We need an extra sg elements at head and tail. */
	sg_elems += 2;
J
Jens Axboe 已提交
601
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
R
Rusty Russell 已提交
602 603
	if (!vblk) {
		err = -ENOMEM;
604
		goto out_free_index;
R
Rusty Russell 已提交
605 606 607
	}

	vblk->vdev = vdev;
608
	vblk->sg_elems = sg_elems;
609
	mutex_init(&vblk->config_lock);
610

611
	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
612
	vblk->config_enable = true;
R
Rusty Russell 已提交
613

614 615
	err = init_vq(vblk);
	if (err)
R
Rusty Russell 已提交
616 617 618
		goto out_free_vblk;

	/* FIXME: How many partitions?  How long is a piece of string? */
619
	vblk->disk = alloc_disk(1 << PART_BITS);
R
Rusty Russell 已提交
620 621
	if (!vblk->disk) {
		err = -ENOMEM;
J
Jens Axboe 已提交
622
		goto out_free_vq;
R
Rusty Russell 已提交
623 624
	}

625
	/* Default queue sizing is to fill the ring. */
626
	if (!virtblk_queue_depth) {
627
		virtblk_queue_depth = vblk->vqs[0].vq->num_free;
628 629
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
630
			virtblk_queue_depth /= 2;
631
	}
632 633 634 635 636 637 638

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = virtblk_queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
J
Jens Axboe 已提交
639 640
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
641
	vblk->tag_set.driver_data = vblk;
642
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
J
Jens Axboe 已提交
643

644 645 646 647 648
	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_put_disk;

	q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
649
	if (!q) {
R
Rusty Russell 已提交
650
		err = -ENOMEM;
651
		goto out_free_tags;
R
Rusty Russell 已提交
652 653
	}

654
	q->queuedata = vblk;
655

656
	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
657

R
Rusty Russell 已提交
658
	vblk->disk->major = major;
659
	vblk->disk->first_minor = index_to_minor(index);
R
Rusty Russell 已提交
660 661
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
662
	vblk->disk->driverfs_dev = &vdev->dev;
663
	vblk->index = index;
664

665
	/* configure queue flush support */
666
	virtblk_update_cache_mode(vdev);
R
Rusty Russell 已提交
667

668 669 670 671
	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

672
	/* Host must always specify the capacity. */
673
	virtio_cread(vdev, struct virtio_blk_config, capacity, &cap);
R
Rusty Russell 已提交
674 675 676 677 678 679 680 681 682

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

683
	/* We can handle whatever the host told us to handle. */
684
	blk_queue_max_segments(q, vblk->sg_elems-2);
685

686
	/* No need to bounce any requests */
687
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
688

689
	/* No real sector limit. */
690
	blk_queue_max_hw_sectors(q, -1U);
691

692 693
	/* Host can optionally specify maximum segment size and number of
	 * segments. */
694 695
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
R
Rusty Russell 已提交
696
	if (!err)
697
		blk_queue_max_segment_size(q, v);
698
	else
699
		blk_queue_max_segment_size(q, -1U);
R
Rusty Russell 已提交
700

701
	/* Host can optionally specify the block size of the device */
702 703 704
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
705
	if (!err)
706 707 708 709 710
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
711 712 713
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
714 715 716 717
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

718 719 720
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
721 722 723
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

724 725 726
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
727 728 729
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

730 731 732
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
733 734 735
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

R
Rusty Russell 已提交
736
	add_disk(vblk->disk);
737 738 739 740
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

741 742 743 744 745 746 747 748
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_rw);
	else
		err = device_create_file(disk_to_dev(vblk->disk),
					 &dev_attr_cache_type_ro);
	if (err)
		goto out_del_disk;
R
Rusty Russell 已提交
749 750
	return 0;

751 752 753
out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
754 755
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
R
Rusty Russell 已提交
756 757 758
out_put_disk:
	put_disk(vblk->disk);
out_free_vq:
759
	vdev->config->del_vqs(vdev);
R
Rusty Russell 已提交
760 761
out_free_vblk:
	kfree(vblk);
762 763
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
R
Rusty Russell 已提交
764 765 766 767
out:
	return err;
}

768
static void virtblk_remove(struct virtio_device *vdev)
R
Rusty Russell 已提交
769 770
{
	struct virtio_blk *vblk = vdev->priv;
771
	int index = vblk->index;
772
	int refc;
R
Rusty Russell 已提交
773

774 775 776 777
	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);
778

779
	del_gendisk(vblk->disk);
780
	blk_cleanup_queue(vblk->disk->queue);
781

782 783
	blk_mq_free_tag_set(&vblk->tag_set);

R
Rusty Russell 已提交
784 785 786
	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

787 788
	flush_work(&vblk->config_work);

789
	refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount);
R
Rusty Russell 已提交
790
	put_disk(vblk->disk);
791
	vdev->config->del_vqs(vdev);
792
	kfree(vblk->vqs);
R
Rusty Russell 已提交
793
	kfree(vblk);
794 795 796 797

	/* Only free device id if we don't have any users */
	if (refc == 1)
		ida_simple_remove(&vd_index_ida, index);
R
Rusty Russell 已提交
798 799
}

800
#ifdef CONFIG_PM_SLEEP
/* Suspend: quiesce the device, stop queues, and drop the virtqueues. */
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Prevent config work handler from accessing the device. */
	mutex_lock(&vblk->config_lock);
	vblk->config_enable = false;
	mutex_unlock(&vblk->config_lock);

	flush_work(&vblk->config_work);

	blk_mq_stop_hw_queues(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	return 0;
}

/* Resume: re-create the virtqueues and restart the stopped queues. */
static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	vblk->config_enable = true;
	ret = init_vq(vdev->priv);
	if (!ret)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	return ret;
}
#endif

835
static const struct virtio_device_id id_table[] = {
R
Rusty Russell 已提交
836 837 838 839
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

840
static unsigned int features[] = {
841 842
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI,
843 844
	VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ,
845 846
};

847
static struct virtio_driver virtio_blk = {
848 849 850 851 852 853
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.probe			= virtblk_probe,
854
	.remove			= virtblk_remove,
855
	.config_changed		= virtblk_config_changed,
856
#ifdef CONFIG_PM_SLEEP
857 858 859
	.freeze			= virtblk_freeze,
	.restore		= virtblk_restore,
#endif
R
Rusty Russell 已提交
860 861 862 863
};

/* Module init: create the config workqueue, grab a major, register driver. */
static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

/* Module exit: undo init() in reverse order. */
static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
	destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");