/*
 * blkfront.c
 *
 * XenLinux virtual block device driver.
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
 * Copyright (c) 2004, Christian Limpach
 * Copyright (c) 2004, Andrew Warfield
 * Copyright (c) 2005, Christopher Clark
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/list.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/platform_pci.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>

enum blkif_state {
	BLKIF_STATE_DISCONNECTED,
	BLKIF_STATE_CONNECTED,
	BLKIF_STATE_SUSPENDED,
};

struct grant {
	grant_ref_t gref;
	struct page *page;
	struct list_head node;
};

struct blk_shadow {
	struct blkif_request req;
	struct request *request;
	struct grant **grants_used;
	struct grant **indirect_grants;
	struct scatterlist *sg;
	unsigned int num_sg;
};

struct split_bio {
	struct bio *bio;
	atomic_t pending;
};

static DEFINE_MUTEX(blkfront_mutex);
static const struct block_device_operations xlvbd_block_fops;

/*
 * Maximum number of segments in indirect requests, the actual value used by
 * the frontend driver is the minimum of this value and the value provided
 * by the backend driver.
 */

static unsigned int xen_blkif_max_segments = 32;
module_param_named(max, xen_blkif_max_segments, int, S_IRUGO);
MODULE_PARM_DESC(max, "Maximum number of segments in indirect requests (default is 32)");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend, 4KB page granularity is used.
 */
static unsigned int xen_blkif_max_ring_order;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

#define BLK_RING_SIZE(info)	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)

#define BLK_MAX_RING_SIZE	\
	__CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * XENBUS_MAX_RING_GRANTS)

/*
 * ring-ref%i i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
 * characters are enough. Define to 20 to stay consistent with the backend.
 */
#define RINGREF_NAME_LEN (20)

/*
 *  Per-ring info.
 *  Every blkfront device can be associated with one or more blkfront_ring_info
 *  structures, depending on how many hardware queues/rings are used.
 */
struct blkfront_ring_info {
	struct blkif_front_ring ring;
	unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
	unsigned int evtchn, irq;
	struct work_struct work;
	struct gnttab_free_callback callback;
	struct blk_shadow shadow[BLK_MAX_RING_SIZE];
	struct list_head indirect_pages;
	unsigned long shadow_free;
	struct blkfront_info *dev_info;
};

/*
 * We have one of these per vbd, whether ide, scsi or 'other'.  They
 * hang in private_data off the gendisk structure. We may end up
 * putting all kinds of interesting stuff here :-)
 */
struct blkfront_info
{
	spinlock_t io_lock;
	struct mutex mutex;
	struct xenbus_device *xbdev;
	struct gendisk *gd;
	int vdevice;
	blkif_vdev_t handle;
	enum blkif_state connected;
	unsigned int nr_ring_pages;
	struct request_queue *rq;
	struct list_head grants;
	unsigned int persistent_gnts_c;
	unsigned int feature_flush;
	unsigned int feature_discard:1;
	unsigned int feature_secdiscard:1;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int feature_persistent:1;
	/* Number of 4KB segments handled */
	unsigned int max_indirect_segments;
	int is_ready;
	struct blk_mq_tag_set tag_set;
	struct blkfront_ring_info rinfo;
};

static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);

#define GRANT_INVALID_REF	0

#define PARTS_PER_DISK		16
#define PARTS_PER_EXT_DISK      256

#define BLKIF_MAJOR(dev) ((dev)>>8)
#define BLKIF_MINOR(dev) ((dev) & 0xff)

#define EXT_SHIFT 28
#define EXTENDED (1<<EXT_SHIFT)
#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
#define EMULATED_HD_DISK_MINOR_OFFSET (0)
#define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
#define EMULATED_SD_DISK_MINOR_OFFSET (0)
#define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)

#define DEV_NAME	"xvd"	/* name in /dev */

/*
 * Grants are always the same size as a Xen page (i.e 4KB).
 * A physical segment is always the same size as a Linux page.
 * Number of grants per physical segment
 */
#define GRANTS_PER_PSEG	(PAGE_SIZE / XEN_PAGE_SIZE)

#define GRANTS_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))

#define PSEGS_PER_INDIRECT_FRAME	\
	(GRANTS_PER_INDIRECT_FRAME / GRANTS_PER_PSEG)

#define INDIRECT_GREFS(_grants)		\
	DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)

#define GREFS(_psegs)	((_psegs) * GRANTS_PER_PSEG)

static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static int blkfront_gather_backend_features(struct blkfront_info *info);

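/*
 * Shadow free-list helpers: get_id_from_freelist() hands out a free shadow
 * slot for a new request, add_id_to_freelist() returns it once the
 * corresponding response has been consumed.
 */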
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
	unsigned long free = rinfo->shadow_free;

	BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
	rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
	rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
	return free;
}

static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
			       unsigned long id)
{
	if (rinfo->shadow[id].req.u.rw.id != id)
		return -EINVAL;
	if (rinfo->shadow[id].request == NULL)
		return -EINVAL;
	rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
	rinfo->shadow[id].request = NULL;
	rinfo->shadow_free = id;
	return 0;
}

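/*
 * Pre-populate info->grants with 'num' grant entries; when persistent grants
 * are in use each entry also gets a backing page allocated here.
 */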
static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct page *granted_page;
	struct grant *gnt_list_entry, *n;
	int i = 0;

	while(i < num) {
		gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
		if (!gnt_list_entry)
			goto out_of_memory;

		if (info->feature_persistent) {
			granted_page = alloc_page(GFP_NOIO);
			if (!granted_page) {
				kfree(gnt_list_entry);
				goto out_of_memory;
			}
			gnt_list_entry->page = granted_page;
		}

		gnt_list_entry->gref = GRANT_INVALID_REF;
		list_add(&gnt_list_entry->node, &info->grants);
		i++;
	}

	return 0;

out_of_memory:
	list_for_each_entry_safe(gnt_list_entry, n,
	                         &info->grants, node) {
		list_del(&gnt_list_entry->node);
		if (info->feature_persistent)
			__free_page(gnt_list_entry->page);
		kfree(gnt_list_entry);
		i--;
	}
	BUG_ON(i != 0);
	return -ENOMEM;
}

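/*
 * Grant helpers: get_free_grant() pops an entry from info->grants;
 * get_grant() and get_indirect_grant() make sure the entry carries a valid
 * grant reference to the data page or indirect segment page.
 */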
static struct grant *get_free_grant(struct blkfront_info *info)
{
	struct grant *gnt_list_entry;

	BUG_ON(list_empty(&info->grants));
	gnt_list_entry = list_first_entry(&info->grants, struct grant,
					  node);
	list_del(&gnt_list_entry->node);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		info->persistent_gnts_c--;

	return gnt_list_entry;
}

static inline void grant_foreign_access(const struct grant *gnt_list_entry,
					const struct blkfront_info *info)
{
	gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
						 info->xbdev->otherend_id,
						 gnt_list_entry->page,
						 0);
}

static struct grant *get_grant(grant_ref_t *gref_head,
			       unsigned long gfn,
			       struct blkfront_info *info)
{
	struct grant *gnt_list_entry = get_free_grant(info);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (info->feature_persistent)
		grant_foreign_access(gnt_list_entry, info);
	else {
		/* Grant access to the GFN passed by the caller */
		gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
						info->xbdev->otherend_id,
						gfn, 0);
	}

	return gnt_list_entry;
}

static struct grant *get_indirect_grant(grant_ref_t *gref_head,
					struct blkfront_info *info)
{
	struct grant *gnt_list_entry = get_free_grant(info);

	if (gnt_list_entry->gref != GRANT_INVALID_REF)
		return gnt_list_entry;

	/* Assign a gref to this page */
	gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
	BUG_ON(gnt_list_entry->gref == -ENOSPC);
	if (!info->feature_persistent) {
		struct page *indirect_page;

		/* Fetch a pre-allocated page to use for indirect grefs */
		BUG_ON(list_empty(&info->rinfo.indirect_pages));
		indirect_page = list_first_entry(&info->rinfo.indirect_pages,
						 struct page, lru);
		list_del(&indirect_page->lru);
		gnt_list_entry->page = indirect_page;
	}
	grant_foreign_access(gnt_list_entry, info);

	return gnt_list_entry;
}

static const char *op_name(int op)
{
	static const char *const names[] = {
		[BLKIF_OP_READ] = "read",
		[BLKIF_OP_WRITE] = "write",
		[BLKIF_OP_WRITE_BARRIER] = "barrier",
		[BLKIF_OP_FLUSH_DISKCACHE] = "flush",
		[BLKIF_OP_DISCARD] = "discard" };

	if (op < 0 || op >= ARRAY_SIZE(names))
		return "unknown";

	if (!names[op])
		return "reserved";

	return names[op];
}
static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;
	int rc;

	if (end > nr_minors) {
		unsigned long *bitmap, *old;

		bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
				 GFP_KERNEL);
		if (bitmap == NULL)
			return -ENOMEM;

		spin_lock(&minor_lock);
		if (end > nr_minors) {
			old = minors;
			memcpy(bitmap, minors,
			       BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
			minors = bitmap;
			nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
		} else
			old = bitmap;
		spin_unlock(&minor_lock);
		kfree(old);
	}

	spin_lock(&minor_lock);
	if (find_next_bit(minors, end, minor) >= end) {
		bitmap_set(minors, minor, nr);
		rc = 0;
	} else
		rc = -EBUSY;
	spin_unlock(&minor_lock);

	return rc;
}

static void xlbd_release_minors(unsigned int minor, unsigned int nr)
{
	unsigned int end = minor + nr;

	BUG_ON(end > nr_minors);
	spin_lock(&minor_lock);
	bitmap_clear(minors,  minor, nr);
	spin_unlock(&minor_lock);
}

static void blkif_restart_queue_callback(void *arg)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
	schedule_work(&rinfo->work);
}

static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	/* We don't have real geometry info, but let's at least return
	   values consistent with the size of the device */
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	hg->heads = 0xff;
	hg->sectors = 0x3f;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}

static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
		       unsigned command, unsigned long argument)
{
	struct blkfront_info *info = bdev->bd_disk->private_data;
	int i;

	dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n",
		command, (long)argument);

	switch (command) {
	case CDROMMULTISESSION:
		dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n");
		for (i = 0; i < sizeof(struct cdrom_multisession); i++)
			if (put_user(0, (char __user *)(argument + i)))
				return -EFAULT;
		return 0;

	case CDROM_GET_CAPABILITY: {
		struct gendisk *gd = info->gd;
		if (gd->flags & GENHD_FL_CD)
			return 0;
		return -EINVAL;
	}

	default:
		/*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
		  command);*/
		return -EINVAL; /* same return as native Linux */
	}

	return 0;
}

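/*
 * Build a BLKIF_OP_DISCARD request in the shared ring and keep a shadow copy
 * so the request can be reissued after a resume.
 */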
static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;

	ring_req->operation = BLKIF_OP_DISCARD;
	ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
	ring_req->u.discard.id = id;
	ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
	if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
		ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
	else
		ring_req->u.discard.flag = 0;

	rinfo->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	return 0;
}

struct setup_rw_req {
	unsigned int grant_idx;
	struct blkif_request_segment *segments;
	struct blkfront_ring_info *rinfo;
	struct blkif_request *ring_req;
	grant_ref_t gref_head;
	unsigned int id;
	/* Only used when persistent grant is used and it's a read request */
	bool need_copy;
	unsigned int bvec_off;
	char *bvec_data;
};

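/*
 * Per-grant callback used by blkif_queue_rw_req(): grabs a grant for each
 * grant-sized chunk of a segment, copies the payload for persistent-grant
 * writes, and fills in the matching (possibly indirect) segment descriptor.
 */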
static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
				     unsigned int len, void *data)
{
	struct setup_rw_req *setup = data;
	int n, ref;
	struct grant *gnt_list_entry;
	unsigned int fsect, lsect;
	/* Convenient aliases */
	unsigned int grant_idx = setup->grant_idx;
	struct blkif_request *ring_req = setup->ring_req;
	struct blkfront_ring_info *rinfo = setup->rinfo;
	struct blkfront_info *info = rinfo->dev_info;
	struct blk_shadow *shadow = &rinfo->shadow[setup->id];

	if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
	    (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
		if (setup->segments)
			kunmap_atomic(setup->segments);

		n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
		gnt_list_entry = get_indirect_grant(&setup->gref_head, info);
		shadow->indirect_grants[n] = gnt_list_entry;
		setup->segments = kmap_atomic(gnt_list_entry->page);
		ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
	}

	gnt_list_entry = get_grant(&setup->gref_head, gfn, info);
	ref = gnt_list_entry->gref;
	shadow->grants_used[grant_idx] = gnt_list_entry;

	if (setup->need_copy) {
		void *shared_data;

		shared_data = kmap_atomic(gnt_list_entry->page);
		/*
		 * this does not wipe data stored outside the
		 * range sg->offset..sg->offset+sg->length.
		 * Therefore, blkback *could* see data from
		 * previous requests. This is OK as long as
		 * persistent grants are shared with just one
		 * domain. It may need refactoring if this
		 * changes
		 */
		memcpy(shared_data + offset,
		       setup->bvec_data + setup->bvec_off,
		       len);

		kunmap_atomic(shared_data);
		setup->bvec_off += len;
	}

	fsect = offset >> 9;
	lsect = fsect + (len >> 9) - 1;
	if (ring_req->operation != BLKIF_OP_INDIRECT) {
		ring_req->u.rw.seg[grant_idx] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	} else {
		setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
			(struct blkif_request_segment) {
				.gref       = ref,
				.first_sect = fsect,
				.last_sect  = lsect };
	}

	(setup->grant_idx)++;
}

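/*
 * Turn a read/write request into a blkif ring request, switching to
 * BLKIF_OP_INDIRECT when the grant count exceeds
 * BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */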
static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
{
	struct blkfront_info *info = rinfo->dev_info;
	struct blkif_request *ring_req;
	unsigned long id;
	int i;
	struct setup_rw_req setup = {
		.grant_idx = 0,
		.segments = NULL,
		.rinfo = rinfo,
		.need_copy = rq_data_dir(req) && info->feature_persistent,
	};

	/*
	 * Used to store if we are able to queue the request by just using
	 * existing persistent grants, or if we have to get new grants,
	 * as there are not sufficiently many free.
	 */
	bool new_persistent_gnts;
	struct scatterlist *sg;
	int num_sg, max_grefs, num_grant;

	max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
	if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
		/*
		 * If we are using indirect segments we need to account
		 * for the indirect grefs used in the request.
		 */
		max_grefs += INDIRECT_GREFS(max_grefs);

	/* Check if we have enough grants to allocate a request */
	if (info->persistent_gnts_c < max_grefs) {
		new_persistent_gnts = 1;
		if (gnttab_alloc_grant_references(
		    max_grefs - info->persistent_gnts_c,
		    &setup.gref_head) < 0) {
			gnttab_request_free_callback(
				&rinfo->callback,
				blkif_restart_queue_callback,
				rinfo,
				max_grefs);
			return 1;
		}
	} else
		new_persistent_gnts = 0;

	/* Fill out a communications ring structure. */
	ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
	id = get_id_from_freelist(rinfo);
	rinfo->shadow[id].request = req;

	BUG_ON(info->max_indirect_segments == 0 &&
	       GREFS(req->nr_phys_segments) > BLKIF_MAX_SEGMENTS_PER_REQUEST);
	BUG_ON(info->max_indirect_segments &&
	       GREFS(req->nr_phys_segments) > info->max_indirect_segments);

	num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
	num_grant = 0;
	/* Calculate the number of grants used */
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
	       num_grant += gnttab_count_grant(sg->offset, sg->length);

	ring_req->u.rw.id = id;
	rinfo->shadow[id].num_sg = num_sg;
	if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST) {
		/*
		 * The indirect operation can only be a BLKIF_OP_READ or
		 * BLKIF_OP_WRITE
		 */
		BUG_ON(req->cmd_flags & (REQ_FLUSH | REQ_FUA));
		ring_req->operation = BLKIF_OP_INDIRECT;
		ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.indirect.handle = info->handle;
		ring_req->u.indirect.nr_segments = num_grant;
	} else {
		ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
		ring_req->u.rw.handle = info->handle;
		ring_req->operation = rq_data_dir(req) ?
			BLKIF_OP_WRITE : BLKIF_OP_READ;
		if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
			/*
			 * Ideally we can do an unordered flush-to-disk.
			 * In case the backend only supports barriers, use that.
			 * A barrier request is a superset of FUA, so we can
			 * implement it the same way.  (It's also a FLUSH+FUA,
			 * since it is guaranteed ordered WRT previous writes.)
			 */
			switch (info->feature_flush &
				((REQ_FLUSH|REQ_FUA))) {
			case REQ_FLUSH|REQ_FUA:
				ring_req->operation =
					BLKIF_OP_WRITE_BARRIER;
				break;
			case REQ_FLUSH:
				ring_req->operation =
					BLKIF_OP_FLUSH_DISKCACHE;
				break;
			default:
				ring_req->operation = 0;
			}
		}
		ring_req->u.rw.nr_segments = num_grant;
	}

	setup.ring_req = ring_req;
	setup.id = id;
	for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
		BUG_ON(sg->offset + sg->length > PAGE_SIZE);

		if (setup.need_copy) {
			setup.bvec_off = sg->offset;
			setup.bvec_data = kmap_atomic(sg_page(sg));
		}

		gnttab_foreach_grant_in_range(sg_page(sg),
					      sg->offset,
					      sg->length,
					      blkif_setup_rw_req_grant,
					      &setup);

		if (setup.need_copy)
			kunmap_atomic(setup.bvec_data);
	}
	if (setup.segments)
		kunmap_atomic(setup.segments);

	rinfo->ring.req_prod_pvt++;

	/* Keep a private copy so we can reissue requests when recovering. */
	rinfo->shadow[id].req = *ring_req;

	if (new_persistent_gnts)
		gnttab_free_grant_references(setup.gref_head);

	return 0;
}

/*
 * Generate a Xen blkfront IO request from a blk layer request.  Reads
 * and writes are handled as expected.
 *
 * @req: a request struct
 */
static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
{
	if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
		return 1;

	if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE)))
		return blkif_queue_discard_req(req, rinfo);
	else
		return blkif_queue_rw_req(req, rinfo);
}

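/* Push queued ring requests and notify the backend if it needs a kick. */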
static inline void flush_requests(struct blkfront_ring_info *rinfo)
{
	int notify;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);

	if (notify)
		notify_remote_via_irq(rinfo->irq);
}

static inline bool blkif_request_flush_invalid(struct request *req,
					       struct blkfront_info *info)
{
	return ((req->cmd_type != REQ_TYPE_FS) ||
		((req->cmd_flags & REQ_FLUSH) &&
		 !(info->feature_flush & REQ_FLUSH)) ||
		((req->cmd_flags & REQ_FUA) &&
		 !(info->feature_flush & REQ_FUA)));
}

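/*
 * blk-mq .queue_rq hook: take the ring lock, fail flush requests the backend
 * does not support, queue the rest and return BUSY when the ring is full.
 */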
static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
			   const struct blk_mq_queue_data *qd)
{
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)hctx->driver_data;
	struct blkfront_info *info = rinfo->dev_info;

	blk_mq_start_request(qd->rq);
	spin_lock_irq(&info->io_lock);
	if (RING_FULL(&rinfo->ring))
		goto out_busy;

	if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
		goto out_err;

	if (blkif_queue_request(qd->rq, rinfo))
		goto out_busy;

	flush_requests(rinfo);
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_OK;

out_err:
	spin_unlock_irq(&info->io_lock);
	return BLK_MQ_RQ_QUEUE_ERROR;

out_busy:
	spin_unlock_irq(&info->io_lock);
	blk_mq_stop_hw_queue(hctx);
	return BLK_MQ_RQ_QUEUE_BUSY;
}

static int blk_mq_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			    unsigned int index)
{
	struct blkfront_info *info = (struct blkfront_info *)data;

	hctx->driver_data = &info->rinfo;
	return 0;
}

static struct blk_mq_ops blkfront_mq_ops = {
	.queue_rq = blkif_queue_rq,
	.map_queue = blk_mq_map_queue,
	.init_hctx = blk_mq_init_hctx,
};

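/*
 * Allocate the blk-mq tag set and request queue for the disk and apply the
 * queue limits (discard, sector sizes, segment limits) advertised by the
 * backend.
 */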
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
				unsigned int physical_sector_size,
				unsigned int segments)
{
	struct request_queue *rq;
	struct blkfront_info *info = gd->private_data;

	memset(&info->tag_set, 0, sizeof(info->tag_set));
	info->tag_set.ops = &blkfront_mq_ops;
	info->tag_set.nr_hw_queues = 1;
	info->tag_set.queue_depth =  BLK_RING_SIZE(info);
	info->tag_set.numa_node = NUMA_NO_NODE;
	info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	info->tag_set.cmd_size = 0;
	info->tag_set.driver_data = info;

	if (blk_mq_alloc_tag_set(&info->tag_set))
		return -1;
	rq = blk_mq_init_queue(&info->tag_set);
	if (IS_ERR(rq)) {
		blk_mq_free_tag_set(&info->tag_set);
		return -1;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);

	if (info->feature_discard) {
		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
		blk_queue_max_discard_sectors(rq, get_capacity(gd));
		rq->limits.discard_granularity = info->discard_granularity;
		rq->limits.discard_alignment = info->discard_alignment;
		if (info->feature_secdiscard)
			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
	}

	/* Hard sector size and max sectors impersonate the equiv. hardware. */
	blk_queue_logical_block_size(rq, sector_size);
	blk_queue_physical_block_size(rq, physical_sector_size);
	blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);

	/* Each segment in a request is up to an aligned page in size. */
	blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
	blk_queue_max_segment_size(rq, PAGE_SIZE);

	/* Ensure a merged request will fit in a single I/O ring slot. */
	blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);

	/* Make sure buffer addresses are sector-aligned. */
	blk_queue_dma_alignment(rq, 511);

	/* Make sure we don't use bounce buffers. */
	blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);

	gd->queue = rq;

	return 0;
}

static const char *flush_info(unsigned int feature_flush)
{
	switch (feature_flush & ((REQ_FLUSH | REQ_FUA))) {
	case REQ_FLUSH|REQ_FUA:
		return "barrier: enabled;";
	case REQ_FLUSH:
		return "flush diskcache: enabled;";
	default:
		return "barrier or flush: disabled;";
	}
}

static void xlvbd_flush(struct blkfront_info *info)
{
	blk_queue_flush(info->rq, info->feature_flush);
	pr_info("blkfront: %s: %s %s %s %s %s\n",
		info->gd->disk_name, flush_info(info->feature_flush),
		"persistent grants:", info->feature_persistent ?
		"enabled;" : "disabled;", "indirect descriptors:",
		info->max_indirect_segments ? "enabled;" : "disabled;");
}

static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
{
	int major;
	major = BLKIF_MAJOR(vdevice);
	*minor = BLKIF_MINOR(vdevice);
	switch (major) {
		case XEN_IDE0_MAJOR:
			*offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = ((*minor / 64) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_IDE1_MAJOR:
			*offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
			*minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
				EMULATED_HD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK0_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK1_MAJOR:
		case XEN_SCSI_DISK2_MAJOR:
		case XEN_SCSI_DISK3_MAJOR:
		case XEN_SCSI_DISK4_MAJOR:
		case XEN_SCSI_DISK5_MAJOR:
		case XEN_SCSI_DISK6_MAJOR:
		case XEN_SCSI_DISK7_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + 
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XEN_SCSI_DISK8_MAJOR:
		case XEN_SCSI_DISK9_MAJOR:
		case XEN_SCSI_DISK10_MAJOR:
		case XEN_SCSI_DISK11_MAJOR:
		case XEN_SCSI_DISK12_MAJOR:
		case XEN_SCSI_DISK13_MAJOR:
		case XEN_SCSI_DISK14_MAJOR:
		case XEN_SCSI_DISK15_MAJOR:
			*offset = (*minor / PARTS_PER_DISK) + 
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
				EMULATED_SD_DISK_NAME_OFFSET;
			*minor = *minor +
				((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
				EMULATED_SD_DISK_MINOR_OFFSET;
			break;
		case XENVBD_MAJOR:
			*offset = *minor / PARTS_PER_DISK;
			break;
		default:
			printk(KERN_WARNING "blkfront: your disk configuration is "
					"incorrect, please use an xvd device instead\n");
			return -ENODEV;
	}
	return 0;
}

static char *encode_disk_name(char *ptr, unsigned int n)
{
	if (n >= 26)
		ptr = encode_disk_name(ptr, n / 26 - 1);
	*ptr = 'a' + n % 26;
	return ptr + 1;
}

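/*
 * Allocate and configure the gendisk for this vbd: reserve a minor range,
 * pick the xvd* name, set up the request queue and apply the disk flags.
 */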
static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
			       struct blkfront_info *info,
			       u16 vdisk_info, u16 sector_size,
			       unsigned int physical_sector_size)
{
	struct gendisk *gd;
	int nr_minors = 1;
	int err;
	unsigned int offset;
	int minor;
	int nr_parts;
	char *ptr;

	BUG_ON(info->gd != NULL);
	BUG_ON(info->rq != NULL);

	if ((info->vdevice>>EXT_SHIFT) > 1) {
		/* this is above the extended range; something is wrong */
		printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
		return -ENODEV;
	}

	if (!VDEV_IS_EXTENDED(info->vdevice)) {
		err = xen_translate_vdev(info->vdevice, &minor, &offset);
		if (err)
			return err;
		nr_parts = PARTS_PER_DISK;
	} else {
		minor = BLKIF_MINOR_EXT(info->vdevice);
		nr_parts = PARTS_PER_EXT_DISK;
		offset = minor / nr_parts;
		if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
			printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
					"emulated IDE disks,\n\t choose an xvd device name"
					"from xvde on\n", info->vdevice);
	}
	if (minor >> MINORBITS) {
		pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
			info->vdevice, minor);
		return -ENODEV;
	}

	if ((minor % nr_parts) == 0)
		nr_minors = nr_parts;

	err = xlbd_reserve_minors(minor, nr_minors);
	if (err)
		goto out;
	err = -ENODEV;

	gd = alloc_disk(nr_minors);
	if (gd == NULL)
		goto release;

	strcpy(gd->disk_name, DEV_NAME);
	ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
	BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
	if (nr_minors > 1)
		*ptr = 0;
	else
		snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
			 "%d", minor & (nr_parts - 1));

	gd->major = XENVBD_MAJOR;
	gd->first_minor = minor;
	gd->fops = &xlvbd_block_fops;
	gd->private_data = info;
	gd->driverfs_dev = &(info->xbdev->dev);
	set_capacity(gd, capacity);

	if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
				 info->max_indirect_segments ? :
				 BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		del_gendisk(gd);
		goto release;
	}

	info->rq = gd->queue;
	info->gd = gd;

	xlvbd_flush(info);

	if (vdisk_info & VDISK_READONLY)
		set_disk_ro(gd, 1);

	if (vdisk_info & VDISK_REMOVABLE)
		gd->flags |= GENHD_FL_REMOVABLE;

	if (vdisk_info & VDISK_CDROM)
		gd->flags |= GENHD_FL_CD;

	return 0;

 release:
	xlbd_release_minors(minor, nr_minors);
 out:
	return err;
}

static void xlvbd_release_gendisk(struct blkfront_info *info)
{
	unsigned int minor, nr_minors;
	struct blkfront_ring_info *rinfo = &info->rinfo;

	if (info->rq == NULL)
		return;

	/* No more blkif_request(). */
	blk_mq_stop_hw_queues(info->rq);

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	del_gendisk(info->gd);

	minor = info->gd->first_minor;
	nr_minors = info->gd->minors;
	xlbd_release_minors(minor, nr_minors);

	blk_cleanup_queue(info->rq);
	blk_mq_free_tag_set(&info->tag_set);
	info->rq = NULL;

	put_disk(info->gd);
	info->gd = NULL;
}

/* Must be called with io_lock held */
static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
{
	if (!RING_FULL(&rinfo->ring))
		blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
}

static void blkif_restart_queue(struct work_struct *work)
{
	struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);

	spin_lock_irq(&rinfo->dev_info->io_lock);
	if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
		kick_pending_request_queues(rinfo);
	spin_unlock_irq(&rinfo->dev_info->io_lock);
}

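/*
 * Disconnect from the backend: stop the queue, release persistent grants,
 * indirect pages and shadow bookkeeping, then free the ring pages and the
 * event channel.
 */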
static void blkif_free(struct blkfront_info *info, int suspend)
{
	struct grant *persistent_gnt;
	struct grant *n;
	int i, j, segs;
	struct blkfront_ring_info *rinfo = &info->rinfo;

	/* Prevent new requests being issued until we fix things up. */
	spin_lock_irq(&info->io_lock);
	info->connected = suspend ?
		BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
	/* No more blkif_request(). */
	if (info->rq)
		blk_mq_stop_hw_queues(info->rq);

	/* Remove all persistent grants */
	if (!list_empty(&info->grants)) {
		list_for_each_entry_safe(persistent_gnt, n,
					 &info->grants, node) {
			list_del(&persistent_gnt->node);
			if (persistent_gnt->gref != GRANT_INVALID_REF) {
				gnttab_end_foreign_access(persistent_gnt->gref,
				                          0, 0UL);
				info->persistent_gnts_c--;
			}
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}
	}
	BUG_ON(info->persistent_gnts_c != 0);

	/*
	 * Remove indirect pages, this only happens when using indirect
	 * descriptors but not persistent grants
	 */
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;

		BUG_ON(info->feature_persistent);
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/*
		 * Clear persistent grants present in requests already
		 * on the shared ring
		 */
		if (!rinfo->shadow[i].request)
			goto free_shadow;

		segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
		       rinfo->shadow[i].req.u.indirect.nr_segments :
		       rinfo->shadow[i].req.u.rw.nr_segments;
		for (j = 0; j < segs; j++) {
			persistent_gnt = rinfo->shadow[i].grants_used[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			if (info->feature_persistent)
				__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

		if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
			/*
			 * If this is not an indirect operation don't try to
			 * free indirect segments
			 */
			goto free_shadow;

		for (j = 0; j < INDIRECT_GREFS(segs); j++) {
			persistent_gnt = rinfo->shadow[i].indirect_grants[j];
			gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
			__free_page(persistent_gnt->page);
			kfree(persistent_gnt);
		}

free_shadow:
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
	}

	/* No more gnttab callback work. */
	gnttab_cancel_free_callback(&rinfo->callback);
	spin_unlock_irq(&info->io_lock);

	/* Flush gnttab callback work. Must be done with no locks held. */
	flush_work(&rinfo->work);

	/* Free resources associated with old device channel. */
	for (i = 0; i < info->nr_ring_pages; i++) {
		if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
			gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
			rinfo->ring_ref[i] = GRANT_INVALID_REF;
		}
	}
	free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
	rinfo->ring.sring = NULL;

	if (rinfo->irq)
		unbind_from_irqhandler(rinfo->irq, rinfo);
	rinfo->evtchn = rinfo->irq = 0;

}

struct copy_from_grant {
	const struct blk_shadow *s;
	unsigned int grant_idx;
	unsigned int bvec_offset;
	char *bvec_data;
};

static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
				  unsigned int len, void *data)
{
	struct copy_from_grant *info = data;
	char *shared_data;
	/* Convenient aliases */
	const struct blk_shadow *s = info->s;

	shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);

	memcpy(info->bvec_data + info->bvec_offset,
	       shared_data + offset, len);

	info->bvec_offset += len;
	info->grant_idx++;

	kunmap_atomic(shared_data);
}

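/*
 * Post-process a completed request: copy data back for persistent-grant
 * reads and recycle the grants (and indirect pages) used by the request.
 */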
static void blkif_completion(struct blk_shadow *s, struct blkfront_ring_info *rinfo,
			     struct blkif_response *bret)
{
	int i = 0;
	struct scatterlist *sg;
	int num_sg, num_grant;
	struct blkfront_info *info = rinfo->dev_info;
	struct copy_from_grant data = {
		.s = s,
		.grant_idx = 0,
	};

	num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
		s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
	num_sg = s->num_sg;

	if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
		for_each_sg(s->sg, sg, num_sg, i) {
			BUG_ON(sg->offset + sg->length > PAGE_SIZE);

			data.bvec_offset = sg->offset;
			data.bvec_data = kmap_atomic(sg_page(sg));

			gnttab_foreach_grant_in_range(sg_page(sg),
						      sg->offset,
						      sg->length,
						      blkif_copy_from_grant,
						      &data);

			kunmap_atomic(data.bvec_data);
		}
	}
	/* Add the persistent grant into the list of free grants */
	for (i = 0; i < num_grant; i++) {
		if (gnttab_query_foreign_access(s->grants_used[i]->gref)) {
			/*
			 * If the grant is still mapped by the backend (the
			 * backend has chosen to make this grant persistent)
			 * we add it at the head of the list, so it will be
			 * reused first.
			 */
			if (!info->feature_persistent)
				pr_alert_ratelimited("backend has not unmapped grant: %u\n",
						     s->grants_used[i]->gref);
			list_add(&s->grants_used[i]->node, &info->grants);
			info->persistent_gnts_c++;
		} else {
			/*
			 * If the grant is not mapped by the backend we end the
			 * foreign access and add it to the tail of the list,
			 * so it will not be picked again unless we run out of
			 * persistent grants.
			 */
			gnttab_end_foreign_access(s->grants_used[i]->gref, 0, 0UL);
			s->grants_used[i]->gref = GRANT_INVALID_REF;
			list_add_tail(&s->grants_used[i]->node, &info->grants);
		}
	}
	if (s->req.operation == BLKIF_OP_INDIRECT) {
		for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
			if (gnttab_query_foreign_access(s->indirect_grants[i]->gref)) {
				if (!info->feature_persistent)
					pr_alert_ratelimited("backend has not unmapped grant: %u\n",
							     s->indirect_grants[i]->gref);
				list_add(&s->indirect_grants[i]->node, &info->grants);
				info->persistent_gnts_c++;
			} else {
				struct page *indirect_page;

				gnttab_end_foreign_access(s->indirect_grants[i]->gref, 0, 0UL);
				/*
				 * Add the used indirect page back to the list of
				 * available pages for indirect grefs.
				 */
				if (!info->feature_persistent) {
					indirect_page = s->indirect_grants[i]->page;
					list_add(&indirect_page->lru, &rinfo->indirect_pages);
				}
				s->indirect_grants[i]->gref = GRANT_INVALID_REF;
				list_add_tail(&s->indirect_grants[i]->node, &info->grants);
			}
		}
	}
}

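/*
 * Ring interrupt handler: walk the response ring, complete the matching
 * blk-mq requests and restart the queue if the ring has room again.
 */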
static irqreturn_t blkif_interrupt(int irq, void *dev_id)
{
	struct request *req;
	struct blkif_response *bret;
	RING_IDX i, rp;
	unsigned long flags;
	struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
	struct blkfront_info *info = rinfo->dev_info;
	int error;

	spin_lock_irqsave(&info->io_lock, flags);

	if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
		spin_unlock_irqrestore(&info->io_lock, flags);
		return IRQ_HANDLED;
	}

 again:
	rp = rinfo->ring.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	for (i = rinfo->ring.rsp_cons; i != rp; i++) {
		unsigned long id;

		bret = RING_GET_RESPONSE(&rinfo->ring, i);
		id   = bret->id;
		/*
		 * The backend has messed up and given us an id that we would
		 * never have given to it (we stamp it up to BLK_RING_SIZE -
		 * look in get_id_from_freelist).
		 */
		if (id >= BLK_RING_SIZE(info)) {
			WARN(1, "%s: response to %s has incorrect id (%ld)\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			/* We can't safely get the 'struct request' as
			 * the id is busted. */
			continue;
		}
		req  = rinfo->shadow[id].request;

		if (bret->operation != BLKIF_OP_DISCARD)
			blkif_completion(&rinfo->shadow[id], rinfo, bret);

		if (add_id_to_freelist(rinfo, id)) {
			WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
			     info->gd->disk_name, op_name(bret->operation), id);
			continue;
		}

		error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO;
		switch (bret->operation) {
		case BLKIF_OP_DISCARD:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				struct request_queue *rq = info->rq;
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
					   info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
				info->feature_discard = 0;
				info->feature_secdiscard = 0;
				queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
				queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
			}
			blk_mq_complete_request(req, error);
			break;
		case BLKIF_OP_FLUSH_DISKCACHE:
		case BLKIF_OP_WRITE_BARRIER:
			if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
				printk(KERN_WARNING "blkfront: %s: %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(bret->status == BLKIF_RSP_ERROR &&
				     rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
				printk(KERN_WARNING "blkfront: %s: empty %s op failed\n",
				       info->gd->disk_name, op_name(bret->operation));
				error = -EOPNOTSUPP;
			}
			if (unlikely(error)) {
				if (error == -EOPNOTSUPP)
					error = 0;
				info->feature_flush = 0;
				xlvbd_flush(info);
			}
			/* fall through */
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
			if (unlikely(bret->status != BLKIF_RSP_OKAY))
				dev_dbg(&info->xbdev->dev, "Bad return from blkdev data "
					"request: %x\n", bret->status);

			blk_mq_complete_request(req, error);
			break;
		default:
			BUG();
		}
	}

	rinfo->ring.rsp_cons = i;

	if (i != rinfo->ring.req_prod_pvt) {
		int more_to_do;
		RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
		if (more_to_do)
			goto again;
	} else
		rinfo->ring.sring->rsp_event = i + 1;

	kick_pending_request_queues(rinfo);

	spin_unlock_irqrestore(&info->io_lock, flags);

	return IRQ_HANDLED;
}


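/*
 * Allocate the shared ring pages, grant them to the backend and bind the
 * event channel used for notifications.
 */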
static int setup_blkring(struct xenbus_device *dev,
			 struct blkfront_ring_info *rinfo)
{
	struct blkif_sring *sring;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;
	unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
	grant_ref_t gref[XENBUS_MAX_RING_GRANTS];

	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = GRANT_INVALID_REF;

	sring = (struct blkif_sring *)__get_free_pages(GFP_NOIO | __GFP_HIGH,
						       get_order(ring_size));
	if (!sring) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
		return -ENOMEM;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&rinfo->ring, sring, ring_size);

	err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
	if (err < 0) {
		free_pages((unsigned long)sring, get_order(ring_size));
		rinfo->ring.sring = NULL;
		goto fail;
	}
	for (i = 0; i < info->nr_ring_pages; i++)
		rinfo->ring_ref[i] = gref[i];

	err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
					"blkif", rinfo);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err,
				 "bind_evtchn_to_irqhandler failed");
		goto fail;
	}
	rinfo->irq = err;

	return 0;
fail:
	blkif_free(info, 0);
	return err;
}


/* Common code used when first setting up, and when resuming. */
static int talk_to_blkback(struct xenbus_device *dev,
			   struct blkfront_info *info)
{
	const char *message = NULL;
	struct xenbus_transaction xbt;
	int err, i;
	unsigned int max_page_order = 0;
	unsigned int ring_page_order = 0;
	struct blkfront_ring_info *rinfo = &info->rinfo;

	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "max-ring-page-order", "%u", &max_page_order);
	if (err != 1)
		info->nr_ring_pages = 1;
	else {
		ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
		info->nr_ring_pages = 1 << ring_page_order;
	}

	/* Create shared ring, alloc event channel. */
	err = setup_blkring(dev, rinfo);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_blkring;
	}

	if (info->nr_ring_pages == 1) {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-ref", "%u", rinfo->ring_ref[0]);
		if (err) {
			message = "writing ring-ref";
			goto abort_transaction;
		}
	} else {
		err = xenbus_printf(xbt, dev->nodename,
				    "ring-page-order", "%u", ring_page_order);
		if (err) {
			message = "writing ring-page-order";
			goto abort_transaction;
		}

		for (i = 0; i < info->nr_ring_pages; i++) {
			char ring_ref_name[RINGREF_NAME_LEN];

			snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
			err = xenbus_printf(xbt, dev->nodename, ring_ref_name,
					    "%u", rinfo->ring_ref[i]);
			if (err) {
				message = "writing ring-ref";
				goto abort_transaction;
			}
		}
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", rinfo->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
			    XEN_IO_PROTO_ABI_NATIVE);
	if (err) {
		message = "writing protocol";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "feature-persistent", "%u", 1);
	if (err)
		dev_warn(&dev->dev,
			 "writing persistent grants feature to xenbus");

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_blkring;
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++)
		rinfo->shadow[i].req.u.rw.id = i+1;
	rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	if (message)
		xenbus_dev_fatal(dev, err, "%s", message);
 destroy_blkring:
	blkif_free(info, 0);
 out:
	return err;
}

/**
 * Entry point to this code when a new device is created.  Allocate the basic
 * structures and the ring buffer for communication with the backend, and
 * inform the backend of the appropriate details for those.  Switch to
 * Initialised state.
 */
static int blkfront_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err, vdevice;
	struct blkfront_info *info;
	struct blkfront_ring_info *rinfo;

	/* FIXME: Use dynamic device id if this is not set. */
	err = xenbus_scanf(XBT_NIL, dev->nodename,
			   "virtual-device", "%i", &vdevice);
	if (err != 1) {
		/* go looking in the extended area instead */
		err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
				   "%i", &vdevice);
		if (err != 1) {
			xenbus_dev_fatal(dev, err, "reading virtual-device");
			return err;
		}
	}

	if (xen_hvm_domain()) {
		char *type;
		int len;
		/* no unplug has been done: do not hook devices != xen vbds */
		if (xen_has_pv_and_legacy_disk_devices()) {
			int major;

			if (!VDEV_IS_EXTENDED(vdevice))
				major = BLKIF_MAJOR(vdevice);
			else
				major = XENVBD_MAJOR;

			if (major != XENVBD_MAJOR) {
				printk(KERN_INFO
						"%s: HVM does not support vbd %d as xen block device\n",
						__func__, vdevice);
				return -ENODEV;
			}
		}
		/* do not create a PV cdrom device if we are an HVM guest */
		type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
		if (IS_ERR(type))
			return -ENODEV;
		if (strncmp(type, "cdrom", 5) == 0) {
			kfree(type);
			return -ENODEV;
		}
		kfree(type);
	}
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
		return -ENOMEM;
	}

	rinfo = &info->rinfo;
	INIT_LIST_HEAD(&rinfo->indirect_pages);
	rinfo->dev_info = info;
	INIT_WORK(&rinfo->work, blkif_restart_queue);

	mutex_init(&info->mutex);
	spin_lock_init(&info->io_lock);
	info->xbdev = dev;
	info->vdevice = vdevice;
	INIT_LIST_HEAD(&info->grants);
	info->persistent_gnts_c = 0;
	info->connected = BLKIF_STATE_DISCONNECTED;

	/* Front end dir is a number, which is used as the id. */
	info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
	dev_set_drvdata(&dev->dev, info);

	return 0;
}

static void split_bio_end(struct bio *bio)
{
	struct split_bio *split_bio = bio->bi_private;

	if (atomic_dec_and_test(&split_bio->pending)) {
		split_bio->bio->bi_phys_segments = 0;
		split_bio->bio->bi_error = bio->bi_error;
		bio_endio(split_bio->bio);
		kfree(split_bio);
	}
	bio_put(bio);
}

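/*
 * Replay in-flight requests after a resume: requeue flush/discard requests
 * whole and resubmit the data bios, splitting any bio that no longer fits
 * the newly negotiated segment limit.
 */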
static int blkif_recover(struct blkfront_info *info)
{
	int i;
	struct request *req, *n;
	struct blk_shadow *copy;
	int rc;
	struct bio *bio, *cloned_bio;
	struct bio_list bio_list, merge_bio;
	unsigned int segs, offset;
	int pending, size;
	struct split_bio *split_bio;
	struct list_head requests;
	struct blkfront_ring_info *rinfo = &info->rinfo;

	/* Stage 1: Make a safe copy of the shadow state. */
	copy = kmemdup(rinfo->shadow, sizeof(rinfo->shadow),
		       GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
	if (!copy)
		return -ENOMEM;

	/* Stage 2: Set up free list. */
	memset(&rinfo->shadow, 0, sizeof(rinfo->shadow));
	for (i = 0; i < BLK_RING_SIZE(info); i++)
		rinfo->shadow[i].req.u.rw.id = i+1;
	rinfo->shadow_free = rinfo->ring.req_prod_pvt;
	rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;

	rc = blkfront_gather_backend_features(info);
	if (rc) {
		kfree(copy);
		return rc;
	}

	segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
	blk_queue_max_segments(info->rq, segs);
	bio_list_init(&bio_list);
	INIT_LIST_HEAD(&requests);
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		/* Not in use? */
		if (!copy[i].request)
			continue;

		/*
		 * Get the bios in the request so we can re-queue them.
		 */
		if (copy[i].request->cmd_flags &
		    (REQ_FLUSH | REQ_FUA | REQ_DISCARD | REQ_SECURE)) {
			/*
			 * Flush operations don't contain bios, so
			 * we need to requeue the whole request
			 */
			list_add(&copy[i].request->queuelist, &requests);
			continue;
		}
		merge_bio.head = copy[i].request->bio;
		merge_bio.tail = copy[i].request->biotail;
		bio_list_merge(&bio_list, &merge_bio);
		copy[i].request->bio = NULL;
		blk_end_request_all(copy[i].request, 0);
	}

	kfree(copy);

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	spin_lock_irq(&info->io_lock);

	/* Now safe for us to use the shared ring */
	info->connected = BLKIF_STATE_CONNECTED;

	/* Kick any other new requests queued since we resumed */
	kick_pending_request_queues(rinfo);

	list_for_each_entry_safe(req, n, &requests, queuelist) {
		/* Requeue pending requests (flush or discard) */
		list_del_init(&req->queuelist);
		BUG_ON(req->nr_phys_segments > segs);
		blk_mq_requeue_request(req);
	}
	spin_unlock_irq(&info->io_lock);
	blk_mq_kick_requeue_list(info->rq);

	while ((bio = bio_list_pop(&bio_list)) != NULL) {
		/* Traverse the list of pending bios and re-queue them */
		if (bio_segments(bio) > segs) {
			/*
			 * This bio has more segments than what we can
			 * handle, we have to split it.
			 */
			pending = (bio_segments(bio) + segs - 1) / segs;
			split_bio = kzalloc(sizeof(*split_bio), GFP_NOIO);
			BUG_ON(split_bio == NULL);
			atomic_set(&split_bio->pending, pending);
			split_bio->bio = bio;
			for (i = 0; i < pending; i++) {
				offset = (i * segs * XEN_PAGE_SIZE) >> 9;
				size = min((unsigned int)(segs * XEN_PAGE_SIZE) >> 9,
					   (unsigned int)bio_sectors(bio) - offset);
				cloned_bio = bio_clone(bio, GFP_NOIO);
				BUG_ON(cloned_bio == NULL);
				bio_trim(cloned_bio, offset, size);
				cloned_bio->bi_private = split_bio;
				cloned_bio->bi_end_io = split_bio_end;
				submit_bio(cloned_bio->bi_rw, cloned_bio);
			}
			/*
			 * Now we have to wait for all those smaller bios to
			 * end, so we can also end the "parent" bio.
			 */
			continue;
		}
		/* We don't need to split this bio */
		submit_bio(bio->bi_rw, bio);
	}

	return 0;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart.  We tear down our blkif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int blkfront_resume(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);
	int err;

	dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);

	blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);

	err = talk_to_blkback(dev, info);

	/*
	 * We have to wait for the backend to switch to the
	 * connected state, since we want to read which
	 * features it supports.
	 */

	return err;
}

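/*
 * Handle a close request from the backend.  If the block device is still
 * open we only switch to XenbusStateClosing and defer; otherwise the gendisk
 * is released and the frontend moves to Closed.
 */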
static void
blkfront_closing(struct blkfront_info *info)
{
	struct xenbus_device *xbdev = info->xbdev;
	struct block_device *bdev = NULL;

	mutex_lock(&info->mutex);

	if (xbdev->state == XenbusStateClosing) {
		mutex_unlock(&info->mutex);
		return;
	}

	if (info->gd)
		bdev = bdget_disk(info->gd, 0);

	mutex_unlock(&info->mutex);

	if (!bdev) {
		xenbus_frontend_closed(xbdev);
		return;
	}

	mutex_lock(&bdev->bd_mutex);

	if (bdev->bd_openers) {
		xenbus_dev_error(xbdev, -EBUSY,
				 "Device in use; refusing to close");
		xenbus_switch_state(xbdev, XenbusStateClosing);
	} else {
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(xbdev);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);
}

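/*
 * Read the discard parameters advertised by the backend (granularity,
 * alignment, secure-discard support) and record them in the frontend state.
 */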
static void blkfront_setup_discard(struct blkfront_info *info)
{
	int err;
	unsigned int discard_granularity;
	unsigned int discard_alignment;
	unsigned int discard_secure;

	info->feature_discard = 1;
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		"discard-granularity", "%u", &discard_granularity,
		"discard-alignment", "%u", &discard_alignment,
		NULL);
	if (!err) {
		info->discard_granularity = discard_granularity;
		info->discard_alignment = discard_alignment;
	}
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
		    "discard-secure", "%d", &discard_secure,
		    NULL);
	if (!err)
		info->feature_secdiscard = !!discard_secure;
}

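/*
 * Allocate the per-ring resources needed for indirect descriptors: the grant
 * buffer, the pages used to map indirect grefs when persistent grants are
 * not in use, and the per-request shadow arrays.
 */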
static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
{
	unsigned int psegs, grants;
	int err, i;
	struct blkfront_info *info = rinfo->dev_info;

	if (info->max_indirect_segments == 0)
		grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
	else
		grants = info->max_indirect_segments;
	psegs = grants / GRANTS_PER_PSEG;

	err = fill_grant_buffer(rinfo,
				(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
	if (err)
		goto out_of_memory;

	if (!info->feature_persistent && info->max_indirect_segments) {
		/*
		 * We are using indirect descriptors but not persistent
		 * grants, so we need to allocate a set of pages that can
		 * be used for mapping indirect grefs.
		 */
		int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);

		BUG_ON(!list_empty(&rinfo->indirect_pages));
		for (i = 0; i < num; i++) {
			struct page *indirect_page = alloc_page(GFP_NOIO);
			if (!indirect_page)
				goto out_of_memory;
			list_add(&indirect_page->lru, &rinfo->indirect_pages);
		}
	}

	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		rinfo->shadow[i].grants_used = kzalloc(
			sizeof(rinfo->shadow[i].grants_used[0]) * grants,
			GFP_NOIO);
		rinfo->shadow[i].sg = kzalloc(sizeof(rinfo->shadow[i].sg[0]) * psegs, GFP_NOIO);
		if (info->max_indirect_segments)
			rinfo->shadow[i].indirect_grants = kzalloc(
				sizeof(rinfo->shadow[i].indirect_grants[0]) *
				INDIRECT_GREFS(grants),
				GFP_NOIO);
		if ((rinfo->shadow[i].grants_used == NULL) ||
			(rinfo->shadow[i].sg == NULL) ||
		     (info->max_indirect_segments &&
		     (rinfo->shadow[i].indirect_grants == NULL)))
			goto out_of_memory;
		sg_init_table(rinfo->shadow[i].sg, psegs);
	}


	return 0;

out_of_memory:
	for (i = 0; i < BLK_RING_SIZE(info); i++) {
		kfree(rinfo->shadow[i].grants_used);
		rinfo->shadow[i].grants_used = NULL;
		kfree(rinfo->shadow[i].sg);
		rinfo->shadow[i].sg = NULL;
		kfree(rinfo->shadow[i].indirect_grants);
		rinfo->shadow[i].indirect_grants = NULL;
	}
	if (!list_empty(&rinfo->indirect_pages)) {
		struct page *indirect_page, *n;
		list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
			list_del(&indirect_page->lru);
			__free_page(indirect_page);
		}
	}
	return -ENOMEM;
}

/*
 * Gather all backend feature-* entries advertised in xenstore.
 */
static int blkfront_gather_backend_features(struct blkfront_info *info)
{
	int err;
	int barrier, flush, discard, persistent;
	unsigned int indirect_segments;

	info->feature_flush = 0;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"feature-barrier", "%d", &barrier,
			NULL);

	/*
	 * If there's no "feature-barrier" defined, then it means
	 * we're dealing with a very old backend which writes
	 * synchronously; nothing to do.
	 *
	 * If there are barriers, then we use flush.
	 */
	if (!err && barrier)
		info->feature_flush = REQ_FLUSH | REQ_FUA;
	/*
	 * And if there is "feature-flush-cache" use that above
	 * barriers.
	 */
	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"feature-flush-cache", "%d", &flush,
			NULL);

	if (!err && flush)
		info->feature_flush = REQ_FLUSH;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"feature-discard", "%d", &discard,
			NULL);

	if (!err && discard)
		blkfront_setup_discard(info);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			"feature-persistent", "%u", &persistent,
			NULL);
	if (err)
		info->feature_persistent = 0;
	else
		info->feature_persistent = persistent;

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "feature-max-indirect-segments", "%u", &indirect_segments,
			    NULL);
	if (err)
		info->max_indirect_segments = 0;
	else
		info->max_indirect_segments = min(indirect_segments,
						  xen_blkif_max_segments);

	return blkfront_setup_indirect(&info->rinfo);
}

/*
 * Invoked when the backend is finally 'ready' (and has provided the
 * details about the physical device - #sectors, size, etc).
 */
static void blkfront_connect(struct blkfront_info *info)
{
	unsigned long long sectors;
	unsigned long sector_size;
	unsigned int physical_sector_size;
	unsigned int binfo;
	int err;
	struct blkfront_ring_info *rinfo = &info->rinfo;

	switch (info->connected) {
	case BLKIF_STATE_CONNECTED:
		/*
		 * Potentially, the back-end may be signalling
		 * a capacity change; update the capacity.
		 */
		err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
				   "sectors", "%Lu", &sectors);
		if (XENBUS_EXIST_ERR(err))
			return;
		printk(KERN_INFO "Setting capacity to %Lu\n",
		       sectors);
		set_capacity(info->gd, sectors);
		revalidate_disk(info->gd);

		return;
	case BLKIF_STATE_SUSPENDED:
		/*
		 * If we are recovering from suspension, we need to wait
		 * for the backend to announce its features before
		 * reconnecting; at a minimum we need to know if the backend
		 * supports indirect descriptors, and how many.
		 */
		blkif_recover(info);
		return;

	default:
		break;
	}

	dev_dbg(&info->xbdev->dev, "%s:%s.\n",
		__func__, info->xbdev->otherend);

	err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
			    "sectors", "%llu", &sectors,
			    "info", "%u", &binfo,
			    "sector-size", "%lu", &sector_size,
			    NULL);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err,
				 "reading backend fields at %s",
				 info->xbdev->otherend);
		return;
	}

	/*
	 * physical-sector-size is a newer field, so old backends may not
	 * provide this. Assume physical sector size to be the same as
	 * sector_size in that case.
	 */
	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
			   "physical-sector-size", "%u", &physical_sector_size);
	if (err != 1)
		physical_sector_size = sector_size;

	err = blkfront_gather_backend_features(info);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
				 info->xbdev->otherend);
		return;
	}

	err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
				  physical_sector_size);
	if (err) {
		xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
				 info->xbdev->otherend);
		return;
	}

	xenbus_switch_state(info->xbdev, XenbusStateConnected);

	/* Kick pending requests. */
	spin_lock_irq(&info->io_lock);
	info->connected = BLKIF_STATE_CONNECTED;
	kick_pending_request_queues(rinfo);
	spin_unlock_irq(&info->io_lock);

	add_disk(info->gd);

	info->is_ready = 1;
}

/**
 * Callback received when the backend's state changes.
 */
static void blkback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);

	switch (backend_state) {
	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (talk_to_blkback(dev, info)) {
			kfree(info);
			dev_set_drvdata(&dev->dev, NULL);
			break;
		}
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateConnected:
		blkfront_connect(info);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's Closing state -- fallthrough */
	case XenbusStateClosing:
		if (info)
			blkfront_closing(info);
		break;
	}
}

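/*
 * The xenbus device is being removed.  Free the ring and, unless the block
 * device is still open, release the gendisk as well; a still-open device is
 * cleaned up by the final blkif_release().
 */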
static int blkfront_remove(struct xenbus_device *xbdev)
{
	struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
	struct block_device *bdev = NULL;
	struct gendisk *disk;

	dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);

	blkif_free(info, 0);

	mutex_lock(&info->mutex);

	disk = info->gd;
	if (disk)
		bdev = bdget_disk(disk, 0);

	info->xbdev = NULL;
	mutex_unlock(&info->mutex);

	if (!bdev) {
		kfree(info);
		return 0;
	}

	/*
	 * The xbdev was removed before we reached the Closed
	 * state. See if it's safe to remove the disk. If the bdev
	 * isn't closed yet, we let release take care of it.
	 */

	mutex_lock(&bdev->bd_mutex);
	info = disk->private_data;

	dev_warn(disk_to_dev(disk),
		 "%s was hot-unplugged, %d stale handles\n",
		 xbdev->nodename, bdev->bd_openers);

	if (info && !bdev->bd_openers) {
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

	mutex_unlock(&bdev->bd_mutex);
	bdput(bdev);

	return 0;
}

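/* Report whether the frontend has finished connecting and still has a backend. */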
static int blkfront_is_ready(struct xenbus_device *dev)
{
	struct blkfront_info *info = dev_get_drvdata(&dev->dev);

	return info->is_ready && info->xbdev;
}

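/*
 * Opening fails with -ERESTARTSYS if the xenbus device or the gendisk has
 * already been torn down underneath us.
 */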
static int blkif_open(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkfront_info *info;
	int err = 0;

	mutex_lock(&blkfront_mutex);

	info = disk->private_data;
	if (!info) {
		/* xbdev gone */
		err = -ERESTARTSYS;
		goto out;
	}

	mutex_lock(&info->mutex);

	if (!info->gd)
		/* xbdev is closed */
		err = -ERESTARTSYS;

	mutex_unlock(&info->mutex);

out:
	mutex_unlock(&blkfront_mutex);
	return err;
}

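/*
 * On the last release, honour a close request that was deferred because the
 * device was still open, or finish tearing down a hot-unplugged device.
 */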
static void blkif_release(struct gendisk *disk, fmode_t mode)
{
	struct blkfront_info *info = disk->private_data;
	struct block_device *bdev;
	struct xenbus_device *xbdev;

	mutex_lock(&blkfront_mutex);

	bdev = bdget_disk(disk, 0);

	if (!bdev) {
		WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
		goto out_mutex;
	}
	if (bdev->bd_openers)
		goto out;

	/*
	 * Check if we have been instructed to close. We will have
	 * deferred this request, because the bdev was still open.
	 */

	mutex_lock(&info->mutex);
	xbdev = info->xbdev;

	if (xbdev && xbdev->state == XenbusStateClosing) {
		/* pending switch to state closed */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		xenbus_frontend_closed(info->xbdev);
	}

	mutex_unlock(&info->mutex);

	if (!xbdev) {
		/* sudden device removal */
		dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
		xlvbd_release_gendisk(info);
		disk->private_data = NULL;
		kfree(info);
	}

out:
	bdput(bdev);
out_mutex:
	mutex_unlock(&blkfront_mutex);
}

static const struct block_device_operations xlvbd_block_fops =
{
	.owner = THIS_MODULE,
	.open = blkif_open,
	.release = blkif_release,
	.getgeo = blkif_getgeo,
	.ioctl = blkif_ioctl,
};


static const struct xenbus_device_id blkfront_ids[] = {
	{ "vbd" },
	{ "" }
};

static struct xenbus_driver blkfront_driver = {
	.ids  = blkfront_ids,
	.probe = blkfront_probe,
	.remove = blkfront_remove,
	.resume = blkfront_resume,
	.otherend_changed = blkback_changed,
	.is_ready = blkfront_is_ready,
};

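/*
 * Module init: only load in a Xen domain with PV disk devices, validate the
 * module parameters, then register the block major and the xenbus frontend
 * driver.
 */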
static int __init xlblk_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = 0;
	}

	if (!xen_has_pv_disk_devices())
		return -ENODEV;

	if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
		printk(KERN_WARNING "xen_blk: can't get major %d with name %s\n",
		       XENVBD_MAJOR, DEV_NAME);
		return -ENODEV;
	}

	ret = xenbus_register_frontend(&blkfront_driver);
	if (ret) {
		unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
		return ret;
	}

	return 0;
}
module_init(xlblk_init);


static void __exit xlblk_exit(void)
{
	xenbus_unregister_driver(&blkfront_driver);
	unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
	kfree(minors);
}
module_exit(xlblk_exit);

MODULE_DESCRIPTION("Xen virtual block device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
MODULE_ALIAS("xen:vbd");
MODULE_ALIAS("xenblk");