/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages. The seg would be from 0 through
 * 10 and would index into the pending_pages[..].
 */
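/*
 * For example, with 11 segments per request, segment 3 of the second
 * pending_req (index 1) lands in pending_pages[1 * 11 + 3], i.e. index 14.
 */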
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the freepool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

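/*
 * Interrupt handler for the event channel: the frontend has kicked us,
 * so note that work is pending and wake the per-device kthread.
 */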
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d"
		 "  |  ds %4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req,
		 blkif->st_f_req, blkif->st_ds_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
	blkif->st_ds_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

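		/*
		 * Sleep until the frontend posts a request, then make sure
		 * a free pending_req is available before draining the ring.
		 */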
		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

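/*
 * 'buf' holds the bus address of the granted page with the segment's
 * first-sector byte offset folded in; 'nsec' is the segment length in
 * 512-byte sectors.
 */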
struct seg_buf {
	unsigned long buf;
	unsigned int nsec;
};
/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
				 (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}

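/*
 * Map the frontend's grant references into our address space and record
 * the grant handles so that xen_blkbk_unmap() can tear the mappings down.
 */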
static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->u.rw.nr_segments;
	int ret = 0;

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and
	 * assign map[..] the PFN of the page in our domain with the
	 * corresponding grant reference for each page.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref,
				  pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), NULL);
		if (ret) {
			pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
				 (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf  = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}

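/*
 * Handle a BLKIF_OP_DISCARD request by passing it on to the underlying
 * block device with blkdev_issue_discard().
 */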
static int dispatch_discard_io(struct xen_blkif *blkif,
				struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct block_device *bdev = blkif->vbd.bdev;

	blkif->st_ds_req++;

	xen_blkif_get(blkif);
	if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
	    blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
		unsigned long secure = (blkif->vbd.discard_secure &&
			(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
			BLKDEV_DISCARD_SECURE : 0;
		err = blkdev_issue_discard(bdev,
				req->u.discard.sector_number,
				req->u.discard.nr_sectors,
				GFP_KERNEL, secure);
	} else
		err = -EOPNOTSUPP;

	if (err == -EOPNOTSUPP) {
		pr_debug(DRV_PFX "discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(blkif, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}
}

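/*
 * Wait until all in-flight I/O for this interface has completed. Used to
 * give BLKIF_OP_WRITE_BARRIER requests their ordering guarantee.
 */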
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
	atomic_set(&blkif->drain, 1);
	do {
		/*
		 * The initial refcount is one, and one more is taken at the
		 * start of the xen_blkif_schedule thread, so a value <= 2
		 * means there is no I/O outstanding.
		 */
		if (atomic_read(&blkif->refcnt) <= 2)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
		    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
			if (atomic_read(&pending_req->blkif->drain))
				complete(&pending_req->blkif->drain_complete);
		}
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}



/*
 * Function to copy from the ring buffer the 'struct blkif_request' (which
 * has the sectors we want, number of them, grant references, etc), and
 * transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();
		if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
			free_req(pending_req);
			if (dispatch_discard_io(blkif, &req))
				break;
		} else if (dispatch_rw_block_io(blkif, &req, pending_req))
			break;

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

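/*
 * Keep draining the ring via __do_block_io_op() until
 * RING_FINAL_CHECK_FOR_REQUESTS reports that no new request arrived after
 * the final check, so none is missed without a fresh event.
 */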
static int
do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(blkif);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmute the 'struct blkif_request' to a proper 'struct bio' and call
 * 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;
	bool drain = false;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
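		/* fall through to the flush */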
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev           = req->u.rw.handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->u.rw.id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;
	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d\n",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * Wait on all outstanding I/O's and once that has been completed
	 * issue the WRITE_FLUSH.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->blkif);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/*
	 * The corresponding xen_blkif_put is done in __end_block_io_op once
	 * all of the bio's for this request have completed.
	 */
	xen_blkif_get(blkif);

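	/*
	 * Coalesce the segments into as few bios as possible: keep adding
	 * pages to the current bio until bio_add_page() refuses one, then
	 * allocate a new bio sized for the segments that remain.
	 */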
	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;
		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	/*
	 * Set pendcnt to the number of bios in one go, so that the
	 * submission loop below does not have to do an atomic_inc for
	 * every bio.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Get a reference count for the disk queue and start sending I/O */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation & WRITE)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}



/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
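	/*
	 * All three ABI layouts share the same producer index, so advance it
	 * via the common ring view; the macro below then tells us whether
	 * the frontend needs to be notified.
	 */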
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

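	/* One page for every segment of every possible in-flight request. */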
	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kzalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kmalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
		kfree(blkbk->pending_pages);
	}
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");