/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>

#include <xen/events.h>
#include <xen/page.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include "common.h"

/*
 * These are rather arbitrary. They are fairly large because adjacent requests
 * pulled from a communication ring are quite likely to end up being part of
 * the same scatter/gather request at the disc.
 *
 * ** TRY INCREASING 'xen_blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
 *
 * This will increase the chances of being able to write whole tracks.
 * 64 should be enough to keep us competitive with Linux.
 */
static int xen_blkif_reqs = 64;
module_param_named(reqs, xen_blkif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
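/*
 * 'reqs' can only be set at load time (perm == 0), e.g.
 * "modprobe xen-blkback reqs=128"; the module name is assumed here from
 * the mainline build (xen-blkback.ko) and may differ in older trees.
 */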

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements
 * the pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif	*blkif;
	u64			id;
	int			nr_pages;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
};

#define BLKBACK_INVALID_HANDLE (~0)

struct xen_blkbk {
	struct pending_req	*pending_reqs;
	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;
	/* The list of all pages that are available. */
	struct page		**pending_pages;
	/* And the grant handles that are available. */
	grant_handle_t		*pending_grant_handles;
};

static struct xen_blkbk *blkbk;

/*
 * Little helpful macro to figure out the index and virtual address of the
 * pending_pages[..]. For each 'pending_req' we have up to
 * BLKIF_MAX_SEGMENTS_PER_REQUEST (11) pages; 'seg' runs from 0 through 10
 * and indexes into pending_pages[..].
 */
static inline int vaddr_pagenr(struct pending_req *req, int seg)
{
	return (req - blkbk->pending_reqs) *
		BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
}
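
/*
 * Example: with BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, segment 4 of the
 * third pending_req (req - blkbk->pending_reqs == 2) lands at
 * pending_pages[2 * 11 + 4] == pending_pages[26]. Note that pending_page()
 * below expands to a struct member, so it is written as
 * blkbk->pending_page(req, seg).
 */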

#define pending_page(req, seg) pending_pages[vaddr_pagenr(req, seg)]

static inline unsigned long vaddr(struct pending_req *req, int seg)
{
	unsigned long pfn = page_to_pfn(blkbk->pending_page(req, seg));
	return (unsigned long)pfn_to_kaddr(pfn);
}

#define pending_handle(_req, _seg) \
	(blkbk->pending_grant_handles[vaddr_pagenr(_req, _seg)])


static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st);

/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
static struct pending_req *alloc_req(void)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	if (!list_empty(&blkbk->pending_free)) {
		req = list_entry(blkbk->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&blkbk->pending_free_lock, flags);
	was_empty = list_empty(&blkbk->pending_free);
	list_add(&req->free_list, &blkbk->pending_free);
	spin_unlock_irqrestore(&blkbk->pending_free_lock, flags);
	if (was_empty)
		wake_up(&blkbk->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
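/*
 * Translate the frontend's (virtual device, sector range) in 'req' into the
 * backend's (bdev, sector), enforcing the read-only flag and the device size.
 */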
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != READ) && vbd->readonly)
		goto out;

	if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
		goto out;

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn(DRV_PFX "Error starting transaction");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn(DRV_PFX "Error writing new size");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn(DRV_PFX "Error writing the state");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn(DRV_PFX "Error ending transaction");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
	blkif->waiting_reqs = 1;
	wake_up(&blkif->wq);
}

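/*
 * Event-channel interrupt handler: kick the per-device kernel thread
 * (xen_blkif_schedule) so it re-examines the ring.
 */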
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

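/*
 * Printed from the scheduler thread, at most once every ten seconds,
 * when log_stats is set.
 */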
static void print_stats(struct xen_blkif *blkif)
{
	pr_info("xen-blkback (%s): oo %3d  |  rd %4d  |  wr %4d  |  f %4d\n",
		 current->comm, blkif->st_oo_req,
		 blkif->st_rd_req, blkif->st_wr_req, blkif->st_f_req);
	blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	blkif->st_rd_req = 0;
	blkif->st_wr_req = 0;
	blkif->st_oo_req = 0;
}

int xen_blkif_schedule(void *arg)
{
	struct xen_blkif *blkif = arg;
	struct xen_vbd *vbd = &blkif->vbd;

	xen_blkif_get(blkif);

	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		wait_event_interruptible(
			blkif->wq,
			blkif->waiting_reqs || kthread_should_stop());
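		/* Also wait until a pending_req is free before touching the ring. */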
		wait_event_interruptible(
			blkbk->pending_free_wq,
			!list_empty(&blkbk->pending_free) ||
			kthread_should_stop());

		blkif->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		if (do_block_io_op(blkif))
			blkif->waiting_reqs = 1;

		if (log_stats && time_after(jiffies, blkif->st_print))
			print_stats(blkif);
	}

	if (log_stats)
		print_stats(blkif);

	blkif->xenblkd = NULL;
	xen_blkif_put(blkif);

	return 0;
}

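/*
 * Per-segment scratch data filled in by xen_blkbk_map(): 'buf' is the bus
 * address of the granted page with the first-sector offset folded in, and
 * 'nsec' is the segment length in 512-byte sectors.
 */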
struct seg_buf {
	unsigned long buf;
	unsigned int nsec;
};
/*
 * Unmap the grant references, and also remove the M2P overrides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct pending_req *req)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int ret;

	for (i = 0; i < req->nr_pages; i++) {
		handle = pending_handle(req, i);
		if (handle == BLKBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	ret = HYPERVISOR_grant_table_op(
		GNTTABOP_unmap_grant_ref, unmap, invcount);
	BUG_ON(ret);
	/*
	 * Note, we use invcount, not nr_pages, so we can't index
	 * using vaddr(req, i).
	 */
	for (i = 0; i < invcount; i++) {
		ret = m2p_remove_override(
			virt_to_page(unmap[i].host_addr), false);
		if (ret) {
			pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
				 (unsigned long)unmap[i].host_addr);
			continue;
		}
	}
}
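
/*
 * Map the frontend's grant references for this request into our domain,
 * recording each grant handle for the later unmap; on success the M2P
 * overrides are installed so the pages can be reached through vaddr().
 */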

static int xen_blkbk_map(struct blkif_request *req,
			 struct pending_req *pending_req,
			 struct seg_buf seg[])
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i;
	int nseg = req->nr_segments;
	int ret = 0;

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain and the corresponding
	 * grant reference for each page.
	 */
	for (i = 0; i < nseg; i++) {
		uint32_t flags;

		flags = GNTMAP_host_map;
		if (pending_req->operation != BLKIF_OP_READ)
			flags |= GNTMAP_readonly;
		gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
				  req->u.rw.seg[i].gref,
				  pending_req->blkif->domid);
	}

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
	BUG_ON(ret);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (i = 0; i < nseg; i++) {
		if (unlikely(map[i].status != 0)) {
			pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
			map[i].handle = BLKBACK_INVALID_HANDLE;
			ret |= 1;
		}

		pending_handle(pending_req, i) = map[i].handle;

		if (ret)
			continue;

		ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
			blkbk->pending_page(pending_req, i), false);
		if (ret) {
			pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
				 (unsigned long)map[i].dev_bus_addr, ret);
			/* We could switch over to GNTTABOP_copy */
			continue;
		}

		seg[i].buf  = map[i].dev_bus_addr |
			(req->u.rw.seg[i].first_sect << 9);
	}
	return ret;
}

/*
 * Completion callback on the bio's. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
	/* An error fails the entire request. */
	if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
	    (error == -EOPNOTSUPP)) {
		pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt)) {
		xen_blkbk_unmap(pending_req);
		make_response(pending_req->blkif, pending_req->id,
			      pending_req->operation, pending_req->status);
		xen_blkif_put(pending_req->blkif);
		free_req(pending_req);
	}
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
	__end_block_io_op(bio->bi_private, error);
	bio_put(bio);
}



/*
 * Function to copy from the ring buffer the 'struct blkif_request' (which
 * has the sectors we want, the number of them, grant references, etc.),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int do_block_io_op(struct xen_blkif *blkif)
{
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	struct blkif_request req;
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

		pending_req = alloc_req();
		if (NULL == pending_req) {
			blkif->st_oo_req++;
			more_to_do = 1;
			break;
		}

		switch (blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();

		if (dispatch_rw_block_io(blkif, &req, pending_req))
			break;

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio' and
 * call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio *biolist[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	int i, nbio = 0;
	int operation;
	struct blk_plug plug;

	switch (req->operation) {
	case BLKIF_OP_READ:
		blkif->st_rd_req++;
		operation = READ;
		break;
	case BLKIF_OP_WRITE:
		blkif->st_wr_req++;
		operation = WRITE_ODIRECT;
		break;
	case BLKIF_OP_FLUSH_DISKCACHE:
		blkif->st_f_req++;
		operation = WRITE_FLUSH;
	/*
	 * The frontend likes to set this to -1, which xen_vbd_translate
	 * is allergic to.
	 */
		req->u.rw.sector_number = 0;
		break;
	case BLKIF_OP_WRITE_BARRIER:
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}

	/* Check that the number of segments is sane. */
	nseg = req->nr_segments;
	if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
	    unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
		pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
			 nseg);
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.dev           = req->handle;
	preq.sector_number = req->u.rw.sector_number;
	preq.nr_sects      = 0;

	pending_req->blkif     = blkif;
	pending_req->id        = req->id;
	pending_req->operation = req->operation;
	pending_req->status    = BLKIF_RSP_OKAY;
	pending_req->nr_pages  = nseg;

	for (i = 0; i < nseg; i++) {
		seg[i].nsec = req->u.rw.seg[i].last_sect -
			req->u.rw.seg[i].first_sect + 1;
		if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
		    (req->u.rw.seg[i].last_sect < req->u.rw.seg[i].first_sect))
			goto fail_response;
		preq.nr_sects += seg[i].nsec;

	}

	if (xen_vbd_translate(&preq, blkif, operation) != 0) {
		pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects, preq.dev);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
				 blkif->domid);
			goto fail_response;
		}
	}

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map(req, pending_req, seg))
		goto fail_flush;

	/* The corresponding xen_blkif_put is done in __end_block_io_op. */
	xen_blkif_get(blkif);
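	/*
	 * Pack the segments into as few bios as possible: a new bio is opened
	 * only when bio_add_page() cannot take the next page.
	 */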

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     blkbk->pending_page(pending_req, i),
				     seg[i].nsec << 9,
				     seg[i].buf & ~PAGE_MASK) == 0)) {

			bio = bio_alloc(GFP_KERNEL, nseg-i);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio->bi_bdev    = preq.bdev;
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
			bio->bi_sector  = preq.sector_number;
		}

		preq.sector_number += seg[i].nsec;
	}

	/* This will be hit if the operation was a flush. */
	if (!bio) {
		BUG_ON(operation != WRITE_FLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio->bi_bdev    = preq.bdev;
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
	}

	/*
	 * Set the pending count up front so that the last submit_bio does
	 * not have to call atomic_inc.
	 */
	atomic_set(&pending_req->pendcnt, nbio);

	/* Plug the queue so the bio's can be batched, and start sending I/O. */
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(operation, biolist[i]);

	/* Let the I/Os go.. */
	blk_finish_plug(&plug);

	if (operation == READ)
		blkif->st_rd_sect += preq.nr_sects;
	else if (operation == WRITE || operation == WRITE_FLUSH)
		blkif->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(pending_req);
 fail_response:
	/* Haven't submitted any bio's yet. */
	make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
	free_req(pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	__end_block_io_op(pending_req, -EINVAL);
	msleep(1); /* back off a bit */
	return -EIO;
}



/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response  resp;
	unsigned long     flags;
	union blkif_back_rings *blk_rings = &blkif->blk_rings;
	int more_to_do = 0;
	int notify;

	resp.id        = id;
	resp.operation = op;
	resp.status    = st;

	spin_lock_irqsave(&blkif->blk_ring_lock, flags);
	/* Place on the response ring for the relevant domain. */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_32:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	case BLKIF_PROTOCOL_X86_64:
		memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
		       &resp, sizeof(resp));
		break;
	default:
		BUG();
	}
	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
		/*
		 * Tail check for pending requests. Allows frontend to avoid
		 * notifications if requests are already in flight (lower
		 * overheads and promotes batching).
		 */
		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);

	} else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
		more_to_do = 1;
	}

	spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);

	if (more_to_do)
		blkif_notify_work(blkif);
	if (notify)
		notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
	int i, mmap_pages;
	int rc = 0;

	if (!xen_pv_domain())
		return -ENODEV;

	blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
	if (!blkbk) {
		pr_alert(DRV_PFX "%s: out of memory!\n", __func__);
		return -ENOMEM;
	}

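	/*
	 * One page and one grant handle per possible in-flight segment: with
	 * the default 64 requests of 11 segments each, that is 704 pages.
	 */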
	mmap_pages = xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;

	blkbk->pending_reqs          = kmalloc(sizeof(blkbk->pending_reqs[0]) *
					xen_blkif_reqs, GFP_KERNEL);
	blkbk->pending_grant_handles = kzalloc(sizeof(blkbk->pending_grant_handles[0]) *
					mmap_pages, GFP_KERNEL);
	blkbk->pending_pages         = kzalloc(sizeof(blkbk->pending_pages[0]) *
					mmap_pages, GFP_KERNEL);

	if (!blkbk->pending_reqs || !blkbk->pending_grant_handles ||
	    !blkbk->pending_pages) {
		rc = -ENOMEM;
		goto out_of_memory;
	}

	for (i = 0; i < mmap_pages; i++) {
		blkbk->pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
		blkbk->pending_pages[i] = alloc_page(GFP_KERNEL);
		if (blkbk->pending_pages[i] == NULL) {
			rc = -ENOMEM;
			goto out_of_memory;
		}
	}
	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	memset(blkbk->pending_reqs, 0,
	       xen_blkif_reqs * sizeof(blkbk->pending_reqs[0]));

	INIT_LIST_HEAD(&blkbk->pending_free);
	spin_lock_init(&blkbk->pending_free_lock);
	init_waitqueue_head(&blkbk->pending_free_wq);

	for (i = 0; i < xen_blkif_reqs; i++)
		list_add_tail(&blkbk->pending_reqs[i].free_list,
			      &blkbk->pending_free);

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_init;

	return 0;

 out_of_memory:
	pr_alert(DRV_PFX "%s: out of memory\n", __func__);
 failed_init:
	kfree(blkbk->pending_reqs);
	kfree(blkbk->pending_grant_handles);
	if (blkbk->pending_pages) {
		for (i = 0; i < mmap_pages; i++) {
			if (blkbk->pending_pages[i])
				__free_page(blkbk->pending_pages[i]);
		}
	}
	kfree(blkbk->pending_pages);
	kfree(blkbk);
	blkbk = NULL;
	return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");