/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a low value reduces the memory used in each backend, but
 * can incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but can
 * be set to a lower value that might degrade performance on some intensive
 * IO workloads.
 */
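
/*
 * With the default of 1024 pages and 4KB grant pages this caps the free page
 * pool at roughly 4MB per ring; xen_blkif_schedule() trims the pool back to
 * this limit via shrink_free_pagepool().
 */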

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using a LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * Maximum number of rings/queues blkback supports; allow as many queues as
 * there are CPUs if the user has not specified a value.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk." \
		 "By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between front and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, S_IRUGO);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each LRU
 * execution.
 */
#define LRU_PERCENT_CLEAN 5
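
/*
 * Example with the defaults: max_persistent_grants is 1056, so each purge
 * pass targets (1056 / 100) * 5 = 50 grants, plus however many grants the
 * list currently exceeds the limit by (see purge_persistent_gnt()).
 */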

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to gnttab_free_pages */
#define NUM_BATCH_FREE_PAGES 10
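
/*
 * Batching keeps the time spent holding free_pages_lock bounded:
 * shrink_free_pagepool() drops the lock around each gnttab_free_pages() call.
 */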

static inline int get_free_page(struct xen_blkif_ring *ring, struct page **page)
127 128 129
{
	unsigned long flags;

130 131 132 133
	spin_lock_irqsave(&ring->free_pages_lock, flags);
	if (list_empty(&ring->free_pages)) {
		BUG_ON(ring->free_pages_num != 0);
		spin_unlock_irqrestore(&ring->free_pages_lock, flags);
134
		return gnttab_alloc_pages(1, page);
135
	}
136 137
	BUG_ON(ring->free_pages_num == 0);
	page[0] = list_first_entry(&ring->free_pages, struct page, lru);
138
	list_del(&page[0]->lru);
139 140
	ring->free_pages_num--;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
141

142 143 144
	return 0;
}

145
static inline void put_free_pages(struct xen_blkif_ring *ring, struct page **page,
146
                                  int num)
{
148 149 150
	unsigned long flags;
	int i;

151
	spin_lock_irqsave(&ring->free_pages_lock, flags);
152
	for (i = 0; i < num; i++)
153 154 155
		list_add(&page[i]->lru, &ring->free_pages);
	ring->free_pages_num += num;
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
156 157
}

158
static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
159 160 161 162 163 164
{
	/* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
	struct page *page[NUM_BATCH_FREE_PAGES];
	unsigned int num_pages = 0;
	unsigned long flags;

165 166 167 168
	spin_lock_irqsave(&ring->free_pages_lock, flags);
	while (ring->free_pages_num > num) {
		BUG_ON(list_empty(&ring->free_pages));
		page[num_pages] = list_first_entry(&ring->free_pages,
169 170
		                                   struct page, lru);
		list_del(&page[num_pages]->lru);
171
		ring->free_pages_num--;
172
		if (++num_pages == NUM_BATCH_FREE_PAGES) {
173
			spin_unlock_irqrestore(&ring->free_pages_lock, flags);
174
			gnttab_free_pages(num_pages, page);
175
			spin_lock_irqsave(&ring->free_pages_lock, flags);
176 177 178
			num_pages = 0;
		}
	}
179
	spin_unlock_irqrestore(&ring->free_pages_lock, flags);
180
	if (num_pages != 0)
181
		gnttab_free_pages(num_pages, page);
}

184 185
#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

186 187
static int do_block_io_op(struct xen_blkif_ring *ring);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
188 189
				struct blkif_request *req,
				struct pending_req *pending_req);
190
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);
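
/*
 * Deletion-safe walk of an rb-tree: the next node is looked up before the
 * loop body runs, so the body may rb_erase() and free the current entry.
 */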

#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
212 213
			       struct persistent_gnt *persistent_gnt)
{
214
	struct rb_node **new = NULL, *parent = NULL;
215
	struct persistent_gnt *this;
216
	struct xen_blkif *blkif = ring->blkif;
217

218
	if (ring->persistent_gnt_c >= xen_blkif_max_pgrants) {
219 220 221 222
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
223
	/* Figure out where to put new node */
224
	new = &ring->persistent_gnts.rb_node;
225 226 227 228 229 230 231 232 233
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
234
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
235
			return -EINVAL;
236 237 238
		}
	}

239 240
	bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
	set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
241 242
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
243 244 245
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
246
	return 0;
247 248
}

249
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
250 251 252
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
253
	struct rb_node *node = NULL;
254

255
	node = ring->persistent_gnts.rb_node;
256 257 258 259 260 261 262
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
263 264
		else {
			if(test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
265
				pr_alert_ratelimited("requesting a grant already in use\n");
266 267 268
				return NULL;
			}
			set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
269
			atomic_inc(&ring->persistent_gnt_in_use);
270
			return data;
271
		}
272 273 274 275
	}
	return NULL;
}

276
static void put_persistent_gnt(struct xen_blkif_ring *ring,
277 278 279
                               struct persistent_gnt *persistent_gnt)
{
	if(!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
280
		pr_alert_ratelimited("freeing a grant already unused\n");
281 282
	set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
	clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
283
	atomic_dec(&ring->persistent_gnt_in_use);
284 285
}

286
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
287
                                 unsigned int num)
288 289 290 291
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
292
	struct rb_node *n;
293
	int segs_to_unmap = 0;
294 295 296 297 298
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;
299

300
	foreach_grant_safe(persistent_gnt, n, root, node) {
301 302 303 304 305 306 307 308 309 310 311 312
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {
313 314

			unmap_data.count = segs_to_unmap;
315
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
316

317
			put_free_pages(ring, pages, segs_to_unmap);
318 319
			segs_to_unmap = 0;
		}
320 321 322 323

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
324 325 326 327
	}
	BUG_ON(num != 0);
}

328
void xen_blkbk_unmap_purged_grants(struct work_struct *work)
329 330 331 332
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
333
	int segs_to_unmap = 0;
334
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
335 336 337 338 339
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;
340

341 342
	while(!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
343 344 345 346 347 348 349 350 351 352 353 354
		                                  struct persistent_gnt,
		                                  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
355
			unmap_data.count = segs_to_unmap;
356
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
357
			put_free_pages(ring, pages, segs_to_unmap);
358 359 360 361 362
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
363
		unmap_data.count = segs_to_unmap;
364
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
365
		put_free_pages(ring, pages, segs_to_unmap);
366 367 368
	}
}

369
static void purge_persistent_gnt(struct xen_blkif_ring *ring)
370 371 372 373
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
374
	bool scan_used = false, clean_used = false;
375 376
	struct rb_root *root;

377 378 379
	if (ring->persistent_gnt_c < xen_blkif_max_pgrants ||
	    (ring->persistent_gnt_c == xen_blkif_max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
380
		goto out;
381 382
	}

383
	if (work_busy(&ring->persistent_purge_work)) {
384
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
385
		goto out;
386 387 388
	}

	num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
389 390
	num_clean = ring->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
	num_clean = min(ring->persistent_gnt_c, num_clean);
391
	if ((num_clean == 0) ||
392
	    (num_clean > (ring->persistent_gnt_c - atomic_read(&ring->persistent_gnt_in_use))))
393
		goto out;
394 395 396 397 398 399 400 401 402 403 404 405

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = num_clean;

406
	pr_debug("Going to purge %u persistent grants\n", num_clean);
407

408 409
	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
410 411 412 413 414
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

415 416 417 418 419
		if (clean_used) {
			clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
			continue;
		}

420 421 422 423 424 425 426 427
		if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
			continue;
		if (!scan_used &&
		    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
428
			 &ring->persistent_purge_list);
429 430 431 432 433 434 435 436
		if (--num_clean == 0)
			goto finished;
	}
	/*
	 * If we get here it means we also need to start cleaning
	 * grants that were used since last purge in order to cope
	 * with the requested num
	 */
437
	if (!scan_used && !clean_used) {
438
		pr_debug("Still missing %u purged frames\n", num_clean);
439 440 441 442
		scan_used = true;
		goto purge_list;
	}
finished:
443
	if (!clean_used) {
444
		pr_debug("Finished scanning for grants to clean, removing used flag\n");
445 446
		clean_used = true;
		goto purge_list;
447
	}
448

449 450
	ring->persistent_gnt_c -= (total - num_clean);
	ring->blkif->vbd.overflow_max_grants = 0;
451 452

	/* We can defer this work */
453
	schedule_work(&ring->persistent_purge_work);
454
	pr_debug("Purged %u/%u\n", (total - num_clean), total);
455 456

out:
457 458 459
	return;
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
465
	struct pending_req *req = NULL;
	unsigned long flags;

468 469 470
	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
471
				 free_list);
		list_del(&req->free_list);
	}
474
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

487 488 489 490
	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
492
		wake_up(&ring->pending_free_wq);
}

495 496 497
/*
 * Routines for managing virtual block devices (vbds).
 */
498 499
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
500
{
501
	struct xen_vbd *vbd = &blkif->vbd;
502 503
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
505 506
		goto out;

507 508 509 510 511 512 513 514
	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}
515 516 517 518 519 520 521 522 523

	req->dev  = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

 out:
	return rc;
}

524
static void xen_vbd_resize(struct xen_blkif *blkif)
525
{
526
	struct xen_vbd *vbd = &blkif->vbd;
527 528
	struct xenbus_transaction xbt;
	int err;
529
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
530
	unsigned long long new_size = vbd_sz(vbd);
531

532
	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
533
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
534
	pr_info("VBD Resize: new size %llu\n", new_size);
535 536 537 538
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
539
		pr_warn("Error starting transaction\n");
540 541 542
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
543
			    (unsigned long long)vbd_sz(vbd));
544
	if (err) {
545
		pr_warn("Error writing new size\n");
546 547 548 549 550 551 552 553 554
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
555
		pr_warn("Error writing the state\n");
556 557 558 559 560 561 562
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
563
		pr_warn("Error ending transaction\n");
564
	return;
565 566 567 568
abort:
	xenbus_transaction_end(xbt, 1);
}

569
/*
570 571
 * Notification from the guest OS.
 */
572
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
574 575
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
576
}

578
irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
579 580 581
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

584
/*
 * SCHEDULER FUNCTIONS
 */

588
static void print_stats(struct xen_blkif_ring *ring)
{
590
	pr_info("(%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
591
		 "  |  ds %4llu | pg: %4u/%4d\n",
592 593 594
		 current->comm, ring->st_oo_req,
		 ring->st_rd_req, ring->st_wr_req,
		 ring->st_f_req, ring->st_ds_req,
595
		 ring->persistent_gnt_c,
596
		 xen_blkif_max_pgrants);
597 598 599 600 601
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

604
int xen_blkif_schedule(void *arg)
{
606 607
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
608
	struct xen_vbd *vbd = &blkif->vbd;
609
	unsigned long timeout;
610
	int ret;

	set_freezable();
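
	/*
	 * Main per-ring worker loop: wait for a frontend notification or for a
	 * pending_req to become free, process the ring, and roughly every
	 * LRU_INTERVAL ms fall through to purge persistent grants and trim the
	 * free page pool.
	 */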
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
616
		if (unlikely(vbd->size != vbd_sz(vbd)))
617
			xen_vbd_resize(blkif);

619 620 621
		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
622 623
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
624 625 626 627
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
628 629
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
630 631 632 633
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

635
		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

638
		ret = do_block_io_op(ring);
639
		if (ret > 0)
640
			ring->waiting_reqs = 1;
641
		if (ret == -EACCES)
642
			wait_event_interruptible(ring->shutdown_wq,
643
						 kthread_should_stop());

645 646
purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
647 648 649
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
650 651
		}

652
		/* Shrink if we have more than xen_blkif_max_buffer_pages */
653
		shrink_free_pagepool(ring, xen_blkif_max_buffer_pages);
654

655
		if (log_stats && time_after(jiffies, ring->st_print))
656
			print_stats(ring);
	}

	/* Drain pending purge work */
660
	flush_work(&ring->persistent_purge_work);
661

	if (log_stats)
663
		print_stats(ring);

665
	ring->xenblkd = NULL;

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
673
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
675
	/* Free all persistent grant pages */
676 677 678
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
			ring->persistent_gnt_c);
679

680 681
	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;
682

683
	/* Since we are shutting down remove all pages from the buffer */
684
	shrink_free_pagepool(ring, 0 /* All */);
}

687
static unsigned int xen_blkbk_unmap_prepare(
688
	struct xen_blkif_ring *ring,
689 690 691 692
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
693 694 695
{
	unsigned int i, invcount = 0;

696
	for (i = 0; i < num; i++) {
697
		if (pages[i]->persistent_gnt != NULL) {
698
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
699
			continue;
700
		}
701
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
702
			continue;
703
		unmap_pages[invcount] = pages[i]->page;
704
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
705 706
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
707
		invcount++;
	}
709

	return invcount;
711 712 713 714
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
715 716 717
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;
718 719 720 721 722

	/* BUG_ON used to reproduce existing behaviour,
	   but is this the best way to deal with this? */
	BUG_ON(result);

723
	put_free_pages(ring, data->pages, data->count);
724
	make_response(ring, pending_req->id,
725
		      pending_req->operation, pending_req->status);
726
	free_req(ring, pending_req);
727 728 729 730 731 732 733 734 735 736 737 738
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * it's not a problem with our current implementation
	 * because we can assure there's no thread waiting on
	 * pending_free_wq if there's a drain going on, but it has
	 * to be taken into account if the current model is changed.
	 */
739
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
740 741 742 743 744 745 746 747
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data* work = &req->gnttab_unmap_data;
748
	struct xen_blkif_ring *ring = req->ring;
749 750 751
	struct grant_page **pages = req->segments;
	unsigned int invcount;

752
	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}


/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
773
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
774 775 776 777 778 779 780 781 782 783
                            struct grant_page *pages[],
                            int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);
784 785

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
786 787 788
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
789
			BUG_ON(ret);
790
			put_free_pages(ring, unmap_pages, invcount);
791
		}
792 793
		pages += batch;
		num -= batch;
794 795
	}
}
796

797
static int xen_blkbk_map(struct xen_blkif_ring *ring,
798
			 struct grant_page *pages[],
799
			 int num, bool ro)
800 801
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
802 803 804
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
805
	int i, seg_idx, new_map_idx;
806
	int segs_to_map = 0;
807
	int ret = 0;
808
	int last_map = 0, map_until = 0;
809
	int use_persistent_gnts;
810
	struct xen_blkif *blkif = ring->blkif;
811 812 813

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain and the corresponding
	 * grant reference for each page.
	 */
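	/*
	 * Grants are handled in batches of BLKIF_MAX_SEGMENTS_PER_REQUEST:
	 * the code below loops back to 'again' until every segment has either
	 * been matched to an already-mapped persistent grant or newly mapped
	 * via gnttab_map_refs().
	 */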
again:
	for (i = map_until; i < num; i++) {
821 822
		uint32_t flags;

823
		if (use_persistent_gnts) {
824
			persistent_gnt = get_persistent_gnt(
825
				ring,
826
				pages[i]->gref);
827
		}
828 829 830 831 832 833

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
834 835
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
836
		} else {
837
			if (get_free_page(ring, &pages[i]->page))
838
				goto out_of_memory;
839 840 841
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
842
			flags = GNTMAP_host_map;
843
			if (!use_persistent_gnts && ro)
844 845
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
846
					  flags, pages[i]->gref,
847 848
					  blkif->domid);
		}
849 850 851
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
852 853
	}

854 855 856 857
	if (segs_to_map) {
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
		BUG_ON(ret);
	}
858

859 860
	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
861 862 863
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
864
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
865
		if (!pages[seg_idx]->persistent_gnt) {
866
			/* This is a newly mapped grant */
867 868
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
869
				pr_debug("invalid buffer -- could not remap it\n");
870
				put_free_pages(ring, &pages[seg_idx]->page, 1);
871
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
872
				ret |= 1;
873
				goto next;
874
			}
875
			pages[seg_idx]->handle = map[new_map_idx].handle;
876
		} else {
877
			continue;
878
		}
879
		if (use_persistent_gnts &&
880
		    ring->persistent_gnt_c < xen_blkif_max_pgrants) {
881 882
			/*
			 * We are using persistent grants, the grant is
883
			 * not mapped but we might have room for it.
884 885 886 887
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
				                 GFP_KERNEL);
			if (!persistent_gnt) {
888
				/*
889 890 891
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct
				 * map this grant non-persistenly
892
				 */
893
				goto next;
894
			}
895 896
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
897
			persistent_gnt->page = pages[seg_idx]->page;
898
			if (add_persistent_gnt(ring,
899 900 901
			                       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
902
				goto next;
903
			}
904
			pages[seg_idx]->persistent_gnt = persistent_gnt;
905
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
906
				 persistent_gnt->gnt, ring->persistent_gnt_c,
907
				 xen_blkif_max_pgrants);
908 909 910 911
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
912
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
913
			         blkif->domid, blkif->vbd.handle);
914
		}
915 916 917 918 919
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
920
		new_map_idx++;
921
	}
922 923 924 925 926
	segs_to_map = 0;
	last_map = map_until;
	if (map_until != num)
		goto again;

927
	return ret;
928 929

out_of_memory:
930
	pr_alert("%s: out of memory\n", __func__);
931
	put_free_pages(ring, pages_to_gnt, segs_to_map);
932
	return -ENOMEM;
933 934
}

935
static int xen_blkbk_map_seg(struct pending_req *pending_req)
936
{
937
	int rc;
938

939
	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
940
			   pending_req->nr_segs,
941 942
	                   (pending_req->operation != BLKIF_OP_READ));

943 944
	return rc;
}
945

946 947 948 949 950
static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
951
	struct grant_page **pages = pending_req->indirect_pages;
952
	struct xen_blkif_ring *ring = pending_req->ring;
953
	int indirect_grefs, rc, n, nseg, i;
954
	struct blkif_request_segment *segments = NULL;
955

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);
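
	/*
	 * Each indirect page holds SEGS_PER_INDIRECT_FRAME segment descriptors,
	 * so the frontend supplies INDIRECT_PAGES(nseg) extra grant references;
	 * those pages are mapped below before the descriptors can be read.
	 */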

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

963
	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
964 965 966 967
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
968 969
		uint8_t first_sect, last_sect;

970 971 972 973
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
974
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
975 976
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
977

978
		pending_req->segments[n]->gref = segments[i].gref;
979 980 981 982

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
983 984 985
			rc = -EINVAL;
			goto unmap;
		}
986 987 988

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
989 990 991 992 993 994
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
995
	xen_blkbk_unmap(ring, pages, indirect_grefs);
996
	return rc;
997 998
}

999
static int dispatch_discard_io(struct xen_blkif_ring *ring,
1000
				struct blkif_request *req)
1001 1002 1003
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
1004
	struct xen_blkif *blkif = ring->blkif;
1005
	struct block_device *bdev = blkif->vbd.bdev;
1006
	unsigned long secure;
1007
	struct phys_req preq;
1008

1009 1010
	xen_blkif_get(blkif);

1011 1012 1013
	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects      = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
1015
	if (err) {
1016
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
1017 1018 1019 1020
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
1021
	ring->st_ds_req++;
1022

1023 1024 1025 1026 1027 1028 1029
	secure = (blkif->vbd.discard_secure &&
		 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
1030
fail_response:
1031
	if (err == -EOPNOTSUPP) {
1032
		pr_debug("discard op failed, not supported\n");
1033 1034 1035 1036
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

1037
	make_response(ring, req->u.discard.id, req->operation, status);
1038 1039
	xen_blkif_put(blkif);
	return err;
1040 1041
}

1042
static int dispatch_other_io(struct xen_blkif_ring *ring,
1043 1044 1045
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
1046 1047
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
1048 1049 1050 1051
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

1052
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
1053
{
1054 1055
	struct xen_blkif *blkif = ring->blkif;

1056 1057
	atomic_set(&blkif->drain, 1);
	do {
1058
		if (atomic_read(&ring->inflight) == 0)
1059
			break;
1060 1061 1062 1063 1064 1065 1066 1067 1068
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

1069 1070
static void __end_block_io_op(struct pending_req *pending_req,
		blk_status_t error)
{
	/* An error fails the entire request. */
1073 1074
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
1075
		pr_debug("flush diskcache op failed, not supported\n");
1076
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
1078 1079
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
1080
		pr_debug("write barrier op failed, not supported\n");
1081
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
1082
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
1084
		pr_debug("Buffer not up-to-date at end of operation,"
1085
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

1089 1090
	/*
	 * If all of the bio's have completed it is time to unmap
1091
	 * the grant references associated with 'request' and provide
1092 1093
	 * the proper response on the ring.
	 */
1094 1095
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

1098 1099 1100
/*
 * bio callback.
 */
1101
static void end_block_io_op(struct bio *bio)
{
1103
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}



/*
 * Function to copy the 'struct blkif_request' from the ring buffer
 * (which has the sectors we want, number of them, grant references, etc),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
1114
static int
1115
__do_block_io_op(struct xen_blkif_ring *ring)
{
1117
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	struct blkif_request req;
1119
	struct pending_req *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = blk_rings->common.req_cons;
	rp = blk_rings->common.sring->req_prod;
	rmb(); /* Ensure we see queued requests up to 'rp'. */

1127 1128
	if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
		rc = blk_rings->common.rsp_prod_pvt;
1129
		pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1130
			rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1131 1132
		return -EACCES;
	}
	while (rc != rp) {

		if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
			break;

1138
		if (kthread_should_stop()) {
			more_to_do = 1;
			break;
		}

1143
		pending_req = alloc_req(ring);
1144
		if (NULL == pending_req) {
1145
			ring->st_oo_req++;
			more_to_do = 1;
			break;
		}

1150
		switch (ring->blkif->blk_protocol) {
		case BLKIF_PROTOCOL_NATIVE:
			memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
			break;
		case BLKIF_PROTOCOL_X86_32:
			blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
			break;
		case BLKIF_PROTOCOL_X86_64:
			blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
			break;
		default:
			BUG();
		}
		blk_rings->common.req_cons = ++rc; /* before make_response() */

		/* Apply all sanity checks to /private copy/ of request. */
		barrier();
1167 1168 1169 1170 1171 1172

		switch (req.operation) {
		case BLKIF_OP_READ:
		case BLKIF_OP_WRITE:
		case BLKIF_OP_WRITE_BARRIER:
		case BLKIF_OP_FLUSH_DISKCACHE:
1173
		case BLKIF_OP_INDIRECT:
1174
			if (dispatch_rw_block_io(ring, &req, pending_req))
1175 1176 1177
				goto done;
			break;
		case BLKIF_OP_DISCARD:
1178 1179
			free_req(ring, pending_req);
			if (dispatch_discard_io(ring, &req))
1180
				goto done;
			break;
1182
		default:
1183
			if (dispatch_other_io(ring, &req, pending_req))
1184 1185 1186
				goto done;
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}
1191
done:
	return more_to_do;
}

1195
static int
1196
do_block_io_op(struct xen_blkif_ring *ring)
1197
{
1198
	union blkif_back_rings *blk_rings = &ring->blk_rings;
1199 1200 1201
	int more_to_do;

	do {
1202
		more_to_do = __do_block_io_op(ring);
1203 1204 1205 1206 1207 1208 1209 1210
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
/*
 * Transmutation of the 'struct blkif_request' to a proper 'struct bio'
 * and a call to 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
1216 1217
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
1220
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
1223
	struct bio **biolist = pending_req->biolist;
1224
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
1227
	struct blk_plug plug;
1228
	bool drain = false;
1229
	struct grant_page **pages = pending_req->segments;
1230 1231 1232 1233
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;
1234

1235 1236 1237
	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
1238
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
1239 1240
		goto fail_response;
	}

1242
	switch (req_operation) {
	case BLKIF_OP_READ:
1244
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
1248
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
1250
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
1252 1253
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
1254
		/* fall through */
1255
	case BLKIF_OP_FLUSH_DISKCACHE:
1256
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
1258
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
1262 1263
		goto fail_response;
		break;
	}

1266
	/* Check that the number of segments is sane. */
1267 1268
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;
1269

1270
	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
1271 1272 1273 1274
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
1275
		pr_debug("Bad number of segments in request (%d)\n", nseg);
1276
		/* Haven't submitted any bio's yet. */
		goto fail_response;
	}

	preq.nr_sects      = 0;

1282
	pending_req->ring      = ring;
1283
	pending_req->id        = req->u.rw.id;
1284
	pending_req->operation = req_operation;
	pending_req->status    = BLKIF_RSP_OKAY;
1286
	pending_req->nr_segs   = nseg;
1287

1288 1289 1290 1291
	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev               = req->u.rw.handle;
		preq.sector_number     = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
1292
			pages[i]->gref = req->u.rw.seg[i].gref;
1293 1294 1295
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
1296
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
1297 1298 1299 1300 1301 1302 1303 1304 1305
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev               = req->u.indirect.handle;
		preq.sector_number     = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}

1309
	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
1310
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
1312
			 preq.sector_number,
1313
			 preq.sector_number + preq.nr_sects,
1314
			 ring->blkif->vbd.pdevice);
1315
		goto fail_response;
	}
1317 1318

	/*
1319
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
1320 1321
	 * is set there.
	 */
1322 1323 1324
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
1325
			pr_debug("Misaligned I/O request from domain %d\n",
1326
				 ring->blkif->domid);
1327 1328 1329
			goto fail_response;
		}
	}
1330

1331
	/* Wait on all outstanding I/O's and once that has been completed
1332
	 * issue the flush.
1333 1334
	 */
	if (drain)
1335
		xen_blk_drain_io(pending_req->ring);
1336

1337 1338
	/*
	 * If we have failed at this point, we need to undo the M2P override,
1339 1340
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
1341
	 * xen_blkbk_unmap.
1342
	 */
1343
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

1346 1347 1348 1349
	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
1350 1351
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
1356
				     pages[i]->page,
				     seg[i].nsec << 9,
1358
				     seg[i].offset) == 0)) {
1359

1360 1361
			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

1365
			biolist[nbio++] = bio;
1366
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io  = end_block_io_op;
1369
			bio->bi_iter.bi_sector  = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}

1376
	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
1378
		BUG_ON(operation_flags != REQ_PREFLUSH);
1379

1380 1381 1382
		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

1384
		biolist[nbio++] = bio;
1385
		bio_set_dev(bio, preq.bdev);
1386 1387
		bio->bi_private = pending_req;
		bio->bi_end_io  = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

1391
	atomic_set(&pending_req->pendcnt, nbio);
1392 1393
	blk_start_plug(&plug);

1394
	for (i = 0; i < nbio; i++)
1395
		submit_bio(biolist[i]);
1396

1397
	/* Let the I/Os go.. */
1398
	blk_finish_plug(&plug);
1399

	if (operation == REQ_OP_READ)
1401
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
1403
		ring->st_wr_sect += preq.nr_sects;

1405
	return 0;

 fail_flush:
1408
	xen_blkbk_unmap(ring, pending_req->segments,
1409
	                pending_req->nr_segs);
 fail_response:
1411
	/* Haven't submitted any bio's yet. */
1412 1413
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
1415
	return -EIO;

 fail_put_bio:
1418
	for (i = 0; i < nbio; i++)
1419
		bio_put(biolist[i]);
1420
	atomic_set(&pending_req->pendcnt, 1);
1421
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
1423
	return -EIO;
}



1428 1429
/*
 * Put a response on the ring on how the operation fared.
 */
1431
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
1434
	struct blkif_response *resp;
	unsigned long     flags;
1436
	union blkif_back_rings *blk_rings;
	int notify;

1439 1440
	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
1442
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
1444 1445
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
1448 1449
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
1452 1453
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}
1458 1459 1460 1461 1462

	resp->id        = id;
	resp->operation = op;
	resp->status    = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
1465
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
	if (notify)
1467
		notify_remote_via_irq(ring->irq);
}

1470
static int __init xen_blkif_init(void)
{
1472
	int rc = 0;

1474
	if (!xen_domain())
		return -ENODEV;

1477
	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
1479 1480
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

1483 1484 1485
	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

1486
	rc = xen_blkif_interface_init();
1487 1488
	if (rc)
		goto failed_init;

1490
	rc = xen_blkif_xenbus_init();
1491 1492
	if (rc)
		goto failed_init;

1494 1495
 failed_init:
	return rc;
}

1498
module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
1501
MODULE_ALIAS("xen-backend:vbd");