/******************************************************************************
 * Client-facing interface for the Xenbus driver.  In other words, the
 * interface between the Xenbus and the device-specific code, be it the
 * frontend or the backend of that driver.
 *
 * Copyright (C) 2005 XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/xen.h>
#include <xen/features.h>

#include "xenbus_probe.h"

struct xenbus_map_node {
	struct list_head next;
	union {
		struct {
			struct vm_struct *area;
		} pv;
		struct {
			struct page *pages[XENBUS_MAX_RING_PAGES];
			void *addr;
		} hvm;
	};
	grant_handle_t handles[XENBUS_MAX_RING_PAGES];
	unsigned int   nr_handles;
};

static DEFINE_SPINLOCK(xenbus_valloc_lock);
static LIST_HEAD(xenbus_valloc_pages);

struct xenbus_ring_ops {
	int (*map)(struct xenbus_device *dev,
		   grant_ref_t *gnt_refs, unsigned int nr_grefs,
		   void **vaddr);
	int (*unmap)(struct xenbus_device *dev, void *vaddr);
};

static const struct xenbus_ring_ops *ring_ops __read_mostly;

const char *xenbus_strstate(enum xenbus_state state)
{
	static const char *const name[] = {
		[ XenbusStateUnknown      ] = "Unknown",
		[ XenbusStateInitialising ] = "Initialising",
		[ XenbusStateInitWait     ] = "InitWait",
		[ XenbusStateInitialised  ] = "Initialised",
		[ XenbusStateConnected    ] = "Connected",
		[ XenbusStateClosing      ] = "Closing",
		[ XenbusStateClosed	  ] = "Closed",
		[XenbusStateReconfiguring] = "Reconfiguring",
		[XenbusStateReconfigured] = "Reconfigured",
	};
	return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID";
}
EXPORT_SYMBOL_GPL(xenbus_strstate);
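
/*
 * Illustrative use of xenbus_strstate() in log output; "dev" stands in
 * for whatever struct xenbus_device the caller already holds:
 *
 *	dev_dbg(&dev->dev, "state is now %s\n",
 *		xenbus_strstate(dev->state));
 */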

/**
 * xenbus_watch_path - register a watch
 * @dev: xenbus device
 * @path: path to watch
 * @watch: watch to register
 * @callback: callback to register
 *
 * Register a @watch on the given path, using the given xenbus_watch structure
 * for storage, and the given @callback function as the callback.  Return 0 on
 * success, or -errno on error.  On success, the given @path will be saved as
 * @watch->node, and remains the caller's to free.  On error, @watch->node will
 * be NULL, the device will switch to %XenbusStateClosing, and the error will
 * be saved in the store.
 */
int xenbus_watch_path(struct xenbus_device *dev, const char *path,
		      struct xenbus_watch *watch,
		      void (*callback)(struct xenbus_watch *,
				       const char **, unsigned int))
{
	int err;

	watch->node = path;
	watch->callback = callback;

	err = register_xenbus_watch(watch);

	if (err) {
		watch->node = NULL;
		watch->callback = NULL;
		xenbus_dev_fatal(dev, err, "adding watch on %s", path);
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_path);
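
/*
 * Minimal usage sketch (not part of this file): a driver watching its
 * peer's directory.  The callback name and surrounding context are
 * assumptions for illustration only.
 *
 *	static void otherend_changed(struct xenbus_watch *watch,
 *				     const char **vec, unsigned int len)
 *	{
 *		...react to the change under the watched path...
 *	}
 *
 *	err = xenbus_watch_path(dev, dev->otherend,
 *				&dev->otherend_watch, otherend_changed);
 */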


/**
 * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path
 * @dev: xenbus device
 * @watch: watch to register
 * @callback: callback to register
 * @pathfmt: format of path to watch
 *
 * Register a watch on the path built from @pathfmt, using the given
 * xenbus_watch structure for storage, and the given @callback function as
 * the callback.  Return 0 on success, or -errno on error.  On success, the
 * watched path will be saved as @watch->node, and becomes the caller's to
 * kfree().  On error, watch->node will be NULL, so the caller has nothing to
 * free, the device will switch to %XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_watch_pathfmt(struct xenbus_device *dev,
			 struct xenbus_watch *watch,
			 void (*callback)(struct xenbus_watch *,
					const char **, unsigned int),
			 const char *pathfmt, ...)
{
	int err;
	va_list ap;
	char *path;

	va_start(ap, pathfmt);
	path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap);
	va_end(ap);

	if (!path) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch");
		return -ENOMEM;
	}
	err = xenbus_watch_path(dev, path, watch, callback);

	if (err)
		kfree(path);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt);
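
/*
 * Usage sketch with hypothetical names: watch one node under the peer's
 * directory.  The formatted path is allocated with kvasprintf() and, on
 * success, is the caller's to kfree() via watch->node.
 *
 *	err = xenbus_watch_pathfmt(dev, &info->watch, watch_changed,
 *				   "%s/state", dev->otherend);
 */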

static void xenbus_switch_fatal(struct xenbus_device *, int, int,
				const char *, ...);

static int
__xenbus_switch_state(struct xenbus_device *dev,
		      enum xenbus_state state, int depth)
{
	/* We check whether the state is currently set to the given value, and
	   if not, then the state is set.  We don't want to unconditionally
	   write the given state, because we don't want to fire watches
	   unnecessarily.  Furthermore, if the node has gone, we don't write
	   to it, as the device will be tearing down, and we don't want to
	   resurrect that directory.

	   Note that, because of this cached value of our state, this
	   function will not take a caller's Xenstore transaction
	   (something it was trying to in the past) because dev->state
	   would not get reset if the transaction was aborted.
	 */

	struct xenbus_transaction xbt;
	int current_state;
	int err, abort;

	if (state == dev->state)
		return 0;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "starting transaction");
		return 0;
	}

	err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state);
	if (err != 1)
		goto abort;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_switch_fatal(dev, depth, err, "writing new state");
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		xenbus_switch_fatal(dev, depth, err, "ending transaction");
	} else
		dev->state = state;

	return 0;
}

/**
 * xenbus_switch_state
 * @dev: xenbus device
 * @state: new state
 *
 * Advertise in the store a change of the given driver to the given @state.
 * Return 0 on success, or -errno on error.  On error, the device will switch
 * to XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state)
{
	return __xenbus_switch_state(dev, state, 0);
}

EXPORT_SYMBOL_GPL(xenbus_switch_state);
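
/*
 * Typical (hypothetical) call sites: a frontend announcing that its
 * shared rings are set up, and later acknowledging a close request:
 *
 *	xenbus_switch_state(dev, XenbusStateInitialised);
 *	...
 *	xenbus_switch_state(dev, XenbusStateClosing);
 */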

int xenbus_frontend_closed(struct xenbus_device *dev)
{
	xenbus_switch_state(dev, XenbusStateClosed);
	complete(&dev->down);
	return 0;
}
EXPORT_SYMBOL_GPL(xenbus_frontend_closed);

/**
 * Return the path to the error node for the given device, or NULL on failure.
 * If the value returned is non-NULL, then it is the caller's to kfree.
 */
static char *error_path(struct xenbus_device *dev)
{
	return kasprintf(GFP_KERNEL, "error/%s", dev->nodename);
}


static void xenbus_va_dev_error(struct xenbus_device *dev, int err,
				const char *fmt, va_list ap)
{
	unsigned int len;
	char *printf_buffer = NULL;
	char *path_buffer = NULL;

#define PRINTF_BUFFER_SIZE 4096
	printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL);
	if (printf_buffer == NULL)
		goto fail;

	len = sprintf(printf_buffer, "%i ", -err);
	vsnprintf(printf_buffer+len, PRINTF_BUFFER_SIZE-len, fmt, ap);

	dev_err(&dev->dev, "%s\n", printf_buffer);

	path_buffer = error_path(dev);

	if (path_buffer == NULL) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

	if (xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer) != 0) {
		dev_err(&dev->dev, "failed to write error node for %s (%s)\n",
		       dev->nodename, printf_buffer);
		goto fail;
	}

fail:
	kfree(printf_buffer);
	kfree(path_buffer);
}


/**
 * xenbus_dev_error
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Report the given negative errno into the store, along with the given
 * formatted message.
 */
void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);
}
EXPORT_SYMBOL_GPL(xenbus_dev_error);

/**
 * xenbus_dev_fatal
 * @dev: xenbus device
 * @err: error to report
 * @fmt: error message format
 *
 * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by
 * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly
 * closedown of this driver and its peer.
 */

void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	xenbus_switch_state(dev, XenbusStateClosing);
}
EXPORT_SYMBOL_GPL(xenbus_dev_fatal);

/**
 * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps
 * avoid recursion within xenbus_switch_state.
 */
static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err,
				const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	xenbus_va_dev_error(dev, err, fmt, ap);
	va_end(ap);

	if (!depth)
		__xenbus_switch_state(dev, XenbusStateClosing, 1);
}

/**
 * xenbus_grant_ring
 * @dev: xenbus device
 * @vaddr: starting virtual address of the ring
 * @nr_pages: number of pages to be granted
 * @grefs: grant reference array to be filled in
 *
 * Grant access to the given @vaddr to the peer of the given device.
 * Then fill in @grefs with grant references.  Return 0 on success, or
 * -errno on error.  On error, the device will switch to
 * XenbusStateClosing, and the error will be saved in the store.
 */
int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
		      unsigned int nr_pages, grant_ref_t *grefs)
{
	int err;
	int i, j;

	for (i = 0; i < nr_pages; i++) {
		err = gnttab_grant_foreign_access(dev->otherend_id,
						  virt_to_gfn(vaddr), 0);
		if (err < 0) {
			xenbus_dev_fatal(dev, err,
					 "granting access to ring page");
			goto fail;
		}
		grefs[i] = err;

		vaddr = vaddr + PAGE_SIZE;
	}

	return 0;

fail:
	for (j = 0; j < i; j++)
		gnttab_end_foreign_access_ref(grefs[j], 0);
	return err;
}
EXPORT_SYMBOL_GPL(xenbus_grant_ring);
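
/*
 * Usage sketch with assumed names: a frontend granting a one-page shared
 * ring to its backend and publishing the reference.  "info->sring" is a
 * page the caller allocated; "fail" is a hypothetical error label.
 *
 *	grant_ref_t gref;
 *
 *	err = xenbus_grant_ring(dev, info->sring, 1, &gref);
 *	if (err < 0)
 *		goto fail;
 *	err = xenbus_printf(XBT_NIL, dev->nodename, "ring-ref", "%u", gref);
 */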


/**
 * Allocate an event channel for the given xenbus_device, assigning the newly
 * created local port to *port.  Return 0 on success, or -errno on error.  On
 * error, the device will switch to XenbusStateClosing, and the error will be
 * saved in the store.
 */
int xenbus_alloc_evtchn(struct xenbus_device *dev, int *port)
{
	struct evtchn_alloc_unbound alloc_unbound;
	int err;

	alloc_unbound.dom = DOMID_SELF;
	alloc_unbound.remote_dom = dev->otherend_id;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
					  &alloc_unbound);
	if (err)
		xenbus_dev_fatal(dev, err, "allocating event channel");
	else
		*port = alloc_unbound.port;

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn);
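
/*
 * Sketch of the usual pairing (names assumed): allocate an unbound
 * channel, advertise the port to the peer, and release it on teardown
 * with xenbus_free_evtchn():
 *
 *	int evtchn;
 *
 *	err = xenbus_alloc_evtchn(dev, &evtchn);
 *	if (err)
 *		return err;
 *	err = xenbus_printf(XBT_NIL, dev->nodename,
 *			    "event-channel", "%u", evtchn);
 */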


/**
 * Free an existing event channel. Returns 0 on success or -errno on error.
 */
int xenbus_free_evtchn(struct xenbus_device *dev, int port)
{
	struct evtchn_close close;
	int err;

	close.port = port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
	if (err)
		xenbus_dev_error(dev, err, "freeing event channel %d", port);

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_free_evtchn);


/**
 * xenbus_map_ring_valloc
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @vaddr: pointer to address to be filled out by mapping
 *
 * Map @nr_grefs pages of memory into this domain from another
 * domain's grant table.  xenbus_map_ring_valloc allocates @nr_grefs
 * pages of virtual address space, maps the pages to that address, and
 * sets *vaddr to that address.  Returns 0 on success, and GNTST_*
 * (see xen/include/interface/grant_table.h) or -ENOMEM / -EINVAL on
 * error. If an error is returned, the device will switch to
 * XenbusStateClosing and the error message will be saved in XenStore.
 */
int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs,
			   unsigned int nr_grefs, void **vaddr)
{
	return ring_ops->map(dev, gnt_refs, nr_grefs, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
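
/*
 * Backend-side usage sketch (hypothetical names): map the ring a
 * frontend granted.  "ring_ref" would have been read from the
 * frontend's store directory beforehand.
 *
 *	void *addr;
 *
 *	err = xenbus_map_ring_valloc(dev, &ring_ref, 1, &addr);
 *	if (err)
 *		return err;
 *	...use the ring at addr; unmap with xenbus_unmap_ring_vfree()...
 */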

/* N.B. sizeof(phys_addr_t) doesn't always equal sizeof(unsigned
 * long), e.g. 32-on-64.  Caller is responsible for preparing the
 * right array to feed into this function. */
static int __xenbus_map_ring(struct xenbus_device *dev,
			     grant_ref_t *gnt_refs,
			     unsigned int nr_grefs,
			     grant_handle_t *handles,
			     phys_addr_t *addrs,
			     unsigned int flags,
			     bool *leaked)
{
	struct gnttab_map_grant_ref map[XENBUS_MAX_RING_PAGES];
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
	int i, j;
	int err = GNTST_okay;

	if (nr_grefs > XENBUS_MAX_RING_PAGES)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++) {
		memset(&map[i], 0, sizeof(map[i]));
		gnttab_set_map_op(&map[i], addrs[i], flags, gnt_refs[i],
				  dev->otherend_id);
		handles[i] = INVALID_GRANT_HANDLE;
	}

	gnttab_batch_map(map, i);

	for (i = 0; i < nr_grefs; i++) {
		if (map[i].status != GNTST_okay) {
			err = map[i].status;
			xenbus_dev_fatal(dev, map[i].status,
					 "mapping in shared page %d from domain %d",
					 gnt_refs[i], dev->otherend_id);
			goto fail;
		} else
			handles[i] = map[i].handle;
	}

	return GNTST_okay;

 fail:
	for (i = j = 0; i < nr_grefs; i++) {
		if (handles[i] != INVALID_GRANT_HANDLE) {
			memset(&unmap[j], 0, sizeof(unmap[j]));
			gnttab_set_unmap_op(&unmap[j], (phys_addr_t)addrs[i],
					    GNTMAP_host_map, handles[i]);
			j++;
		}
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, j))
		BUG();

	*leaked = false;
	for (i = 0; i < j; i++) {
		if (unmap[i].status != GNTST_okay) {
			*leaked = true;
			break;
		}
	}

	return err;
}

static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
				     grant_ref_t *gnt_refs,
				     unsigned int nr_grefs,
				     void **vaddr)
{
	struct xenbus_map_node *node;
	struct vm_struct *area;
	pte_t *ptes[XENBUS_MAX_RING_PAGES];
	phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
	int err = GNTST_okay;
	int i;
	bool leaked;

	*vaddr = NULL;

	if (nr_grefs > XENBUS_MAX_RING_PAGES)
		return -EINVAL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	area = alloc_vm_area(PAGE_SIZE * nr_grefs, ptes);
	if (!area) {
		kfree(node);
		return -ENOMEM;
	}

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = arbitrary_virt_to_machine(ptes[i]).maddr;

	err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles,
				phys_addrs,
				GNTMAP_host_map | GNTMAP_contains_pte,
				&leaked);
	if (err)
		goto failed;

	node->nr_handles = nr_grefs;
	node->pv.area = area;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = area->addr;
	return 0;

failed:
	if (!leaked)
		free_vm_area(area);
	else
		pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs);

	kfree(node);
	return err;
}

static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
				      grant_ref_t *gnt_ref,
				      unsigned int nr_grefs,
				      void **vaddr)
{
	struct xenbus_map_node *node;
	int i;
	int err;
	void *addr;
	bool leaked = false;
	/* Why do we need two arrays? See comment of __xenbus_map_ring */
	phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
	unsigned long addrs[XENBUS_MAX_RING_PAGES];

	if (nr_grefs > XENBUS_MAX_RING_PAGES)
		return -EINVAL;

	*vaddr = NULL;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	err = alloc_xenballooned_pages(nr_grefs, node->hvm.pages,
				       false /* lowmem */);
	if (err)
		goto out_err;

	for (i = 0; i < nr_grefs; i++) {
		unsigned long pfn = page_to_pfn(node->hvm.pages[i]);
		phys_addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
		addrs[i] = (unsigned long)pfn_to_kaddr(pfn);
	}

	err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles,
				phys_addrs, GNTMAP_host_map, &leaked);
	node->nr_handles = nr_grefs;

	if (err)
		goto out_free_ballooned_pages;

	addr = vmap(node->hvm.pages, nr_grefs, VM_MAP | VM_IOREMAP,
		    PAGE_KERNEL);
	if (!addr) {
		err = -ENOMEM;
		goto out_xenbus_unmap_ring;
	}

	node->hvm.addr = addr;

	spin_lock(&xenbus_valloc_lock);
	list_add(&node->next, &xenbus_valloc_pages);
	spin_unlock(&xenbus_valloc_lock);

	*vaddr = addr;
	return 0;

 out_xenbus_unmap_ring:
	if (!leaked)
		xenbus_unmap_ring(dev, node->handles, node->nr_handles,
				  addrs);
	else
		pr_alert("leaking %p size %u page(s)",
			 addr, nr_grefs);
 out_free_ballooned_pages:
	if (!leaked)
		free_xenballooned_pages(nr_grefs, node->hvm.pages);
 out_err:
	kfree(node);
	return err;
}


/**
 * xenbus_map_ring
 * @dev: xenbus device
 * @gnt_refs: grant reference array
 * @nr_grefs: number of grant references
 * @handles: pointer to grant handle to be filled
 * @vaddrs: addresses to be mapped to
 * @leaked: fail to clean up a failed map, caller should not free vaddr
 *
 * Map pages of memory into this domain from another domain's grant table.
 * xenbus_map_ring does not allocate the virtual address space (you must do
 * this yourself!). It only maps in the pages to the specified address.
 * Returns 0 on success, and GNTST_* (see xen/include/interface/grant_table.h)
 * or -ENOMEM / -EINVAL on error. If an error is returned, the device will
 * switch to XenbusStateClosing and the first error message will be saved in
 * XenStore.  Furthermore, if we fail to map the ring, the caller should
 * check @leaked.  If @leaked is not zero it means xenbus_map_ring failed to
 * clean up, and the caller should not free the address space of @vaddr.
 */
int xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs,
		    unsigned int nr_grefs, grant_handle_t *handles,
		    unsigned long *vaddrs, bool *leaked)
{
	phys_addr_t phys_addrs[XENBUS_MAX_RING_PAGES];
	int i;

	if (nr_grefs > XENBUS_MAX_RING_PAGES)
		return -EINVAL;

	for (i = 0; i < nr_grefs; i++)
		phys_addrs[i] = (unsigned long)vaddrs[i];

	return __xenbus_map_ring(dev, gnt_refs, nr_grefs, handles,
				 phys_addrs, GNTMAP_host_map, leaked);
}
EXPORT_SYMBOL_GPL(xenbus_map_ring);


/**
 * xenbus_unmap_ring_vfree
 * @dev: xenbus device
 * @vaddr: addr to unmap
 *
 * Based on Rusty Russell's skeleton driver's unmap_page.
 * Unmap a page of memory in this domain that was imported from another domain.
 * Use xenbus_unmap_ring_vfree if you mapped in your memory with
 * xenbus_map_ring_valloc (it will free the virtual address space).
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
	return ring_ops->unmap(dev, vaddr);
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
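
/*
 * Teardown sketch matching the xenbus_map_ring_valloc() example above;
 * "addr" is the pointer the map call filled in:
 *
 *	err = xenbus_unmap_ring_vfree(dev, addr);
 */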

static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
{
	struct xenbus_map_node *node;
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
	unsigned int level;
	int i;
	bool leaked = false;
	int err;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		if (node->pv.area->addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++) {
		unsigned long addr;

		memset(&unmap[i], 0, sizeof(unmap[i]));
		addr = (unsigned long)vaddr + (PAGE_SIZE * i);
		unmap[i].host_addr = arbitrary_virt_to_machine(
			lookup_address(addr, &level)).maddr;
		unmap[i].dev_bus_addr = 0;
		unmap[i].handle = node->handles[i];
	}

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	leaked = false;
	for (i = 0; i < node->nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			leaked = true;
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 node->handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	if (!leaked)
		free_vm_area(node->pv.area);
	else
		pr_alert("leaking VM area %p size %u page(s)",
			 node->pv.area, node->nr_handles);

	kfree(node);
	return err;
}

static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
{
	int rv;
	struct xenbus_map_node *node;
	void *addr;
	unsigned long addrs[XENBUS_MAX_RING_PAGES];
	int i;

	spin_lock(&xenbus_valloc_lock);
	list_for_each_entry(node, &xenbus_valloc_pages, next) {
		addr = node->hvm.addr;
		if (addr == vaddr) {
			list_del(&node->next);
			goto found;
		}
	}
	node = addr = NULL;
 found:
	spin_unlock(&xenbus_valloc_lock);

	if (!node) {
		xenbus_dev_error(dev, -ENOENT,
				 "can't find mapped virtual address %p", vaddr);
		return GNTST_bad_virt_addr;
	}

	for (i = 0; i < node->nr_handles; i++)
		addrs[i] = (unsigned long)pfn_to_kaddr(page_to_pfn(node->hvm.pages[i]));

	rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
			       addrs);
	if (!rv) {
		vunmap(vaddr);
		free_xenballooned_pages(node->nr_handles, node->hvm.pages);
	}
	else
		WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
		     node->nr_handles);

	kfree(node);
	return rv;
}

/**
 * xenbus_unmap_ring
 * @dev: xenbus device
 * @handles: grant handle array
 * @nr_handles: number of handles in the array
 * @vaddrs: addresses to unmap
 *
 * Unmap memory in this domain that was imported from another domain.
 * Returns 0 on success and returns GNTST_* on error
 * (see xen/include/interface/grant_table.h).
 */
int xenbus_unmap_ring(struct xenbus_device *dev,
		      grant_handle_t *handles, unsigned int nr_handles,
		      unsigned long *vaddrs)
{
	struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_PAGES];
	int i;
	int err;

	if (nr_handles > XENBUS_MAX_RING_PAGES)
		return -EINVAL;

	for (i = 0; i < nr_handles; i++)
		gnttab_set_unmap_op(&unmap[i], vaddrs[i],
				    GNTMAP_host_map, handles[i]);

	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i))
		BUG();

	err = GNTST_okay;
	for (i = 0; i < nr_handles; i++) {
		if (unmap[i].status != GNTST_okay) {
			xenbus_dev_error(dev, unmap[i].status,
					 "unmapping page at handle %d error %d",
					 handles[i], unmap[i].status);
			err = unmap[i].status;
			break;
		}
	}

	return err;
}
EXPORT_SYMBOL_GPL(xenbus_unmap_ring);


/**
 * xenbus_read_driver_state
 * @path: path for driver
 *
 * Return the state of the driver rooted at the given store path, or
 * XenbusStateUnknown if no state can be read.
 */
enum xenbus_state xenbus_read_driver_state(const char *path)
{
	enum xenbus_state result;
	int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL);
	if (err)
		result = XenbusStateUnknown;

	return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
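
/*
 * Illustrative call (assumed context): polling the peer's state from a
 * driver holding a xenbus_device "dev":
 *
 *	enum xenbus_state state =
 *		xenbus_read_driver_state(dev->otherend);
 *	if (state == XenbusStateConnected)
 *		...the peer is ready...
 */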

static const struct xenbus_ring_ops ring_ops_pv = {
	.map = xenbus_map_ring_valloc_pv,
	.unmap = xenbus_unmap_ring_vfree_pv,
};

static const struct xenbus_ring_ops ring_ops_hvm = {
	.map = xenbus_map_ring_valloc_hvm,
	.unmap = xenbus_unmap_ring_vfree_hvm,
};

void __init xenbus_ring_ops_init(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		ring_ops = &ring_ops_pv;
	else
		ring_ops = &ring_ops_hvm;
}