/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
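
/*
 * Illustrative user-space usage (a sketch only, not part of this driver;
 * "remote_domid" and "remote_gref" are placeholders assumed to be obtained
 * from the granting domain out of band):
 *
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	struct ioctl_gntdev_map_grant_ref op = {
 *		.count         = 1,
 *		.refs[0].domid = remote_domid,
 *		.refs[0].ref   = remote_gref,
 *	};
 *	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op) == 0) {
 *		// op.index is the offset to hand to mmap(); the mapping
 *		// must be MAP_SHARED, since gntdev_mmap() below rejects
 *		// private writable mappings.
 *		void *addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				  MAP_SHARED, fd, op.index);
 *	}
 */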

#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

static int use_ptemod;

struct gntdev_priv {
	struct list_head maps;
	/* lock protects maps from concurrent changes */
	spinlock_t lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};

struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	int is_mapped;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref   *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct page **pages;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->pages)
		goto err;

	for (i = 0; i < count; i++) {
		add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (add->pages[i] == NULL)
			goto err;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

err:
	if (add->pages)
		for (i = 0; i < count; i++) {
			if (add->pages[i])
				__free_page(add->pages[i]);
		}
	kfree(add->pages);
	kfree(add->grants);
	kfree(add->map_ops);
	kfree(add->unmap_ops);
	kfree(add);
	return NULL;
}

static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
		int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

static void gntdev_put_map(struct grant_map *map)
{
	int i;

	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
	}

	if (map->pages) {
		if (!use_ptemod)
			unmap_grant_pages(map, 0, map->count);

		for (i = 0; i < map->count; i++) {
			uint32_t check, *tmp;
			if (!map->pages[i])
				continue;
			/* XXX When unmapping in an HVM domain, Xen will
			 * sometimes end up mapping the GFN to an invalid MFN.
			 * In this case, writes will be discarded and reads will
			 * return all 0xFF bytes.  Leak these unusable GFNs
			 * until Xen supports fixing their p2m mapping.
			 *
			 * Confirmed present in Xen 4.1-RC3 with HVM source
			 */
			tmp = kmap(map->pages[i]);
			*tmp = 0xdeaddead;
			mb();
			check = *tmp;
			kunmap(map->pages[i]);
			if (check == 0xdeaddead)
				__free_page(map->pages[i]);
			else
				pr_debug("Discard page %d=%ld\n", i,
					page_to_pfn(map->pages[i]));
		}
	}
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, pgtable_t token,
		unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    0 /* handle */);
	return 0;
}

static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;
	phys_addr_t addr;

	if (!use_ptemod) {
		for (i = 0; i < map->count; i++) {
			addr = (phys_addr_t)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, 0 /* handle */);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		map->unmap_ops[i].handle = map->map_ops[i].handle;
	}
	return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
			void __user *tmp;
			tmp = map->vma->vm_start + map->notify.addr;
			copy_to_user(tmp, &err, 1);
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		} else if (pgno >= offset && pgno < offset + pages) {
			uint8_t *tmp = kmap(map->pages[pgno]);
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			kunmap(map->pages[pgno]);
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	pr_debug("map %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
	err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		map->unmap_ops[offset+i].handle = 0;
	}
	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("close %p\n", vma);
	map->is_mapped = 0;
	map->vma = NULL;
	vma->vm_private_data = NULL;
	gntdev_put_map(map);
}

static struct vm_operations_struct gntdev_vmops = {
	.close = gntdev_vma_close,
};

/* ------------------------------------------------------------------ */

static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		if (!map->is_mapped)
			continue;
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		mstart = max(start, map->vma->vm_start);
		mend   = min(end,   map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end,
				start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}

struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	spin_lock_init(&priv->lock);

	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	spin_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(map);
	}
	spin_unlock(&priv->lock);

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(map);
		return err;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		gntdev_put_map(map);
		err = 0;
	}
	spin_unlock(&priv->lock);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		return -EINVAL;

	map = vma->vm_private_data;
	if (!map)
		return -EINVAL;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return 0;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	spin_lock(&priv->lock);

	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;
	rc = 0;
 unlock_out:
	spin_unlock(&priv->lock);
	return rc;
}
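
/*
 * Illustrative user-space use of the unmap notification above (a sketch
 * only; "fd" and "op.index" are assumed to come from a prior
 * IOCTL_GNTDEV_MAP_GRANT_REF call that mapped a single page):
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		// Ask the driver to clear the last byte of the mapped page
 *		// when the mapping is torn down, so the granting side can
 *		// poll that byte to detect teardown.
 *		.index  = op.index + 4096 - 1,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */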

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	map->flags = GNTMAP_host_map;
	if (!(vma->vm_flags & VM_WRITE))
		map->flags |= GNTMAP_readonly;

	spin_unlock(&priv->lock);

	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			printk(KERN_WARNING "find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

	map->is_mapped = 1;

	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;

out_put_map:
	gntdev_put_map(map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = xen_pv_domain();

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		printk(KERN_ERR "Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */