/******************************************************************************
 * gntdev.c
 *
 * Device for accessing (in user-space) pages that have been granted by other
 * domains.
 *
 * Copyright (c) 2006-2007, D G Murray.
 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/highmem.h>

#include <xen/xen.h>
#include <xen/grant_table.h>
#include <xen/gntdev.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
	      "Gerd Hoffmann <kraxel@redhat.com>");
MODULE_DESCRIPTION("User-space granted page access driver");

static int limit = 1024*1024;
module_param(limit, int, 0644);
MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
		"the gntdev device");

static atomic_t pages_mapped = ATOMIC_INIT(0);

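/* use_ptemod: on PV domains a grant must be mapped by rewriting the PTEs
 * of the user VMA (GNTMAP_contains_pte); on HVM/auto-translated domains
 * it is mapped into ordinary kernel-allocated pages instead. */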
static int use_ptemod;

struct gntdev_priv {
	struct list_head maps;
	/* lock protects maps from concurrent changes */
	spinlock_t lock;
	struct mm_struct *mm;
	struct mmu_notifier mn;
};

struct unmap_notify {
	int flags;
	/* Address relative to the start of the grant_map */
	int addr;
	int event;
};

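/*
 * One grant_map describes a contiguous run of granted pages.  @index and
 * @count are in page units; index << PAGE_SHIFT is the pseudo file offset
 * user space passes to mmap() to actually map the grants.
 */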
struct grant_map {
	struct list_head next;
	struct vm_area_struct *vma;
	int index;
	int count;
	int flags;
	atomic_t users;
	struct unmap_notify notify;
	struct ioctl_gntdev_grant_ref *grants;
	struct gnttab_map_grant_ref   *map_ops;
	struct gnttab_unmap_grant_ref *unmap_ops;
	struct page **pages;
};

static int unmap_grant_pages(struct grant_map *map, int offset, int pages);

/* ------------------------------------------------------------------ */

static void gntdev_print_maps(struct gntdev_priv *priv,
			      char *text, int text_index)
{
#ifdef DEBUG
	struct grant_map *map;

	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
	list_for_each_entry(map, &priv->maps, next)
		pr_debug("  index %2d, count %2d %s\n",
		       map->index, map->count,
		       map->index == text_index && text ? text : "");
#endif
}

static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
{
	struct grant_map *add;
	int i;

	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
	if (NULL == add)
		return NULL;

	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
	if (NULL == add->grants    ||
	    NULL == add->map_ops   ||
	    NULL == add->unmap_ops ||
	    NULL == add->pages)
		goto err;

	for (i = 0; i < count; i++) {
		add->pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (add->pages[i] == NULL)
			goto err;
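		/* handle == -1 marks "no grant mapping here yet"; the
		 * unmap paths skip over these holes later. */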
		add->map_ops[i].handle = -1;
		add->unmap_ops[i].handle = -1;
	}

	add->index = 0;
	add->count = count;
	atomic_set(&add->users, 1);

	return add;

err:
	if (add->pages)
		for (i = 0; i < count; i++) {
			if (add->pages[i])
				__free_page(add->pages[i]);
		}
	kfree(add->pages);
	kfree(add->grants);
	kfree(add->map_ops);
	kfree(add->unmap_ops);
	kfree(add);
	return NULL;
}

static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (add->index + add->count < map->index) {
			list_add_tail(&add->next, &map->next);
			goto done;
		}
		add->index = map->index + map->count;
	}
	list_add_tail(&add->next, &priv->maps);

done:
	gntdev_print_maps(priv, "[new]", add->index);
}

static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
		int index, int count)
{
	struct grant_map *map;

	list_for_each_entry(map, &priv->maps, next) {
		if (map->index != index)
			continue;
		if (count && map->count != count)
			continue;
		return map;
	}
	return NULL;
}

static void gntdev_put_map(struct grant_map *map)
{
	int i;

	if (!map)
		return;

	if (!atomic_dec_and_test(&map->users))
		return;

	atomic_sub(map->count, &pages_mapped);

	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
		notify_remote_via_evtchn(map->notify.event);
	}

	if (map->pages) {
		if (!use_ptemod)
			unmap_grant_pages(map, 0, map->count);

		for (i = 0; i < map->count; i++) {
			uint32_t check, *tmp;
			if (!map->pages[i])
				continue;
			/* XXX When unmapping in an HVM domain, Xen will
			 * sometimes end up mapping the GFN to an invalid MFN.
			 * In this case, writes will be discarded and reads will
			 * return all 0xFF bytes.  Leak these unusable GFNs
			 * until Xen supports fixing their p2m mapping.
			 *
			 * Confirmed present in Xen 4.1-RC3 with HVM source
			 */
			tmp = kmap(map->pages[i]);
			*tmp = 0xdeaddead;
			mb();
			check = *tmp;
			kunmap(map->pages[i]);
			if (check == 0xdeaddead)
				__free_page(map->pages[i]);
			else
				pr_debug("Discard page %d=%ld\n", i,
					page_to_pfn(map->pages[i]));
		}
	}
	kfree(map->pages);
	kfree(map->grants);
	kfree(map->map_ops);
	kfree(map->unmap_ops);
	kfree(map);
}

/* ------------------------------------------------------------------ */

static int find_grant_ptes(pte_t *pte, pgtable_t token,
		unsigned long addr, void *data)
{
	struct grant_map *map = data;
	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
	u64 pte_maddr;

	BUG_ON(pgnr >= map->count);
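	/* For the PTE-modifying (PV) case the map op carries the machine
	 * address of the PTE itself; GNTMAP_contains_pte tells Xen to
	 * write the grant mapping straight into that PTE. */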
	pte_maddr = arbitrary_virt_to_machine(pte).maddr;

	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
			  map->grants[pgnr].ref,
			  map->grants[pgnr].domid);
	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
			    -1 /* handle */);
	return 0;
}

static int map_grant_pages(struct grant_map *map)
{
	int i, err = 0;
	phys_addr_t addr;

	if (!use_ptemod) {
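		/* HVM/auto-translated: map each grant at the kernel
		 * address of its pre-allocated backing page. */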
		/* Note: it could already be mapped */
		if (map->map_ops[0].handle != -1)
			return 0;
		for (i = 0; i < map->count; i++) {
			addr = (phys_addr_t)
				pfn_to_kaddr(page_to_pfn(map->pages[i]));
			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
				map->grants[i].ref,
				map->grants[i].domid);
			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
				map->flags, -1 /* handle */);
		}
	}

	pr_debug("map %d+%d\n", map->index, map->count);
	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
	if (err)
		return err;

	for (i = 0; i < map->count; i++) {
		if (map->map_ops[i].status)
			err = -EINVAL;
		else {
			BUG_ON(map->map_ops[i].handle == -1);
			map->unmap_ops[i].handle = map->map_ops[i].handle;
			pr_debug("map handle=%d\n", map->map_ops[i].handle);
		}
	}
	return err;
}

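/* If user space requested UNMAP_NOTIFY_CLEAR_BYTE, zero the notify byte
 * while the page is still mapped, then tear down the grant mappings. */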
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int i, err = 0;

	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
		int pgno = (map->notify.addr >> PAGE_SHIFT);
		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
			void __user *tmp = (void __user *)
				(map->vma->vm_start + map->notify.addr);
			if (copy_to_user(tmp, &err, 1))
				return -EFAULT;
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		} else if (pgno >= offset && pgno < offset + pages) {
			uint8_t *tmp = kmap(map->pages[pgno]);
			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
			kunmap(map->pages[pgno]);
			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
		}
	}

	err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
	if (err)
		return err;

	for (i = 0; i < pages; i++) {
		if (map->unmap_ops[offset+i].status)
			err = -EINVAL;
		pr_debug("unmap handle=%d st=%d\n",
			map->unmap_ops[offset+i].handle,
			map->unmap_ops[offset+i].status);
		map->unmap_ops[offset+i].handle = -1;
	}
	return err;
}

static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
	int range, err = 0;

	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);

	/* It is possible the requested range will have a "hole" where we
	 * already unmapped some of the grants. Only unmap valid ranges.
	 */
	while (pages && !err) {
		while (pages && map->unmap_ops[offset].handle == -1) {
			offset++;
			pages--;
		}
		range = 0;
		while (range < pages) {
			if (map->unmap_ops[offset+range].handle == -1)
				break;
			range++;
		}
		err = __unmap_grant_pages(map, offset, range);
		offset += range;
		pages -= range;
	}

	return err;
}

/* ------------------------------------------------------------------ */

static void gntdev_vma_close(struct vm_area_struct *vma)
{
	struct grant_map *map = vma->vm_private_data;

	pr_debug("close %p\n", vma);
	map->vma = NULL;
	vma->vm_private_data = NULL;
	gntdev_put_map(map);
}

static struct vm_operations_struct gntdev_vmops = {
	.close = gntdev_vma_close,
};

/* ------------------------------------------------------------------ */
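/* mmu notifier: when (part of) an address space is invalidated or torn
 * down, unmap any grant mappings that overlap the affected range so the
 * grants are released before the pages go away. */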

static void mn_invl_range_start(struct mmu_notifier *mn,
				struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	unsigned long mstart, mend;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		if (map->vma->vm_start >= end)
			continue;
		if (map->vma->vm_end <= start)
			continue;
		mstart = max(start, map->vma->vm_start);
		mend   = min(end,   map->vma->vm_end);
		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end,
				start, end, mstart, mend);
		err = unmap_grant_pages(map,
					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
					(mend - mstart) >> PAGE_SHIFT);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}

static void mn_invl_page(struct mmu_notifier *mn,
			 struct mm_struct *mm,
			 unsigned long address)
{
	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
}

static void mn_release(struct mmu_notifier *mn,
		       struct mm_struct *mm)
{
	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
	struct grant_map *map;
	int err;

	spin_lock(&priv->lock);
	list_for_each_entry(map, &priv->maps, next) {
		if (!map->vma)
			continue;
		pr_debug("map %d+%d (%lx %lx)\n",
				map->index, map->count,
				map->vma->vm_start, map->vma->vm_end);
		err = unmap_grant_pages(map, /* offset */ 0, map->count);
		WARN_ON(err);
	}
	spin_unlock(&priv->lock);
}

struct mmu_notifier_ops gntdev_mmu_ops = {
	.release                = mn_release,
	.invalidate_page        = mn_invl_page,
	.invalidate_range_start = mn_invl_range_start,
};

/* ------------------------------------------------------------------ */

static int gntdev_open(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv;
	int ret = 0;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->maps);
	spin_lock_init(&priv->lock);

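	/* PV only: grab the task's mm and register an mmu notifier so the
	 * grant PTEs can be torn down on invalidate/release. */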
	if (use_ptemod) {
		priv->mm = get_task_mm(current);
		if (!priv->mm) {
			kfree(priv);
			return -ENOMEM;
		}
		priv->mn.ops = &gntdev_mmu_ops;
		ret = mmu_notifier_register(&priv->mn, priv->mm);
		mmput(priv->mm);
	}

	if (ret) {
		kfree(priv);
		return ret;
	}

	flip->private_data = priv;
	pr_debug("priv %p\n", priv);

	return 0;
}

static int gntdev_release(struct inode *inode, struct file *flip)
{
	struct gntdev_priv *priv = flip->private_data;
	struct grant_map *map;

	pr_debug("priv %p\n", priv);

	spin_lock(&priv->lock);
	while (!list_empty(&priv->maps)) {
		map = list_entry(priv->maps.next, struct grant_map, next);
		list_del(&map->next);
		gntdev_put_map(map);
	}
	spin_unlock(&priv->lock);

	if (use_ptemod)
		mmu_notifier_unregister(&priv->mn, priv->mm);
	kfree(priv);
	return 0;
}

static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
				       struct ioctl_gntdev_map_grant_ref __user *u)
{
	struct ioctl_gntdev_map_grant_ref op;
	struct grant_map *map;
	int err;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, add %d\n", priv, op.count);
	if (unlikely(op.count <= 0))
		return -EINVAL;

	err = -ENOMEM;
	map = gntdev_alloc_map(priv, op.count);
	if (!map)
		return err;

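	/* Charge the pages against the module-wide limit up front and
	 * back out if it would be exceeded. */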
	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
		pr_debug("can't map: over limit\n");
		gntdev_put_map(map);
		return err;
	}

	if (copy_from_user(map->grants, &u->refs,
			   sizeof(map->grants[0]) * op.count) != 0) {
		gntdev_put_map(map);
		return -EFAULT;
	}

	spin_lock(&priv->lock);
	gntdev_add_map(priv, map);
	op.index = map->index << PAGE_SHIFT;
	spin_unlock(&priv->lock);

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;

	return 0;
}

static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
					 struct ioctl_gntdev_unmap_grant_ref __user *u)
{
	struct ioctl_gntdev_unmap_grant_ref op;
	struct grant_map *map;
	int err = -ENOENT;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
	if (map) {
		list_del(&map->next);
		gntdev_put_map(map);
		err = 0;
	}
	spin_unlock(&priv->lock);
	return err;
}

static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
{
	struct ioctl_gntdev_get_offset_for_vaddr op;
	struct vm_area_struct *vma;
	struct grant_map *map;

	if (copy_from_user(&op, u, sizeof(op)) != 0)
		return -EFAULT;
	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);

	vma = find_vma(current->mm, op.vaddr);
	if (!vma || vma->vm_ops != &gntdev_vmops)
		return -EINVAL;

	map = vma->vm_private_data;
	if (!map)
		return -EINVAL;

	op.offset = map->index << PAGE_SHIFT;
	op.count = map->count;

	if (copy_to_user(u, &op, sizeof(op)) != 0)
		return -EFAULT;
	return 0;
}

static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
{
	struct ioctl_gntdev_unmap_notify op;
	struct grant_map *map;
	int rc;

	if (copy_from_user(&op, u, sizeof(op)))
		return -EFAULT;

	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return -EINVAL;

	spin_lock(&priv->lock);

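	/* op.index is a byte offset into the pseudo file created by
	 * MAP_GRANT_REF; find the grant_map that covers it. */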
	list_for_each_entry(map, &priv->maps, next) {
		uint64_t begin = map->index << PAGE_SHIFT;
		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
		if (op.index >= begin && op.index < end)
			goto found;
	}
	rc = -ENOENT;
	goto unlock_out;

 found:
	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
			(map->flags & GNTMAP_readonly)) {
		rc = -EINVAL;
		goto unlock_out;
	}

	map->notify.flags = op.action;
	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
	map->notify.event = op.event_channel_port;
	rc = 0;
 unlock_out:
	spin_unlock(&priv->lock);
	return rc;
}

static long gntdev_ioctl(struct file *flip,
			 unsigned int cmd, unsigned long arg)
{
	struct gntdev_priv *priv = flip->private_data;
	void __user *ptr = (void __user *)arg;

	switch (cmd) {
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		return gntdev_ioctl_map_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		return gntdev_ioctl_unmap_grant_ref(priv, ptr);

	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);

	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		return gntdev_ioctl_notify(priv, ptr);

	default:
		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
		return -ENOIOCTLCMD;
	}

	return 0;
}

static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
{
	struct gntdev_priv *priv = flip->private_data;
	int index = vma->vm_pgoff;
	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	struct grant_map *map;
	int i, err = -EINVAL;

	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
			index, count, vma->vm_start, vma->vm_pgoff);

	spin_lock(&priv->lock);
	map = gntdev_find_map_index(priv, index, count);
	if (!map)
		goto unlock_out;
	if (use_ptemod && map->vma)
		goto unlock_out;
	if (use_ptemod && priv->mm != vma->vm_mm) {
		printk(KERN_WARNING "Huh? Other mm?\n");
		goto unlock_out;
	}

	atomic_inc(&map->users);

	vma->vm_ops = &gntdev_vmops;

	vma->vm_flags |= VM_RESERVED|VM_DONTCOPY|VM_DONTEXPAND|VM_PFNMAP;

	vma->vm_private_data = map;

	if (use_ptemod)
		map->vma = vma;

	if (map->flags) {
		if ((vma->vm_flags & VM_WRITE) &&
				(map->flags & GNTMAP_readonly))
			goto out_unlock_put;
	} else {
		map->flags = GNTMAP_host_map;
		if (!(vma->vm_flags & VM_WRITE))
			map->flags |= GNTMAP_readonly;
	}

	spin_unlock(&priv->lock);

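	/* PV: walk the new VMA's page table and point each map op at the
	 * machine address of the PTE that Xen should rewrite. */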
	if (use_ptemod) {
		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
					  vma->vm_end - vma->vm_start,
					  find_grant_ptes, map);
		if (err) {
			printk(KERN_WARNING "find_grant_ptes() failure.\n");
			goto out_put_map;
		}
	}

	err = map_grant_pages(map);
	if (err)
		goto out_put_map;

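	/* HVM: the grants now back ordinary pages; just insert those
	 * pages into the user VMA. */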
	if (!use_ptemod) {
		for (i = 0; i < count; i++) {
			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
				map->pages[i]);
			if (err)
				goto out_put_map;
		}
	}

	return 0;

unlock_out:
	spin_unlock(&priv->lock);
	return err;

out_unlock_put:
	spin_unlock(&priv->lock);
out_put_map:
	if (use_ptemod)
		map->vma = NULL;
	gntdev_put_map(map);
	return err;
}

static const struct file_operations gntdev_fops = {
	.owner = THIS_MODULE,
	.open = gntdev_open,
	.release = gntdev_release,
	.mmap = gntdev_mmap,
	.unlocked_ioctl = gntdev_ioctl
};

static struct miscdevice gntdev_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "xen/gntdev",
	.fops         = &gntdev_fops,
};

/* ------------------------------------------------------------------ */

static int __init gntdev_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	use_ptemod = xen_pv_domain();

	err = misc_register(&gntdev_miscdev);
	if (err != 0) {
		printk(KERN_ERR "Could not register gntdev device\n");
		return err;
	}
	return 0;
}

static void __exit gntdev_exit(void)
{
	misc_deregister(&gntdev_miscdev);
}

module_init(gntdev_init);
module_exit(gntdev_exit);

/* ------------------------------------------------------------------ */