/******************************************************************************
 * privcmd.c
 *
 * Interface to privileged domain-0 commands.
 *
 * Copyright (c) 2002-2004, K A Fraser, B Dragovic
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
#include <linux/miscdevice.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/balloon.h>

#include "privcmd.h"

MODULE_LICENSE("GPL");

#define PRIV_VMA_LOCKED ((void *)1)

#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif

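/*
 * IOCTL_PRIVCMD_HYPERCALL: forward a hypercall (an opcode plus up to
 * five arguments) from a privileged user-space process straight to the
 * hypervisor and hand back its return value.
 *
 * Illustrative user-space sketch (not part of this driver; assumes the
 * misc device registered below appears as /dev/xen/privcmd):
 *
 *	struct privcmd_hypercall call = {
 *		.op  = __HYPERVISOR_xen_version,
 *		.arg = { XENVER_version },
 *	};
 *	int fd = open("/dev/xen/privcmd", O_RDWR);
 *	long ver = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
 */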
static long privcmd_ioctl_hypercall(void __user *udata)
{
	struct privcmd_hypercall hypercall;
	long ret;

	if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
		return -EFAULT;

	ret = privcmd_call(hypercall.op,
			   hypercall.arg[0], hypercall.arg[1],
			   hypercall.arg[2], hypercall.arg[3],
			   hypercall.arg[4]);

	return ret;
}

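/* Free every page on the list and reinitialise the list head. */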
static void free_page_list(struct list_head *pages)
{
	struct page *p, *n;

	list_for_each_entry_safe(p, n, pages, lru)
		__free_page(p);

	INIT_LIST_HEAD(pages);
}

/*
 * Given an array of items in userspace, return a list of pages
 * containing the data.  If copying fails, either because of memory
 * allocation failure or a problem reading user memory, return an
 * error code; it's up to the caller to dispose of any partial list.
 */
static int gather_array(struct list_head *pagelist,
			unsigned nelem, size_t size,
			const void __user *data)
{
	unsigned pageidx;
	void *pagedata;
	int ret;

	if (size > PAGE_SIZE)
		return 0;

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* quiet, gcc */
	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page = alloc_page(GFP_KERNEL);

			ret = -ENOMEM;
			if (page == NULL)
				goto fail;

			pagedata = page_address(page);

			list_add_tail(&page->lru, pagelist);
			pageidx = 0;
		}

		ret = -EFAULT;
		if (copy_from_user(pagedata + pageidx, data, size))
			goto fail;

		data += size;
		pageidx += size;
	}

	ret = 0;

fail:
	return ret;
}

/*
 * Call function "fn" on each element of the array fragmented
 * over a list of pages.
 */
static int traverse_pages(unsigned nelem, size_t size,
			  struct list_head *pos,
			  int (*fn)(void *data, void *state),
			  void *state)
{
	void *pagedata;
	unsigned pageidx;
	int ret = 0;

	BUG_ON(size > PAGE_SIZE);

	pageidx = PAGE_SIZE;
	pagedata = NULL;	/* hush, gcc */

	while (nelem--) {
		if (pageidx > PAGE_SIZE-size) {
			struct page *page;
			pos = pos->next;
			page = list_entry(pos, struct page, lru);
			pagedata = page_address(page);
			pageidx = 0;
		}

		ret = (*fn)(pagedata + pageidx, state);
		if (ret)
			break;
		pageidx += size;
	}

	return ret;
}

struct mmap_mfn_state {
	unsigned long va;
	struct vm_area_struct *vma;
	domid_t domain;
};

static int mmap_mfn_range(void *data, void *state)
{
	struct privcmd_mmap_entry *msg = data;
	struct mmap_mfn_state *st = state;
	struct vm_area_struct *vma = st->vma;
	int rc;

	/* Do not allow range to wrap the address space. */
	if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
	    ((unsigned long)(msg->npages << PAGE_SHIFT) >= -st->va))
		return -EINVAL;

	/* Range chunks must be contiguous in va space. */
	if ((msg->va != st->va) ||
	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
		return -EINVAL;

	rc = xen_remap_domain_mfn_range(vma,
					msg->va & PAGE_MASK,
					msg->mfn, msg->npages,
					vma->vm_page_prot,
					st->domain, NULL);
	if (rc < 0)
		return rc;

	st->va += msg->npages << PAGE_SHIFT;

	return 0;
}

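/*
 * IOCTL_PRIVCMD_MMAP: map a set of contiguous MFN ranges, described by an
 * array of struct privcmd_mmap_entry, into a previously mmap()ed VMA.
 */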
static long privcmd_ioctl_mmap(void __user *udata)
{
	struct privcmd_mmap mmapcmd;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc;
	LIST_HEAD(pagelist);
	struct mmap_mfn_state state;

	/* We only support privcmd_ioctl_mmap_batch for auto-translated guests. */
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -ENOSYS;

	if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
		return -EFAULT;

	rc = gather_array(&pagelist,
			  mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			  mmapcmd.entry);

	if (rc || list_empty(&pagelist))
		goto out;

	down_write(&mm->mmap_sem);

	{
		struct page *page = list_first_entry(&pagelist,
						     struct page, lru);
		struct privcmd_mmap_entry *msg = page_address(page);

		vma = find_vma(mm, msg->va);
		rc = -EINVAL;

		if (!vma || (msg->va != vma->vm_start) ||
		    !privcmd_enforce_singleshot_mapping(vma))
			goto out_up;
	}

	state.va = vma->vm_start;
	state.vma = vma;
	state.domain = mmapcmd.dom;

	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
			    &pagelist,
			    mmap_mfn_range, &state);


out_up:
	up_write(&mm->mmap_sem);

out:
	free_page_list(&pagelist);

	return rc;
}

struct mmap_batch_state {
	domid_t domain;
	unsigned long va;
	struct vm_area_struct *vma;
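	/* Next slot in the ballooned-page array (auto-translated guests). */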
	int index;
	/* A tristate:
	 *      0 for no errors
	 *      1 if at least one error has happened (and no
	 *          -ENOENT errors have happened)
	 *      -ENOENT if at least 1 -ENOENT has happened.
	 */
	int global_error;
	int version;

	/* User-space mfn array to store errors in the second pass for V1. */
	xen_pfn_t __user *user_mfn;
	/* User-space int array to store errors in the second pass for V2. */
	int __user *user_err;
};

/* Auto-translated dom0 note: if the domU being created is PV, then the mfn
 * is a real mfn (a bus address). If it is auto-translated, then the mfn is
 * actually a pfn (the input to HAP).
 */
static int mmap_batch_fn(void *data, void *state)
{
	xen_pfn_t *mfnp = data;
	struct mmap_batch_state *st = state;
	struct vm_area_struct *vma = st->vma;
	struct page **pages = vma->vm_private_data;
	struct page *cur_page = NULL;
	int ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		cur_page = pages[st->index++];

	ret = xen_remap_domain_mfn_range(st->vma, st->va & PAGE_MASK, *mfnp, 1,
					 st->vma->vm_page_prot, st->domain,
					 &cur_page);

	/* Store error code for second pass. */
	if (st->version == 1) {
		if (ret < 0) {
			/*
			 * V1 encodes the error codes in the top nibble of the
			 * 32-bit mfn (with its known limitations vis-a-vis
			 * 64-bit callers).
			 */
			*mfnp |= (ret == -ENOENT) ?
						PRIVCMD_MMAPBATCH_PAGED_ERROR :
						PRIVCMD_MMAPBATCH_MFN_ERROR;
		}
	} else { /* st->version == 2 */
		*((int *) mfnp) = ret;
	}

	/* And see if it affects the global_error. */
	if (ret < 0) {
		if (ret == -ENOENT)
			st->global_error = -ENOENT;
		else {
			/* Record that at least one error has happened. */
			if (st->global_error == 0)
				st->global_error = 1;
		}
	}
	st->va += PAGE_SIZE;

	return 0;
}

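/*
 * Second pass over the gathered array: copy the per-frame status back to
 * user space (V1 encodes it in the mfn array itself, V2 uses the separate
 * error array).
 */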
static int mmap_return_errors(void *data, void *state)
{
	struct mmap_batch_state *st = state;

	if (st->version == 1) {
		xen_pfn_t mfnp = *((xen_pfn_t *) data);
		if (mfnp & PRIVCMD_MMAPBATCH_MFN_ERROR)
			return __put_user(mfnp, st->user_mfn++);
		else
			st->user_mfn++;
	} else { /* st->version == 2 */
		int err = *((int *) data);
		if (err)
			return __put_user(err, st->user_err++);
		else
			st->user_err++;
	}

	return 0;
}

/* Allocate pfns that are then mapped with gmfns from the foreign domid.
 * Update the vma with the page info to use later.
 * Returns: 0 on success, otherwise -errno
 */
static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
{
	int rc;
	struct page **pages;

	pages = kcalloc(numpgs, sizeof(pages[0]), GFP_KERNEL);
	if (pages == NULL)
		return -ENOMEM;

	rc = alloc_xenballooned_pages(numpgs, pages, 0);
	if (rc != 0) {
		pr_warn("%s Could not alloc %d pfns rc:%d\n", __func__,
			numpgs, rc);
		kfree(pages);
		return -ENOMEM;
	}
	BUG_ON(vma->vm_private_data != PRIV_VMA_LOCKED);
	vma->vm_private_data = pages;

	return 0;
}

static struct vm_operations_struct privcmd_vm_ops;

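/*
 * IOCTL_PRIVCMD_MMAPBATCH(_V2): map an arbitrary list of foreign frames
 * into a single VMA, recording the outcome of each frame individually.
 */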
static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
{
	int ret;
	struct privcmd_mmapbatch_v2 m;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long nr_pages;
	LIST_HEAD(pagelist);
	struct mmap_batch_state state;

	switch (version) {
	case 1:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch)))
			return -EFAULT;
		/* Returns per-frame error in m.arr. */
		m.err = NULL;
		if (!access_ok(VERIFY_WRITE, m.arr, m.num * sizeof(*m.arr)))
			return -EFAULT;
		break;
	case 2:
		if (copy_from_user(&m, udata, sizeof(struct privcmd_mmapbatch_v2)))
			return -EFAULT;
		/* Returns per-frame error code in m.err. */
		if (!access_ok(VERIFY_WRITE, m.err, m.num * (sizeof(*m.err))))
			return -EFAULT;
		break;
	default:
		return -EINVAL;
	}

	nr_pages = m.num;
	if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
		return -EINVAL;

	ret = gather_array(&pagelist, m.num, sizeof(xen_pfn_t), m.arr);

	if (ret)
		goto out;
	if (list_empty(&pagelist)) {
		ret = -EINVAL;
		goto out;
	}

	if (version == 2) {
		/* Zero error array now to only copy back actual errors. */
		if (clear_user(m.err, sizeof(int) * m.num)) {
			ret = -EFAULT;
			goto out;
		}
	}

	down_write(&mm->mmap_sem);

	vma = find_vma(mm, m.addr);
	if (!vma ||
	    vma->vm_ops != &privcmd_vm_ops ||
	    (m.addr != vma->vm_start) ||
	    ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
	    !privcmd_enforce_singleshot_mapping(vma)) {
		up_write(&mm->mmap_sem);
		ret = -EINVAL;
		goto out;
	}
	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		ret = alloc_empty_pages(vma, m.num);
		if (ret < 0) {
			up_write(&mm->mmap_sem);
			goto out;
		}
	}

	state.domain        = m.dom;
	state.vma           = vma;
	state.va            = m.addr;
	state.index         = 0;
	state.global_error  = 0;
	state.version       = version;

	/* mmap_batch_fn guarantees ret == 0 */
	BUG_ON(traverse_pages(m.num, sizeof(xen_pfn_t),
			     &pagelist, mmap_batch_fn, &state));

	up_write(&mm->mmap_sem);

	if (state.global_error) {
		/* Write back errors in second pass. */
		state.user_mfn = (xen_pfn_t *)m.arr;
		state.user_err = m.err;
		ret = traverse_pages(m.num, sizeof(xen_pfn_t),
				     &pagelist, mmap_return_errors, &state);
	} else
		ret = 0;

	/* If we have not had any EFAULT-like global errors then set the global
	 * error to -ENOENT if necessary. */
	if ((ret == 0) && (state.global_error == -ENOENT))
		ret = -ENOENT;

out:
	free_page_list(&pagelist);

	return ret;
}

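/* Top-level ioctl dispatcher for the privcmd device node. */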
static long privcmd_ioctl(struct file *file,
			  unsigned int cmd, unsigned long data)
{
	int ret = -ENOSYS;
	void __user *udata = (void __user *) data;

	switch (cmd) {
	case IOCTL_PRIVCMD_HYPERCALL:
		ret = privcmd_ioctl_hypercall(udata);
		break;

	case IOCTL_PRIVCMD_MMAP:
		ret = privcmd_ioctl_mmap(udata);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH:
		ret = privcmd_ioctl_mmap_batch(udata, 1);
		break;

	case IOCTL_PRIVCMD_MMAPBATCH_V2:
		ret = privcmd_ioctl_mmap_batch(udata, 2);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

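/*
 * VMA close callback: for auto-translated guests, unmap the foreign
 * frames and hand the ballooned pages back.
 */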
static void privcmd_close(struct vm_area_struct *vma)
{
	struct page **pages = vma->vm_private_data;
	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
		return;

	xen_unmap_domain_mfn_range(vma, numpgs, pages);
	free_xenballooned_pages(numpgs, pages);
	kfree(pages);
}

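/*
 * All valid mappings are installed up front by the ioctls, so any fault
 * on a privcmd VMA is an error.
 */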
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
	       vma, vma->vm_start, vma->vm_end,
	       vmf->pgoff, vmf->virtual_address);

	return VM_FAULT_SIGBUS;
}

static struct vm_operations_struct privcmd_vm_ops = {
	.close = privcmd_close,
	.fault = privcmd_fault
};

static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* DONTCOPY is essential for Xen because copy_page_range doesn't know
	 * how to recreate these mappings */
	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTCOPY |
			 VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &privcmd_vm_ops;
	vma->vm_private_data = NULL;

	return 0;
}

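/*
 * Atomically claim the VMA's private data so that each privcmd VMA can
 * be populated by at most one mapping ioctl.
 */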
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
	return !cmpxchg(&vma->vm_private_data, NULL, PRIV_VMA_LOCKED);
}

const struct file_operations xen_privcmd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = privcmd_ioctl,
	.mmap = privcmd_mmap,
};
EXPORT_SYMBOL_GPL(xen_privcmd_fops);

static struct miscdevice privcmd_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/privcmd",
	.fops = &xen_privcmd_fops,
};

static int __init privcmd_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&privcmd_dev);
	if (err != 0) {
		printk(KERN_ERR "Could not register Xen privcmd device\n");
		return err;
	}
	return 0;
}

static void __exit privcmd_exit(void)
{
	misc_deregister(&privcmd_dev);
}

module_init(privcmd_init);
module_exit(privcmd_exit);