eventfd.c
/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in the kvm->irqfds.resampler_list.  Used for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

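/*
 * Work handler for irqfd->inject: injects the interrupt when the MSI
 * fast path in irqfd_wakeup() cannot be used.  An ordinary irqfd pulses
 * the GSI (assert, then de-assert); a resampling irqfd only asserts, and
 * the line is de-asserted later from the irq ack notifier.
 */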
static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

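/*
 * Detach an irqfd from its resampler.  If this was the last irqfd using the
 * resampler, unregister the ack notifier, leave the GSI de-asserted and free
 * the resampler itself.
 */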
static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry *irq;
	struct kvm *kvm = irqfd->kvm;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		irq = srcu_dereference(irqfd->irq_entry, &kvm->irq_srcu);
		/* An event has been signaled, inject an interrupt */
		if (irq)
			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

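/*
 * poll_table callback: hook irqfd->wait onto the eventfd's wait queue so
 * that irqfd_wakeup() runs whenever the eventfd is signaled or closed.
 */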
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
			 struct kvm_irq_routing_table *irq_rt)
{
	struct kvm_kernel_irq_routing_entry *e;

	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
		rcu_assign_pointer(irqfd->irq_entry, NULL);
		return;
	}

	hlist_for_each_entry(e, &irq_rt->map[irqfd->gsi], link) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			rcu_assign_pointer(irqfd->irq_entry, e);
		else
			rcu_assign_pointer(irqfd->irq_entry, NULL);
	}
}

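/*
 * Set up a new irqfd: bind the eventfd from args->fd to args->gsi so that
 * signaling the eventfd injects that interrupt, optionally attaching a
 * resampler eventfd when KVM_IRQFD_FLAG_RESAMPLE is requested.
 */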
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_irq_routing_table *irq_rt;
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * Do not drop the file until the irqfd is fully initialized; otherwise
	 * we might race against the POLLHUP.
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}
#endif

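/*
 * Per-VM initialization of the irqfd and ioeventfd bookkeeping; runs early
 * in VM creation, before any eventfd can be registered.
 */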
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This rcu_assign_pointer is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 * It is paired with synchronize_srcu done by caller
			 * of that function.
			 */
			rcu_assign_pointer(irqfd->irq_entry, NULL);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

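/*
 * Entry point for the KVM_IRQFD ioctl: validate the flags and dispatch to
 * assign or deassign.
 */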
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm,
			    struct kvm_irq_routing_table *irq_rt)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	rcu_assign_pointer(kvm->irq_routing, irq_rt);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd, irq_rt);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

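/*
 * Decide whether a guest write at (addr, len, val) should fire this
 * ioeventfd: the address must match exactly, a zero-length registration
 * ignores length and data, and otherwise the length and (unless wildcard)
 * the data must match as well.
 */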
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

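/* Map the KVM_IOEVENTFD_FLAG_* bits to the bus the device is placed on. */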
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

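/*
 * Register an ioeventfd: validate the arguments, reject duplicates, and
 * attach a kvm_io_device on the selected bus (plus KVM_FAST_MMIO_BUS for
 * length-0 MMIO) that signals the eventfd on a matching write.
 */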
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p;
	struct eventfd_ctx       *eventfd;
	int                       ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

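/*
 * Tear down a previously registered ioeventfd matching the given bus,
 * eventfd, address, length and datamatch; returns -ENOENT if no such
 * registration exists.
 */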
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length) {
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		}
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

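/*
 * Entry point for the KVM_IOEVENTFD ioctl: dispatch to assign or deassign
 * based on KVM_IOEVENTFD_FLAG_DEASSIGN.
 */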
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}