/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
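
/*
 * For orientation, a minimal userspace sketch (illustration only, not part
 * of this file): wiring an eventfd to a guest GSI with the KVM_IRQFD ioctl.
 * The vm_fd and GSI values are assumptions for the example.
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_irqfd data = {
 *		.fd  = efd,
 *		.gsi = 10,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 *
 *	// Each write to the eventfd now injects an edge on GSI 10
 *	// without a heavyweight round trip through the VMM:
 *	uint64_t one = 1;
 *	write(efd, &one, sizeof(one));
 */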

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in kvm->irqfds.resampler_list.  Used for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};
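
/*
 * For reference, a hedged userspace sketch of opting into resampling (the
 * fd values and GSI are assumptions for the example): a second eventfd is
 * passed via kvm_irqfd.resamplefd.  KVM asserts the line when "fd" fires;
 * on interrupt acknowledgement it de-asserts the line and signals
 * "resamplefd", letting userspace re-trigger if the device is still pending.
 *
 *	struct kvm_irqfd data = {
 *		.fd         = trigger_efd,
 *		.gsi        = 10,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = resample_efd,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 */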

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);
out:
	kfree(irqfd);
	return ret;
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
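
/*
 * A hedged sketch of the deassign path from userspace (values are
 * assumptions for the example): the same eventfd/GSI pair is passed back
 * with the DEASSIGN flag to tear the binding down again.
 *
 *	struct kvm_irqfd data = {
 *		.fd    = efd,
 *		.gsi   = 10,
 *		.flags = KVM_IRQFD_FLAG_DEASSIGN,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 */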

/*
 * This function is called as the kvm VM fd is being released.  Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
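
/*
 * A minimal userspace sketch (illustration only; the MMIO address and
 * length are assumptions for the example).  Once registered, a matching
 * guest write is completed in the kernel and signals the eventfd rather
 * than exiting to userspace:
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_ioeventfd data = {
 *		.addr  = 0xfe001000,		// guest-physical address
 *		.len   = 4,
 *		.fd    = efd,
 *		.flags = 0,			// wildcard; or set
 *						// KVM_IOEVENTFD_FLAG_DATAMATCH
 *						// and .datamatch
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &data);
 */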

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p;
	struct eventfd_ctx       *eventfd;
	int                       ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length) {
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		}
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}