/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
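
/*
 * Userspace wires this up via the KVM_IRQFD vm ioctl.  A minimal
 * sketch (the vm_fd, event_fd and gsi values here are illustrative
 * assumptions):
 *
 *	struct kvm_irqfd data = { .fd = event_fd, .gsi = 10 };
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 *
 * The request is dispatched to kvm_irqfd() below.
 */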

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
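
/*
 * A resampling irqfd adds KVM_IRQFD_FLAG_RESAMPLE and a second eventfd
 * to the same ioctl, roughly (again a sketch; the fd values are
 * illustrative assumptions):
 *
 *	struct kvm_irqfd data = {
 *		.fd         = event_fd,
 *		.gsi        = 10,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = resample_fd,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 */
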
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfds.resampler_list.  Used for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
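		/* racelessly snapshot the route set by irqfd_update() */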
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
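	/* called via poll_wait() when kvm_irqfd_assign() polls the eventfd */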
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;
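	/* type == 0: no cached route, irqfd_wakeup() takes the slow path */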

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized; otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
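
/*
 * A minimal userspace sketch (vm_fd, event_fd and the port value are
 * illustrative assumptions):
 *
 *	struct kvm_ioeventfd data = {
 *		.addr  = 0xc000,
 *		.len   = 2,
 *		.fd    = event_fd,
 *		.flags = KVM_IOEVENTFD_FLAG_PIO,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &data);
 *
 * A guest 2-byte write to PIO port 0xc000 then signals event_fd rather
 * than exiting to userspace; see kvm_ioeventfd() below.
 */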

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p;
	struct eventfd_ctx       *eventfd;
	int                       ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional; otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length) {
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		}
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}