/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

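/*
 * Deferred-injection worker.  irqfd_wakeup() schedules this whenever the
 * signalled irqfd cannot take the direct MSI fast path.  A plain irqfd
 * generates an edge (assert then de-assert); a resampler irqfd is only
 * asserted here and is de-asserted from irqfd_resampler_ack() once the
 * guest acknowledges the interrupt.
 */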
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

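/*
 * Unlink an irqfd from its resampler.  If this was the last irqfd using
 * the GSI, the resampler itself is torn down: the line is de-asserted and
 * the ack notifier unregistered.
 */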
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Wait-queue callback, called with wqh->lock held and interrupts disabled
 * whenever the eventfd is signalled (POLLIN) or released (POLLHUP).  MSI
 * routes are injected directly from here; everything else is deferred to
 * the irqfd_inject work item.
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

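/*
 * poll_table callback: hook the irqfd's wait-queue entry onto the
 * eventfd's wait queue so that irqfd_wakeup() runs on every signal.
 */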
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/*
 * Re-cache the routing entry for this irqfd's GSI.  Only an MSI entry is
 * kept, since that is the only type injected straight from the poll
 * callback; anything else falls back to the irqfd_inject work item.
 * Must be called under irqfds.lock.
 */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

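/*
 * Weak no-op defaults for the IRQ bypass hooks; an architecture that
 * needs to do work when a bypass consumer is stopped or started provides
 * its own implementations.
 */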
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}
#endif

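/*
 * Set up a new irqfd: resolve the eventfd, optionally attach it to a
 * resampler for level-triggered use, install our wake-up handler on the
 * eventfd's wait queue and cache the irq routing entry for the fast path.
 */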
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

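/*
 * Called by the irqchip code when the guest acknowledges an interrupt on
 * the given pin; fans the ack out to every registered notifier, including
 * the irqfd resamplers above.
 */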
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

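/*
 * Per-VM initialisation of the irqfd and ioeventfd bookkeeping; runs
 * during VM creation, before any of the ioctls below can be issued.
 */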
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

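/*
 * Entry point for the KVM_IRQFD ioctl.  A rough userspace sketch (vm_fd
 * and gsi are placeholders, error handling omitted):
 *
 *	struct kvm_irqfd data = {
 *		.fd  = eventfd(0, EFD_CLOEXEC),
 *		.gsi = gsi,
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 *
 * Each write to the eventfd then raises the configured GSI in the guest
 * without a round trip through userspace.
 */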
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

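/*
 * Match a guest write against this ioeventfd: the address must match
 * exactly, a zero length matches any access at that address, and unless
 * the entry is a wildcard the written value must equal datamatch.
 */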
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

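/*
 * Pick the bus a new ioeventfd is registered on: the PIO bus for port
 * I/O, the virtio-ccw notification bus (s390) when requested, and the
 * MMIO bus otherwise.
 */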
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

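/*
 * Register one ioeventfd on a specific bus: reject duplicates, then hook
 * a kvm_io_device at the requested address range so that matching guest
 * writes are turned into eventfd signals.  Takes slots_lock itself.
 */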
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

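/*
 * Reverse of kvm_assign_ioeventfd_idx(): find the matching registration
 * on the given bus, unregister the device and drop the eventfd reference.
 */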
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

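/*
 * Validate a KVM_IOEVENTFD request (natural-word length or zero, no
 * address wrap-around, no unknown flags) and register it.  Zero-length
 * MMIO entries are additionally placed on KVM_FAST_MMIO_BUS for faster
 * lookups.
 */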
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

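/*
 * Entry point for the KVM_IOEVENTFD ioctl.  A rough userspace sketch
 * (vm_fd and notify_addr are placeholders, error handling omitted):
 *
 *	struct kvm_ioeventfd io = {
 *		.addr = notify_addr,
 *		.len  = 4,
 *		.fd   = eventfd(0, EFD_CLOEXEC),
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * Guest writes to that address are then handled entirely in the kernel
 * and delivered as eventfd signals, with no return to userspace.
 */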
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}