/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
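
/*
 * Userspace example (illustration only; vm_fd, gsi and the eventfds are
 * placeholders): a level-triggered interrupt is typically wired up with a
 * resample eventfd, which lands in the resampler path above.
 *
 *	struct kvm_irqfd req = {
 *		.fd         = eventfd(0, EFD_CLOEXEC),
 *		.resamplefd = eventfd(0, EFD_CLOEXEC),
 *		.gsi        = gsi,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *	};
 *
 *	ioctl(vm_fd, KVM_IRQFD, &req);
 *
 * On guest EOI, irqfd_resampler_ack() de-asserts the GSI and signals
 * req.resamplefd so the device model can re-evaluate its interrupt level.
 */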

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
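
/*
 * Userspace example (illustration only; vm_fd and gsi are placeholders):
 * the simplest, edge-triggered use of the ioctl handled above.  Writing to
 * the eventfd injects an interrupt on the bound GSI; the writer can be the
 * VMM itself, vhost, or VFIO.
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd req = { .fd = efd, .gsi = gsi };
 *	uint64_t one = 1;
 *
 *	ioctl(vm_fd, KVM_IRQFD, &req);		// assign
 *	write(efd, &one, sizeof(one));		// trigger the GSI
 *
 *	req.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &req);		// tear down
 */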

/*
 * This function is called as the kvm VM fd is being released. Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);

}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
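
/*
 * Userspace example (illustration only; vm_fd, doorbell_gpa and the value
 * are placeholders): registering a 4-byte MMIO doorbell whose writes of a
 * specific value are turned into eventfd signals by ioeventfd_write() below
 * instead of MMIO exits to userspace.
 *
 *	struct kvm_ioeventfd req = {
 *		.addr      = doorbell_gpa,
 *		.len       = 4,
 *		.fd        = eventfd(0, EFD_CLOEXEC),
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 1,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &req);
 */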

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{

	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus              bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}