/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_iommu.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

/*
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages and map them to the process VA.
 * Individual signal events use their event_id as slot index.
 */
struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	bool need_to_free_pages;
};
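
/*
 * Example (illustrative): the event with event_id 5 owns the 64-bit word
 * page_slots(page)[5], i.e. byte offset 5 * 8 into the page. The backing
 * store is KFD_SIGNAL_EVENT_LIMIT * 8 bytes, one slot per possible
 * signal event ID.
 */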


static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	page->need_to_free_pages = true;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static int allocate_event_notification_slot(struct kfd_process *p,
					    struct kfd_event *ev)
{
	int id;

	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return -ENOMEM;
		/* Oldest user mode expects 256 event slots */
		p->signal_mapped_size = 256*8;
	}

	/*
	 * Compatibility with old user mode: Only use signal slots
	 * user mode has mapped, may be less than
	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
	 * of the event limit without breaking user mode.
	 */
	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
		       GFP_KERNEL);
	if (id < 0)
		return id;

	ev->event_id = id;
	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;

	return 0;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	return idr_find(&p->event_idr, id);
}

/**
 * lookup_signaled_event_by_partial_id - Lookup signaled event from partial ID
 * @p:     Pointer to struct kfd_process
 * @id:    ID to look up
 * @bits:  Number of valid bits in @id
 *
 * Finds the first signaled event with a matching partial ID. If no
 * matching signaled event is found, returns NULL. In that case the
 * caller should assume that the partial ID is invalid and do an
 * exhaustive search of all signaled events.
 *
 * If multiple events with the same partial ID signal at the same
 * time, they will be found one interrupt at a time, not necessarily
 * in the same order the interrupts occurred. As long as the number of
 * interrupts is correct, all signaled events will be seen by the
 * driver.
 */
static struct kfd_event *lookup_signaled_event_by_partial_id(
	struct kfd_process *p, uint32_t id, uint32_t bits)
{
	struct kfd_event *ev;

	if (!p->signal_page || id >= KFD_SIGNAL_EVENT_LIMIT)
		return NULL;

	/* Fast path for the common case that @id is not a partial ID
	 * and we only need a single lookup.
	 */
	if (bits > 31 || (1U << bits) >= KFD_SIGNAL_EVENT_LIMIT) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			return NULL;

		return idr_find(&p->event_idr, id);
	}

	/* General case for partial IDs: Iterate over all matching IDs
	 * and find the first one that has signaled.
	 */
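	/*
	 * Example (illustrative): with bits == 8 and id == 0x12, the loop
	 * probes slots 0x12, 0x112, 0x212, ... in strides of 1U << bits
	 * until a signaled slot is found or KFD_SIGNAL_EVENT_LIMIT is
	 * reached.
	 */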
	for (ev = NULL; id < KFD_SIGNAL_EVENT_LIMIT && !ev; id += 1U << bits) {
		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
			continue;

		ev = idr_find(&p->event_idr, id);
	}

	return ev;
}

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	int ret;

	if (p->signal_mapped_size &&
	    p->signal_event_count == p->signal_mapped_size / 8) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOSPC;
	}

	ret = allocate_event_notification_slot(p, ev);
	if (ret) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return ret;
	}

	p->signal_event_count++;

	ev->user_signal_address = &p->signal_page->user_address[ev->event_id];
	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
	 * intentional integer overflow to -1 without a compiler
	 * warning. idr_alloc treats a negative value as "maximum
	 * signed integer".
	 */
	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
			   GFP_KERNEL);

	if (id < 0)
		return id;
	ev->event_id = id;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	idr_init(&p->event_idr);
	p->signal_page = NULL;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if (ev->type == KFD_EVENT_TYPE_SIGNAL ||
	    ev->type == KFD_EVENT_TYPE_DEBUG)
		p->signal_event_count--;

	idr_remove(&p->event_idr, ev->event_id);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	uint32_t id;

	idr_for_each_entry(&p->event_idr, ev, id)
		destroy_event(p, ev);
	idr_destroy(&p->event_idr);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		if (page->need_to_free_pages)
			free_pages((unsigned long)page->kernel_address,
				   get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
		       uint64_t size)
{
	struct kfd_signal_page *page;

	if (p->signal_page)
		return -EBUSY;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	/* Initialize all events to unsignaled */
	memset(kernel_address, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = kernel_address;

	p->signal_page = page;
	p->signal_mapped_size = size;

	return 0;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->event_id;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}
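
/*
 * Usage sketch (illustrative, not part of the driver): user space reaches
 * the function above through the AMDKFD_IOC_CREATE_EVENT ioctl and passes
 * the returned event_page_offset to mmap() to map the signal page. Field
 * names follow include/uapi/linux/kfd_ioctl.h; kfd_fd (an open /dev/kfd
 * descriptor) and page_size are placeholders, error handling is omitted.
 *
 *	struct kfd_ioctl_create_event_args args = {0};
 *
 *	args.event_type = KFD_IOC_EVENT_SIGNAL;
 *	args.auto_reset = 1;
 *	if (ioctl(kfd_fd, AMDKFD_IOC_CREATE_EVENT, &args) == 0) {
 *		uint64_t *page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, kfd_fd, args.event_page_offset);
 *		// page[args.event_slot_index] is this event's signal slot
 *	}
 */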

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;

}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev = NULL;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits)
		ev = lookup_signaled_event_by_partial_id(p, partial_id,
							 valid_id_bits);
	if (ev) {
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID lookup failed. Assume that the event ID
		 * in the interrupt payload was invalid and do an
		 * exhaustive search of signaled events.
		 */
		uint64_t *slots = page_slots(p->signal_page);
		uint32_t id;

		if (valid_id_bits)
			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
					     partial_id, valid_id_bits);

		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT/2) {
			/* With relatively few events, it's faster to
			 * iterate over the event IDR
			 */
			idr_for_each_entry(&p->event_idr, ev, id) {
				if (id >= KFD_SIGNAL_EVENT_LIMIT)
					break;

				if (slots[id] != UNSIGNALED_EVENT_SLOT)
					set_event_from_interrupt(p, ev);
			}
		} else {
			/* With relatively many events, it's faster to
			 * iterate over the signal slots and lookup
			 * only signaled events from the IDR.
			 */
			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
					ev = lookup_event_by_id(p, id);
					set_event_from_interrupt(p, ev);
				}
		}
	}

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event specific data, if defined.
 * Currently only memory exception events have additional data to copy to user
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;

}



static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
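
/*
 * Example (illustrative): a user timeout of 1000 ms becomes
 * msecs_to_jiffies(1000) + 1 jiffies; the extra jiffy rounds up so the
 * wait lasts at least the requested time. KFD_EVENT_TIMEOUT_IMMEDIATE
 * maps to 0 (poll the condition once), and KFD_EVENT_TIMEOUT_INFINITE
 * maps to MAX_SCHEDULE_TIMEOUT (never expires).
 */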

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}
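
/*
 * Usage sketch (illustrative, not part of the driver): user space reaches
 * kfd_wait_on_events() through the AMDKFD_IOC_WAIT_EVENTS ioctl. Field
 * names follow include/uapi/linux/kfd_ioctl.h; kfd_fd and event_id are
 * placeholders and error handling is omitted.
 *
 *	struct kfd_event_data event_data = {0};
 *	struct kfd_ioctl_wait_events_args args = {0};
 *
 *	event_data.event_id = event_id;
 *	args.events_ptr = (uint64_t)&event_data;
 *	args.num_events = 1;
 *	args.wait_for_all = 1;
 *	args.timeout = KFD_EVENT_TIMEOUT_INFINITE;
 *	ioctl(kfd_fd, AMDKFD_IOC_WAIT_EVENTS, &args);
 *	// args.wait_result is one of the KFD_IOC_WAIT_RESULT_* codes
 */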

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned long pfn;
	struct kfd_signal_page *page;
	int ret;

	/* check required size doesn't exceed the allocated size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) <
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* mapping the page to user process */
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
	if (!ret)
		p->signal_mapped_size = vma->vm_end - vma->vm_start;

	return ret;
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	uint32_t id;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	idr_for_each_entry_continue(&p->event_idr, ev, id)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

#ifdef KFD_SUPPORT_IOMMU_V2
void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		kfd_unref_process(p);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	kfd_unref_process(p);
}