/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include "kfd_priv.h"
#include "kfd_events.h"
#include <linux/device.h>

/*
 * Wrapper around wait_queue_entry_t
 */
struct kfd_event_waiter {
	wait_queue_entry_t wait;
	struct kfd_event *event; /* Event to wait for */
	bool activated;		 /* Becomes true when event is signaled */
};

#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
#define SLOT_BITMAP_LONGS BITS_TO_LONGS(SLOTS_PER_PAGE)
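/*
 * A signal page therefore holds SLOTS_PER_PAGE (== KFD_SIGNAL_EVENT_LIMIT)
 * 64-bit slots, i.e. KFD_SIGNAL_EVENT_LIMIT * 8 bytes. That is the size used
 * below both for the backing store allocation and for the mmap size check.
 */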

/*
 * Over-complicated pooled allocator for event notification slots.
 *
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages from the kernel page allocator and map them to the
 * process VA.
 * Individual signal events are then allocated a slot in a page.
 */
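/*
 * Illustrative sketch only (not driver code): seen from user space, the
 * signal page mapped by kfd_event_mmap() is just an array of 64-bit slots,
 * so signaling slot N by hand would look roughly like this, where page_base
 * is the mmap()ed address and N is the slot index returned at event
 * creation:
 *
 *	volatile uint64_t *slots = (volatile uint64_t *)page_base;
 *	slots[N] = 1;	(any value other than UNSIGNALED_EVENT_SLOT)
 *
 * followed by the event interrupt, which is normally raised by the GPU.
 */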

struct kfd_signal_page {
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	unsigned int free_slots;
	unsigned long used_slot_bitmap[SLOT_BITMAP_LONGS];
};

/*
 * For signal events, the event ID is used as the interrupt user data.
 * For SQ s_sendmsg interrupts, this is limited to 8 bits.
 */

#define INTERRUPT_DATA_BITS 8
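/*
 * Illustrative example (an assumption about the encoding, not taken from the
 * hardware spec): only the low INTERRUPT_DATA_BITS bits of an event ID may
 * arrive with such an interrupt, i.e. something like
 * event_id & ((1U << INTERRUPT_DATA_BITS) - 1). kfd_signal_event_interrupt()
 * below treats that value as a partial ID and falls back to scanning the
 * signal slots.
 */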

static uint64_t *page_slots(struct kfd_signal_page *page)
{
	return page->kernel_address;
}

static bool allocate_free_slot(struct kfd_process *process,
			       unsigned int *out_slot_index)
{
	struct kfd_signal_page *page = process->signal_page;
	unsigned int slot;

	if (!page || page->free_slots == 0) {
		pr_debug("No free event signal slots were found for process %p\n",
			 process);

		return false;
	}

	slot = find_first_zero_bit(page->used_slot_bitmap, SLOTS_PER_PAGE);

	__set_bit(slot, page->used_slot_bitmap);
	page->free_slots--;

	page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;

	*out_slot_index = slot;

	pr_debug("Allocated event signal slot in page %p, slot %d\n",
		 page, slot);

	return true;
}
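/*
 * Note: the slot bitmap is manipulated with the non-atomic __set_bit() and
 * __clear_bit() helpers. This relies on every caller holding p->event_mutex
 * (allocation via kfd_event_create(), release via destroy_event()).
 */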

static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
{
	void *backing_store;
	struct kfd_signal_page *page;

	page = kzalloc(sizeof(*page), GFP_KERNEL);
	if (!page)
		return NULL;

	page->free_slots = SLOTS_PER_PAGE;

	backing_store = (void *) __get_free_pages(GFP_KERNEL,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Initialize all events to unsignaled */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
	       KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;
	pr_debug("Allocated new event signal page at %p, for process %p\n",
			page, p);

	return page;

fail_alloc_signal_store:
	kfree(page);
	return NULL;
}

static bool allocate_event_notification_slot(struct kfd_process *p,
					     unsigned int *signal_slot_index)
{
	if (!p->signal_page) {
		p->signal_page = allocate_signal_page(p);
		if (!p->signal_page)
			return false;
	}

	return allocate_free_slot(p, signal_slot_index);
}

/* Assumes that the process's event_mutex is locked. */
static void release_event_notification_slot(struct kfd_signal_page *page,
						size_t slot_index)
{
	__clear_bit(slot_index, page->used_slot_bitmap);
	page->free_slots++;

	/* We don't free signal pages, they are retained by the process
	 * and reused until it exits.
	 */
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	struct kfd_event *ev;

	hash_for_each_possible(p->events, ev, events, id)
		if (ev->event_id == id)
			return ev;

	return NULL;
}

/*
 * Produce a kfd event id for a nonsignal event.
 * These are arbitrary numbers, so we do a sequential search through
 * the hash table for an unused number.
 */
static u32 make_nonsignal_event_id(struct kfd_process *p)
{
	u32 id;

	for (id = p->next_nonsignal_event_id;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {

		/*
		 * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
		 * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
		 * the first loop fails immediately and we proceed with the
		 * wraparound loop below.
		 */
		p->next_nonsignal_event_id = id + 1;

		return id;
	}

	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id);
		id++)
		;


	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	return 0;
}

static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
						unsigned int signal_slot)
{
	return lookup_event_by_id(p, signal_slot);
}
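/*
 * Note: for signal events the event ID is currently the same value as the
 * signal slot index (create_signal_event() sets ev->event_id =
 * ev->signal_slot_index), so the slot lookup above is simply an ID lookup.
 */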

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		if (!p->signal_event_limit_reached) {
			pr_warn("Signal event wasn't created because limit was reached\n");
			p->signal_event_limit_reached = true;
		}
		return -ENOMEM;
	}

	if (!allocate_event_notification_slot(p, &ev->signal_slot_index)) {
		pr_warn("Signal event wasn't created because out of kernel memory\n");
		return -ENOMEM;
	}

	p->signal_event_count++;

	ev->user_signal_address =
			&p->signal_page->user_address[ev->signal_slot_index];

	ev->event_id = ev->signal_slot_index;

	pr_debug("Signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

/*
 * No non-signal events are supported yet.
 * We create them as events that never signal.
 * Set-event calls from user mode fail.
 */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	ev->event_id = make_nonsignal_event_id(p);
	if (ev->event_id == 0)
		return -ENOMEM;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	hash_init(p->events);
	p->signal_page = NULL;
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Wake up pending waiters. They will return failure */
	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->event = NULL;
	wake_up_all(&ev->wq);

	if ((ev->type == KFD_EVENT_TYPE_SIGNAL ||
	     ev->type == KFD_EVENT_TYPE_DEBUG) && p->signal_page) {
		release_event_notification_slot(p->signal_page,
						ev->signal_slot_index);
		p->signal_event_count--;
	}

	hash_del(&ev->events);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	struct hlist_node *tmp;
	unsigned int hash_bkt;

	hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
		destroy_event(p, ev);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_page(struct kfd_process *p)
{
	struct kfd_signal_page *page = p->signal_page;

	if (page) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_page(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		     uint32_t event_type, bool auto_reset, uint32_t node_id,
		     uint32_t *event_id, uint32_t *event_trigger_data,
		     uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	init_waitqueue_head(&ev->wq);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = KFD_MMAP_EVENTS_MASK;
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->signal_slot_index;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		hash_add(p->events, &ev->events, ev->event_id);

		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;

	/* Auto reset if the list is non-empty and we're waking
	 * someone. waitqueue_active is safe here because we're
	 * protected by the p->event_mutex, which is also held when
	 * updating the wait queues in kfd_wait_on_events.
	 */
	ev->signaled = !ev->auto_reset || !waitqueue_active(&ev->wq);

	list_for_each_entry(waiter, &ev->wq.head, wait.entry)
		waiter->activated = true;

	wake_up_all(&ev->wq);
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(p->signal_page)[ev->signal_slot_index] =
						UNSIGNALED_EVENT_SLOT;
}

static bool is_slot_signaled(struct kfd_process *p, unsigned int index)
{
	if (!p->signal_page)
		return false;
	else
		return page_slots(p->signal_page)[index] !=
			UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
		/* Partial ID is a full ID. */
		ev = lookup_event_by_id(p, partial_id);
		set_event_from_interrupt(p, ev);
	} else if (p->signal_page) {
		/*
		 * Partial ID is in fact partial. For now we completely
		 * ignore it, but we could use any bits we did receive to
		 * search faster.
		 */
		unsigned int i;

		for (i = 0; i < SLOTS_PER_PAGE; i++)
			if (is_slot_signaled(p, i)) {
				ev = lookup_event_by_page_slot(p, i);
				set_event_from_interrupt(p, ev);
			}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	for (i = 0; (event_waiters) && (i < num_events) ; i++) {
		init_wait(&event_waiters[i].wait);
		event_waiters[i].activated = false;
	}

	return event_waiters;
}

static int init_event_waiter_get_status(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
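	/* If the event is already signaled, the waiter starts out activated.
	 * For an auto-reset event the pending signal is consumed right here.
	 */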
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	return 0;
}

static void init_event_waiter_add_to_waitlist(struct kfd_event_waiter *waiter)
{
	struct kfd_event *ev = waiter->event;

	/* Only add to the wait list if we actually need to
	 * wait on this event.
	 */
	if (!waiter->activated)
		add_wait_queue(&ev->wq, &waiter->wait);
}

/* test_event_condition - Test condition of events being waited for
 * @all:           Return completion only if all events have signaled
 * @num_events:    Number of events to wait for
 * @event_waiters: Array of event waiters, one per event
 *
 * Returns KFD_IOC_WAIT_RESULT_COMPLETE if all (or one) event(s) have
 * signaled. Returns KFD_IOC_WAIT_RESULT_TIMEOUT if no (or not all)
 * events have signaled. Returns KFD_IOC_WAIT_RESULT_FAIL if any of
 * the events have been destroyed.
 */
static uint32_t test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (!event_waiters[i].event)
			return KFD_IOC_WAIT_RESULT_FAIL;

		if (event_waiters[i].activated) {
			if (!all)
				return KFD_IOC_WAIT_RESULT_COMPLETE;

			activated_count++;
		}
	}

	return activated_count == num_events ?
		KFD_IOC_WAIT_RESULT_COMPLETE : KFD_IOC_WAIT_RESULT_TIMEOUT;
}

/*
 * Copy event-specific data, if defined.
 * Currently only memory exception events have additional data to copy
 * to user space.
 */
static int copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[i].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return -EFAULT;
		}
	}

	return 0;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
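/*
 * Example for the conversion above (illustrative): a user_timeout_ms of 1000
 * becomes msecs_to_jiffies(1000) + 1 jiffies; the extra jiffy rounds up so
 * the wait never expires before the requested timeout.
 */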

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		if (waiters[i].event)
			remove_wait_queue(&waiters[i].event->wq,
					  &waiters[i].wait);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto out;
	}

	mutex_lock(&p->event_mutex);

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto out_unlock;
		}

		ret = init_event_waiter_get_status(p, &event_waiters[i],
				event_data.event_id);
		if (ret)
			goto out_unlock;
	}

	/* Check condition once. */
	*wait_result = test_event_condition(all, num_events, event_waiters);
	if (*wait_result == KFD_IOC_WAIT_RESULT_COMPLETE) {
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);
		goto out_unlock;
	} else if (WARN_ON(*wait_result == KFD_IOC_WAIT_RESULT_FAIL)) {
		/* This should not happen. Events shouldn't be
		 * destroyed while we're holding the event_mutex
		 */
		goto out_unlock;
	}

	/* Add to wait lists if we need to wait. */
	for (i = 0; i < num_events; i++)
		init_event_waiter_add_to_waitlist(&event_waiters[i]);

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		/* Set task state to interruptible sleep before
		 * checking wake-up conditions. A concurrent wake-up
		 * will put the task back into runnable state. In that
		 * case schedule_timeout will not put the task to
		 * sleep and we'll get a chance to re-check the
		 * updated conditions almost immediately. Otherwise,
		 * this race condition would lead to a soft hang or a
		 * very long sleep.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		*wait_result = test_event_condition(all, num_events,
						    event_waiters);
		if (*wait_result != KFD_IOC_WAIT_RESULT_TIMEOUT)
			break;

		if (timeout <= 0)
			break;

		timeout = schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	/* copy_signaled_event_data may sleep. So this has to happen
	 * after the task state is set back to RUNNING.
	 */
	if (!ret && *wait_result == KFD_IOC_WAIT_RESULT_COMPLETE)
		ret = copy_signaled_event_data(num_events,
					       event_waiters, events);

	mutex_lock(&p->event_mutex);
out_unlock:
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
		*wait_result = KFD_IOC_WAIT_RESULT_FAIL;
	else if (*wait_result == KFD_IOC_WAIT_RESULT_FAIL)
		ret = -EIO;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{

	unsigned long pfn;
	struct kfd_signal_page *page;

	/* Check that the requested mapping size matches the signal page size */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
			get_order(vma->vm_end - vma->vm_start)) {
		pr_err("Event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page = p->signal_page;
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("Signal page could not be found\n");
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("Mapping signal page\n");
	pr_debug("     start user address  == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address    == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                 == 0x%016lX\n", pfn);
	pr_debug("     vm_flags            == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size                == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the page into the user process */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}
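/*
 * Illustrative user-space sketch (not driver code), assuming "kfd_fd" is an
 * open /dev/kfd file descriptor and "event_page_offset" is the value
 * returned by kfd_event_create() for a signal event:
 *
 *	uint64_t *slots = mmap(NULL, KFD_SIGNAL_EVENT_LIMIT * 8,
 *			       PROT_READ | PROT_WRITE, MAP_SHARED,
 *			       kfd_fd, event_page_offset);
 *
 * The requested size must match the signal page size, or the get_order()
 * check above rejects the mapping.
 */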

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	int bkt;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	hash_for_each(p->events, bkt, ev, events)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	struct mm_struct *mm;

	if (!p)
		return; /* Presumably process exited. */

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(p->lead_thread);
	if (!mm) {
		mutex_unlock(&p->mutex);
		return; /* Process is exiting */
	}

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex);
}