/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>

struct mm_struct;

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_SRCU(kfd_processes_srcu);

/* For process termination handling */
static struct workqueue_struct *kfd_process_wq;

/* Ordered, single-threaded workqueue for restoring evicted
 * processes. Restoring multiple processes concurrently under memory
 * pressure can lead to processes blocking each other from validating
 * their BOs and result in a live-lock situation where processes
 * remain evicted indefinitely.
 */
static struct workqueue_struct *kfd_restore_wq;

static struct kfd_process *find_process(const struct task_struct *thread);
static void kfd_process_ref_release(struct kref *ref);
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep);

static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);

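/* Allocate the workqueues used for process teardown and restore. Called
 * during module initialization. Returns 0 on success or -ENOMEM if either
 * workqueue cannot be created; a partially created workqueue is cleaned
 * up by kfd_process_destroy_wq().
 */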
int kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
	if (!kfd_restore_wq)
		kfd_restore_wq = alloc_ordered_workqueue("kfd_restore_wq", 0);

	if (!kfd_process_wq || !kfd_restore_wq) {
		kfd_process_destroy_wq();
		return -ENOMEM;
	}

	return 0;
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
	if (kfd_restore_wq) {
		destroy_workqueue(kfd_restore_wq);
		kfd_restore_wq = NULL;
	}
}

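/* Unmap a GPU VM allocation from the device and free it. Counterpart to
 * the allocation done in kfd_process_alloc_gpuvm().
 */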
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
			struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	dev->kfd2kgd->unmap_memory_to_gpu(dev->kgd, mem, pdd->vm);
	dev->kfd2kgd->free_memory_of_gpu(dev->kgd, mem);
}

/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
 *	This function should only be called right after the process
 *	is created and while kfd_processes_mutex is still being held,
 *	to avoid concurrency. Because of that exclusiveness, we do
 *	not need to take p->mutex.
 */
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
				   uint64_t gpu_va, uint32_t size,
				   uint32_t flags, void **kptr)
{
	struct kfd_dev *kdev = pdd->dev;
	struct kgd_mem *mem = NULL;
	int handle;
	int err;

	err = kdev->kfd2kgd->alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
						 pdd->vm, &mem, NULL, flags);
	if (err)
		goto err_alloc_mem;

	err = kdev->kfd2kgd->map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
	if (err)
		goto err_map_mem;

	err = kdev->kfd2kgd->sync_memory(kdev->kgd, mem, true);
	if (err) {
		pr_debug("Sync memory failed, wait interrupted by user signal\n");
		goto sync_memory_failed;
	}

	/* Create an obj handle so kfd_process_device_remove_obj_handle
	 * will take care of the bo removal when the process finishes.
	 * We do not need to take p->mutex, because the process is just
	 * created and the ioctls have not had the chance to run.
	 */
	handle = kfd_process_device_create_obj_handle(pdd, mem);

	if (handle < 0) {
		err = handle;
		goto free_gpuvm;
	}

	if (kptr) {
		err = kdev->kfd2kgd->map_gtt_bo_to_kernel(kdev->kgd,
				(struct kgd_mem *)mem, kptr, NULL);
		if (err) {
			pr_debug("Map GTT BO to kernel failed\n");
			goto free_obj_handle;
		}
	}

	return err;

free_obj_handle:
	kfd_process_device_remove_obj_handle(pdd, handle);
free_gpuvm:
sync_memory_failed:
	kfd_process_free_gpuvm(mem, pdd);
	return err;

err_map_mem:
	kdev->kfd2kgd->free_memory_of_gpu(kdev->kgd, mem);
err_alloc_mem:
	if (kptr)	/* kptr is optional, guard against NULL */
		*kptr = NULL;
	return err;
}

/* kfd_process_device_reserve_ib_mem - Reserve memory inside the
 *	process for IB usage. The memory reserved is for KFD to submit
 *	IBs to AMDGPU from kernel space. If the memory is reserved
 *	successfully, ib_kaddr will have the CPU/kernel
 *	address. Check ib_kaddr before accessing the memory.
 */
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
			 ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
			 ALLOC_MEM_FLAGS_WRITABLE |
			 ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (qpd->ib_kaddr || !qpd->ib_base)
		return 0;

	/* ib_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
				      &kaddr);
	if (ret)
		return ret;

	qpd->ib_kaddr = kaddr;

	return 0;
}

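/* Create a kfd_process for the current task, or return the existing one
 * if a prior open of /dev/kfd already created it. Requires a valid mm
 * and the pthreads threading model; returns an ERR_PTR on failure.
 */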
struct kfd_process *kfd_create_process(struct file *filep)
{
	struct kfd_process *process;
	struct task_struct *thread = current;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/*
	 * Take the kfd_processes_mutex before starting process creation
	 * so that two threads of the same process cannot create two
	 * kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("Process already found\n");
	else
		process = create_process(thread, filep);

	mutex_unlock(&kfd_processes_mutex);

	return process;
}

struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (!thread->mm)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);
	if (!process)
		return ERR_PTR(-EINVAL);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;

	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

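/* Drop a reference to the process. The final reference queues the
 * actual teardown (kfd_process_wq_release) on kfd_process_wq.
 */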
void kfd_unref_process(struct kfd_process *p)
{
	kref_put(&p->ref, kfd_process_ref_release);
}

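/* Free all buffer objects a process allocated on one device, first
 * unmapping each BO from the VMs of all peer devices.
 */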
static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
{
	struct kfd_process *p = pdd->process;
	void *mem;
	int id;

	/*
	 * Remove all handles from idr and release appropriate
	 * local memory object
	 */
	idr_for_each_entry(&pdd->alloc_idr, mem, id) {
		struct kfd_process_device *peer_pdd;

		list_for_each_entry(peer_pdd, &p->per_device_data,
				    per_device_list) {
			if (!peer_pdd->vm)
				continue;
			peer_pdd->dev->kfd2kgd->unmap_memory_to_gpu(
				peer_pdd->dev->kgd, mem, peer_pdd->vm);
		}

		pdd->dev->kfd2kgd->free_memory_of_gpu(pdd->dev->kgd, mem);
		kfd_process_device_remove_obj_handle(pdd, id);
	}
}

static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		kfd_process_device_free_bos(pdd);
}

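/* Release all per-device data of a process: the DRM file or process VM,
 * the CWSR pages (APU case), the doorbell bitmap and the allocation idr.
 */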
static void kfd_process_destroy_pdds(struct kfd_process *p)
{
	struct kfd_process_device *pdd, *temp;

	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
				 per_device_list) {
		pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n",
				pdd->dev->id, p->pasid);

		if (pdd->drm_file)
			fput(pdd->drm_file);
		else if (pdd->vm)
			pdd->dev->kfd2kgd->destroy_process_vm(
				pdd->dev->kgd, pdd->vm);

		list_del(&pdd->per_device_list);

		if (pdd->qpd.cwsr_kaddr && !pdd->qpd.cwsr_base)
			free_pages((unsigned long)pdd->qpd.cwsr_kaddr,
				get_order(KFD_CWSR_TBA_TMA_SIZE));

		kfree(pdd->qpd.doorbell_bitmap);
		idr_destroy(&pdd->alloc_idr);

		kfree(pdd);
	}
}

/* No process locking is needed in this function, because the process
 * is not findable any more. We must assume that no other thread is
 * using it any more, otherwise we couldn't safely free the process
 * structure in the end.
 */
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process *p = container_of(work, struct kfd_process,
					     release_work);

	kfd_iommu_unbind_process(p);

	kfd_process_free_outstanding_kfd_bos(p);

	kfd_process_destroy_pdds(p);
	dma_fence_put(p->ef);

	kfd_event_free_process(p);

	kfd_pasid_free(p->pasid);
	kfd_free_process_doorbells(p);

	mutex_destroy(&p->mutex);

	put_task_struct(p->lead_thread);

	kfree(p);
}

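/* kref release callback: defer the heavyweight teardown in
 * kfd_process_wq_release() to a worker instead of running it in the
 * caller's context.
 */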
static void kfd_process_ref_release(struct kref *ref)
{
	struct kfd_process *p = container_of(ref, struct kfd_process, ref);

	INIT_WORK(&p->release_work, kfd_process_wq_release);
	queue_work(kfd_process_wq, &p->release_work);
}

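/* Deferred via mmu_notifier_call_srcu(): drops the process reference
 * held by the MMU notifier once the SRCU grace period has elapsed.
 */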
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process *p = container_of(rcu, struct kfd_process, rcu);

	kfd_unref_process(p);
}

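/* MMU notifier release callback: the process address space is going
 * away. Unhash the process, cancel eviction/restore work, tear down
 * debug managers and queues, and drop the notifier's reference.
 */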
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd = NULL;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	if (WARN_ON(p->mm != mm))
		return;

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	cancel_delayed_work_sync(&p->eviction_work);
	cancel_delayed_work_sync(&p->restore_work);

	mutex_lock(&p->mutex);

	/* Iterate over all process device data structures; if a pdd is in
	 * debug mode, force unregistration first so that the queues can
	 * be destroyed.
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;

		mutex_lock(kfd_get_dbgmgr_mutex());
		if (dev && dev->dbgmgr && dev->dbgmgr->pasid == p->pasid) {
			if (!kfd_dbgmgr_unregister(dev->dbgmgr, p)) {
				kfd_dbgmgr_destroy(dev->dbgmgr);
				dev->dbgmgr = NULL;
			}
		}
		mutex_unlock(kfd_get_dbgmgr_mutex());
	}

	kfd_process_dequeue_from_all_devices(p);
	pqm_uninit(&p->pqm);

	/* Indicate to other users that MM is no longer valid */
	p->mm = NULL;

	mutex_unlock(&p->mutex);

	mmu_notifier_unregister_no_release(&p->mmu_notifier, mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};

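/* Set up the CWSR trap handler for APUs: map the reserved area into the
 * user address space with vm_mmap() and copy the trap handler ISA into
 * it. dGPUs use GPU VM memory instead, see
 * kfd_process_device_init_cwsr_dgpu().
 */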
static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep)
{
	unsigned long offset;
	struct kfd_process_device *pdd;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		struct kfd_dev *dev = pdd->dev;
		struct qcm_process_device *qpd = &pdd->qpd;

		if (!dev->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)
			continue;

		offset = (KFD_MMAP_TYPE_RESERVED_MEM | KFD_MMAP_GPU_ID(dev->id))
			<< PAGE_SHIFT;
		qpd->tba_addr = (int64_t)vm_mmap(filep, 0,
			KFD_CWSR_TBA_TMA_SIZE, PROT_READ | PROT_EXEC,
			MAP_SHARED, offset);

		if (IS_ERR_VALUE(qpd->tba_addr)) {
			int err = qpd->tba_addr;

			pr_err("Failure to set tba address. error %d.\n", err);
			qpd->tba_addr = 0;
			qpd->cwsr_kaddr = NULL;
			return err;
		}

		memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

		qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
		pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
			qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);
	}

	return 0;
}

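/* Set up the CWSR trap handler for dGPUs: allocate GTT memory at the
 * reserved cwsr_base GPU VA, map it into the kernel and copy the trap
 * handler ISA into it.
 */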
static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	struct qcm_process_device *qpd = &pdd->qpd;
	uint32_t flags = ALLOC_MEM_FLAGS_GTT |
		ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
	void *kaddr;
	int ret;

	if (!dev->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)
		return 0;

	/* cwsr_base is only set for dGPU */
	ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
				      KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
	if (ret)
		return ret;

	qpd->cwsr_kaddr = kaddr;
	qpd->tba_addr = qpd->cwsr_base;

	memcpy(qpd->cwsr_kaddr, dev->cwsr_isa, dev->cwsr_isa_size);

	qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
	pr_debug("set tba :0x%llx, tma:0x%llx, cwsr_kaddr:%p for pqm.\n",
		 qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr);

	return 0;
}

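/* Allocate and initialize a new kfd_process: PASID, doorbells, MMU
 * notifier, event handling, process queue manager, apertures and CWSR.
 * Called with kfd_processes_mutex held. On failure, everything is
 * unwound in reverse order and an ERR_PTR is returned.
 */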
static struct kfd_process *create_process(const struct task_struct *thread,
					struct file *filep)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);

	if (!process)
		goto err_alloc_process;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	if (kfd_alloc_process_doorbells(process) < 0)
		goto err_alloc_doorbells;

	kref_init(&process->ref);

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;
	get_task_struct(process->lead_thread);

	INIT_LIST_HEAD(&process->per_device_data);

	kfd_event_init_process(process);

	err = pqm_init(&process->pqm, process);
	if (err != 0)
		goto err_process_pqm_init;

	/* init process apertures */
	process->is_32bit_user_mode = in_compat_syscall();
	err = kfd_init_apertures(process);
	if (err != 0)
		goto err_init_apertures;

	INIT_DELAYED_WORK(&process->eviction_work, evict_process_worker);
	INIT_DELAYED_WORK(&process->restore_work, restore_process_worker);
	process->last_restore_timestamp = get_jiffies_64();

	err = kfd_process_init_cwsr_apu(process, filep);
	if (err)
		goto err_init_cwsr;

	return process;

err_init_cwsr:
	kfd_process_free_outstanding_kfd_bos(process);
	kfd_process_destroy_pdds(process);
err_init_apertures:
	pqm_uninit(&process->pqm);
err_process_pqm_init:
	hash_del_rcu(&process->kfd_processes);
	synchronize_rcu();
	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
err_mmu_notifier:
	mutex_destroy(&process->mutex);
	kfd_free_process_doorbells(process);
err_alloc_doorbells:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}

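/* Initialize the per-process doorbell bitmap on SOC15 ASICs, marking
 * the doorbells reserved in the shared resources so they are never
 * assigned to user queues.
 */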
static int init_doorbell_bitmap(struct qcm_process_device *qpd,
			struct kfd_dev *dev)
{
	unsigned int i;

	if (!KFD_IS_SOC15(dev->device_info->asic_family))
		return 0;

	qpd->doorbell_bitmap =
		kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
				     BITS_PER_BYTE), GFP_KERNEL);
	if (!qpd->doorbell_bitmap)
		return -ENOMEM;

	/* Mask out any reserved doorbells */
	for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS; i++)
		if ((dev->shared_resources.reserved_doorbell_mask & i) ==
		    dev->shared_resources.reserved_doorbell_val) {
			set_bit(i, qpd->doorbell_bitmap);
			pr_debug("reserved doorbell 0x%03x\n", i);
		}

	return 0;
}

struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	return NULL;
}

struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = NULL;

	pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
	if (!pdd)
		return NULL;

	if (init_doorbell_bitmap(&pdd->qpd, dev)) {
		pr_err("Failed to init doorbell for process\n");
		kfree(pdd);
		return NULL;
	}

	pdd->dev = dev;
	INIT_LIST_HEAD(&pdd->qpd.queues_list);
	INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
	pdd->qpd.dqm = dev->dqm;
	pdd->qpd.pqm = &p->pqm;
	pdd->qpd.evicted = 0;
	pdd->process = p;
	pdd->bound = PDD_UNBOUND;
	pdd->already_dequeued = false;
	list_add(&pdd->per_device_list, &p->per_device_data);

	/* Init idr used for memory handle translation */
	idr_init(&pdd->alloc_idr);
	return pdd;
}

/**
 * kfd_process_device_init_vm - Initialize a VM for a process-device
 *
 * @pdd: The process-device
 * @drm_file: Optional pointer to a DRM file descriptor
 *
 * If @drm_file is specified, it will be used to acquire the VM from
 * that file descriptor. If successful, the @pdd takes ownership of
 * the file descriptor.
 *
 * If @drm_file is NULL, a new VM is created.
 *
 * Returns 0 on success, -errno on failure.
 */
int kfd_process_device_init_vm(struct kfd_process_device *pdd,
			       struct file *drm_file)
{
	struct kfd_process *p;
	struct kfd_dev *dev;
	int ret;

	if (pdd->vm)
		return drm_file ? -EBUSY : 0;

	p = pdd->process;
	dev = pdd->dev;

	if (drm_file)
		ret = dev->kfd2kgd->acquire_process_vm(
			dev->kgd, drm_file, p->pasid,
			&pdd->vm, &p->kgd_process_info, &p->ef);
	else
		ret = dev->kfd2kgd->create_process_vm(
			dev->kgd, p->pasid, &pdd->vm, &p->kgd_process_info, &p->ef);
	if (ret) {
		pr_err("Failed to create process VM object\n");
		return ret;
	}

	ret = kfd_process_device_reserve_ib_mem(pdd);
	if (ret)
		goto err_reserve_ib_mem;
	ret = kfd_process_device_init_cwsr_dgpu(pdd);
	if (ret)
		goto err_init_cwsr;

	pdd->drm_file = drm_file;

	return 0;

err_init_cwsr:
err_reserve_ib_mem:
	kfd_process_device_free_bos(pdd);
	if (!drm_file)
		dev->kfd2kgd->destroy_process_vm(dev->kgd, pdd->vm);
	pdd->vm = NULL;

	return ret;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int err;

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return ERR_PTR(-ENOMEM);
	}

	err = kfd_iommu_bind_process_to_device(pdd);
	if (err)
		return ERR_PTR(err);

	err = kfd_process_device_init_vm(pdd, NULL);
	if (err)
		return ERR_PTR(err);

	return pdd;
}

struct kfd_process_device *kfd_get_first_process_device_data(
						struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(
						struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}

/* Create specific handle mapped to mem from process local memory idr
 * Assumes that the process lock is held.
 */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
					void *mem)
{
	return idr_alloc(&pdd->alloc_idr, mem, 0, 0, GFP_KERNEL);
}

/* Translate specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void *kfd_process_device_translate_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle < 0)
		return NULL;

	return idr_find(&pdd->alloc_idr, handle);
}

/* Remove specific handle from process local memory idr
 * Assumes that the process lock is held.
 */
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
					int handle)
{
	if (handle >= 0)
		idr_remove(&pdd->alloc_idr, handle);
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid)
{
	struct kfd_process *p, *ret_p = NULL;
	unsigned int temp;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (p->pasid == pasid) {
			kref_get(&p->ref);
			ret_p = p;
			break;
		}
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return ret_p;
}

/* This increments the process->ref counter. */
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *p;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	p = find_process_by_mm(mm);
	if (p)
		kref_get(&p->ref);

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return p;
}

/* kfd_process_evict_queues - Evict all user queues of a process
 *
 * Eviction is reference-counted per process-device. This means multiple
 * evictions from different sources can be nested safely.
 */
int kfd_process_evict_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r = 0;
	unsigned int n_evicted = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.evict_process_queues(pdd->dev->dqm,
							    &pdd->qpd);
		if (r) {
			pr_err("Failed to evict process queues\n");
			goto fail;
		}
		n_evicted++;
	}

	return r;

fail:
	/* To keep state consistent, roll back partial eviction by
	 * restoring queues
	 */
	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		if (n_evicted == 0)
			break;
		if (pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd))
			pr_err("Failed to restore queues\n");

		n_evicted--;
	}

	return r;
}

/* kfd_process_restore_queues - Restore all user queues of a process */
int kfd_process_restore_queues(struct kfd_process *p)
{
	struct kfd_process_device *pdd;
	int r, ret = 0;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
		r = pdd->dev->dqm->ops.restore_process_queues(pdd->dev->dqm,
							      &pdd->qpd);
		if (r) {
			pr_err("Failed to restore process queues\n");
			if (!ret)
				ret = r;
		}
	}

	return ret;
}

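/* Delayed worker that evicts a process: waits for any in-flight restore
 * to finish, evicts all user queues, signals the eviction fence and
 * schedules the restore work to run after PROCESS_RESTORE_TIME_MS.
 */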
static void evict_process_worker(struct work_struct *work)
{
	int ret;
	struct kfd_process *p;
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, eviction_work);
	WARN_ONCE(p->last_eviction_seqno != p->ef->seqno,
		  "Eviction fence mismatch\n");

	/* A narrow window of overlap between the restore and evict work
	 * items is possible. Once amdgpu_amdkfd_gpuvm_restore_process_bos
	 * unreserves KFD BOs, it is possible to be evicted again, but
	 * restore has a few more steps to finish. So wait for any
	 * previous restore work to complete.
	 */
	flush_delayed_work(&p->restore_work);

	pr_debug("Started evicting pasid %d\n", p->pasid);
	ret = kfd_process_evict_queues(p);
	if (!ret) {
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
		queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));

		pr_debug("Finished evicting pasid %d\n", p->pasid);
	} else
		pr_err("Failed to evict queues of pasid %d\n", p->pasid);
}

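/* Delayed worker that restores an evicted process: revalidates the
 * process BOs through the first KGD device (which restores all devices)
 * and restarts the user queues, rescheduling itself with a back-off if
 * memory is still too scarce to restore.
 */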
static void restore_process_worker(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int ret = 0;

	dwork = to_delayed_work(work);

	/* Process termination destroys this worker thread. So during the
	 * lifetime of this thread, kfd_process p will be valid
	 */
	p = container_of(dwork, struct kfd_process, restore_work);

	/* Call restore_process_bos on the first KGD device. This function
	 * takes care of restoring the whole process including other devices.
	 * Restore can fail if enough memory is not available. If so,
	 * reschedule again.
	 */
	pdd = list_first_entry(&p->per_device_data,
			       struct kfd_process_device,
			       per_device_list);

	pr_debug("Started restoring pasid %d\n", p->pasid);

	/* Set last_restore_timestamp before successful restoration.
	 * Otherwise this would have to be set by KGD (restore_process_bos)
	 * before KFD BOs are unreserved. If not, the process can be evicted
	 * again before the timestamp is set.
	 * If restore fails, the timestamp will be set again in the next
	 * attempt. This would mean that the minimum GPU quantum would be
	 * PROCESS_ACTIVE_TIME_MS - (time to execute the following two
	 * functions)
	 */

	p->last_restore_timestamp = get_jiffies_64();
	ret = pdd->dev->kfd2kgd->restore_process_bos(p->kgd_process_info,
						     &p->ef);
	if (ret) {
		pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n",
			 p->pasid, PROCESS_BACK_OFF_TIME_MS);
		ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
				msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
		WARN(!ret, "reschedule restore work failed\n");
		return;
	}

	ret = kfd_process_restore_queues(p);
	if (!ret)
		pr_debug("Finished restoring pasid %d\n", p->pasid);
	else
		pr_err("Failed to restore queues of pasid %d\n", p->pasid);
}

void kfd_suspend_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		cancel_delayed_work_sync(&p->eviction_work);
		cancel_delayed_work_sync(&p->restore_work);

		if (kfd_process_evict_queues(p))
			pr_err("Failed to suspend process %d\n", p->pasid);
		dma_fence_signal(p->ef);
		dma_fence_put(p->ef);
		p->ef = NULL;
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
}

int kfd_resume_all_processes(void)
{
	struct kfd_process *p;
	unsigned int temp;
	int ret = 0, idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		if (!queue_delayed_work(kfd_restore_wq, &p->restore_work, 0)) {
			pr_err("Restore process %d failed during resume\n",
			       p->pasid);
			ret = -EFAULT;
		}
	}
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return ret;
}

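/* mmap handler for the per-process CWSR reserved area (APU case):
 * allocate zeroed pages for the trap handler and map them into the
 * user VMA.
 */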
int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
			  struct vm_area_struct *vma)
{
	struct kfd_process_device *pdd;
	struct qcm_process_device *qpd;

	if ((vma->vm_end - vma->vm_start) != KFD_CWSR_TBA_TMA_SIZE) {
		pr_err("Incorrect CWSR mapping size.\n");
		return -EINVAL;
	}

	pdd = kfd_get_process_device_data(dev, process);
	if (!pdd)
		return -EINVAL;
	qpd = &pdd->qpd;

	qpd->cwsr_kaddr = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_CWSR_TBA_TMA_SIZE));
	if (!qpd->cwsr_kaddr) {
		pr_err("Error allocating per process CWSR buffer.\n");
		return -ENOMEM;
	}

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND
		| VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP;
	/* Mapping pages to user process */
	return remap_pfn_range(vma, vma->vm_start,
			       PFN_DOWN(__pa(qpd->cwsr_kaddr)),
			       KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}

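/* Invalidate the GPU TLB entries of one process-device: by VMID when
 * running without HWS (the VMID is only assigned once the first queue
 * is created), otherwise by PASID.
 */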
void kfd_flush_tlb(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;
	const struct kfd2kgd_calls *f2g = dev->kfd2kgd;

	if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		/* Nothing to flush until a VMID is assigned, which
		 * only happens when the first queue is created.
		 */
		if (pdd->qpd.vmid)
			f2g->invalidate_tlbs_vmid(dev->kgd, pdd->qpd.vmid);
	} else {
		f2g->invalidate_tlbs(dev->kgd, pdd->process->pasid);
	}
}

#if defined(CONFIG_DEBUG_FS)

int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data)
{
	struct kfd_process *p;
	unsigned int temp;
	int r = 0;

	int idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
		seq_printf(m, "Process %d PASID %d:\n",
			   p->lead_thread->tgid, p->pasid);

		mutex_lock(&p->mutex);
		r = pqm_debugfs_mqds(m, &p->pqm);
		mutex_unlock(&p->mutex);

		if (r)
			break;
	}

	srcu_read_unlock(&kfd_processes_srcu, idx);

	return r;
}

#endif