/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			      dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

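/*
 * Allocate a VMID the first time a process creates a queue in no-HWS
 * mode: take the lowest free bit of dqm->vmid_bitmap, program the
 * PASID-VMID mapping and SH_MEM registers, point the VM context at the
 * process page table and flush the TLB.
 */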
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

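/*
 * Create a user mode queue on the no-HWS path: allocate a VMID for the
 * process' first queue, then defer to the compute or SDMA specific
 * helper and update the queue counters.
 */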
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

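/*
 * Find a free HQD slot for a compute queue by scanning the enabled
 * pipes round-robin, starting at dqm->next_pipe_to_allocate.
 */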
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			       q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid concurrent access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}

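/*
 * Update a queue's MQD. The queue is unmapped (HWS) or its HQD
 * destroyed (no-HWS) before the MQD is changed, and is mapped or
 * loaded again afterwards if it is still active.
 */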
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (pdd->qpd.evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

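/* Return the MQD manager for the given type, creating and caching it on first use */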
static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	int retval = 0;

	mutex_lock(&dqm->lock);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) { /* should not be here */
			pr_err("Cannot evict queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval)
			goto out;
		dqm->queue_count--;
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	mutex_lock(&dqm->lock);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Reactivate all evicted queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) { /* should not be here */
			pr_err("Cannot restore queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
				       q->queue, &q->properties,
				       q->process->mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	/* Reactivate all evicted queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (!retval)
		qpd->evicted = 0;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

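/*
 * Register a process with the DQM: add its qpd to the dqm->queues list,
 * record the page directory base and apply the ASIC specific qpd setup.
 */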
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

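/* Enable interrupts on every pipe that has at least one queue usable by KFD */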
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

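/*
 * No-HWS initialization: build the per-pipe HQD allocation masks from
 * the queue bitmap in shared_resources and reset the VMID and SDMA
 * bitmaps.
 */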
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

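/*
 * SDMA queue ids come from a bitmap of CIK_SDMA_QUEUES bits; the id
 * encodes both the SDMA engine and the queue within that engine.
 */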
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->sdma_bitmap) - 1;
	dqm->sdma_bitmap &= ~(1 << bit);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	dqm->sdma_bitmap |= (1 << sdma_queue_id);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

	pr_debug("SDMA id is:    %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

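/*
 * Tell the HW scheduler which VMIDs and HQDs it may use. Only queues
 * on the first MEC are handed to the scheduler.
 */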
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
						res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

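/*
 * Start the HW scheduler: bring up the packet manager, report the
 * scheduling resources, allocate the preemption fence in GTT, enable
 * interrupts and map any already-created queues.
 */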
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
		q->properties.sdma_queue_id =
			q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
		q->properties.sdma_engine_id =
			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

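/* Poll the fence location until it reaches fence_value or timeout_ms expires */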
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* Wait for the unmap fence; a timeout means queue preemption failed */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * Error: we currently do not allow destroying a queue
		 * of a process that is being debugged
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

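/*
 * Install a user mode trap handler. With CWSR enabled the user TBA/TMA
 * are written into the CWSR trap memory area so the CWSR trap handler
 * can chain to them; otherwise they are programmed directly via the qpd.
 */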
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}

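/*
 * Process teardown on the no-HWS path: destroy all remaining user mode
 * queues of the process and drop it from the DQM process list.
 */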
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}


static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			dqm->sdma_queue_count--;

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		qpd->queue_count--;
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

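/*
 * Create a device queue manager: choose HWS or no-HWS scheduling based
 * on the requested sched_policy and the ASIC, wire up the matching ops
 * table and ASIC specific callbacks, then run the policy's initialize
 * hook.
 */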
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}

#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
				  pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
		for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
				  pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

#endif