/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);
static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			      dqm->dev->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
	return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

static void increment_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
{
	dqm->active_queue_count++;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;
}

static void decrement_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
{
	dqm->active_queue_count--;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
		uint32_t *idx_offset =
				dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
					  q->doorbell_id);

	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

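/* Bind a free VMID from the KFD range to this process: record the
 * PASID-to-VMID mapping, program the memory aperture settings and the
 * page table base, then flush the TLB. Only used in non-HWS mode, when
 * a process creates its first queue on this device.
 */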
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		pr_err("no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, q->properties.type);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

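/* Find a free HQD slot for a compute queue in non-HWS mode: scan the
 * pipes round-robin starting at next_pipe_to_allocate and take the
 * lowest free queue bit on the first enabled pipe that has one.
 */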
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
	else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;


	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	if (!retval)
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		increment_queue_count(dqm, q->properties.type);
	else if (!q->properties.is_active && prev_active)
		decrement_queue_count(dqm, q->properties.type);

	if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
		}
		q->properties.is_gws = true;
	} else if (!q->gws && q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count--;
			pdd->qpd.mapped_gws_queue = false;
		}
		q->properties.is_gws = false;
	}

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

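/* Evict all queues of a process in non-HWS mode: mark every queue as
 * evicted and preempt the active ones by destroying their HQDs. The
 * MQDs stay allocated so the queues can be reloaded on restore.
 */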
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}

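/* Restore the queues of a previously evicted process in non-HWS mode:
 * re-program the page table base, then reload the MQD of every queue
 * that becomes active again, using a safely acquired mm reference.
 */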
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count++;
			qpd->mapped_gws_queue = true;
		}

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				       q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

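/* Track a new process on this device: add its qcm_process_device to
 * the DQM list, cache the GPU page table base and apply the
 * ASIC-specific QPD defaults.
 */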
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.cp_queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	pr_info("SW scheduler is used");
	init_interrupts(dqm);
	
	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		return pm_init(&dqm->packets, dqm);
	dqm->sched_running = true;

	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		pm_uninit(&dqm->packets, false);
	dqm->sched_running = false;

	return 0;
}

static void pre_reset(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	dqm->is_resetting = true;
	dqm_unlock(dqm);
}

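/* Reserve an SDMA queue from the PCIe or XGMI bitmap and derive the
 * engine id and per-engine queue id from the allocated index.
 */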
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (dqm->sdma_bitmap == 0) {
			pr_err("No more SDMA queue to allocate\n");
			return -ENOMEM;
		}

		bit = __ffs64(dqm->sdma_bitmap);
		dqm->sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		q->properties.sdma_engine_id = q->sdma_id %
				get_num_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_sdma_engines(dqm);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (dqm->xgmi_sdma_bitmap == 0) {
			pr_err("No more XGMI SDMA queue to allocate\n");
			return -ENOMEM;
		}
		bit = __ffs64(dqm->xgmi_sdma_bitmap);
		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs. The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_xgmi_sdma_engines(dqm);
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

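/* Describe to the HW scheduler which VMIDs and CP queues it may use;
 * only queues on the first MEC are handed over. The description is
 * sent to the firmware via the packet manager as a set_resources
 * packet.
 */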
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
1100 1101
		 * definition of res.queue_mask needs updating
		 */
1102
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
1103 1104 1105 1106
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= 1ull
			<< amdgpu_queue_mask_bit_to_set_resource_bit(
				(struct amdgpu_device *)dqm->dev->kgd, i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

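/* Bring up HWS scheduling: initialize the packet manager, hand the
 * scheduling resources to the firmware, allocate the preemption fence
 * in GTT memory and map the initial runlist.
 */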
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when driver try to start the hw scheduler */
	dqm->is_hws_hang = false;
	dqm->is_resetting = false;
	dqm->sched_running = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	bool hanging;

	dqm_lock(dqm);
	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;
	dqm_unlock(dqm);

	pm_release_ib(&dqm->packets);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets, hanging);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, kq->queue->properties.type);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	decrement_queue_count(dqm, kq->queue->properties.type);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, q->properties.type);

		execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

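/* Poll the preemption fence until it reaches fence_value or timeout_ms
 * expires. On timeout, the driver thread can be parked here via the
 * halt_if_hws_hang option so CP state is preserved for FW debugging.
 */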
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (!dqm->sched_running)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->sched_running)
		return 0;
	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
1406
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1407
				queue_preemption_timeout_ms);
1408 1409 1410 1411 1412 1413 1414 1415 1416
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		/* It's possible we're detecting a HWS hang in the
		 * middle of a GPU reset. No need to schedule another
		 * reset in this case.
		 */
		if (!dqm->is_resetting)
			schedule_work(&dqm->hw_exception_work);
1417
		return retval;
1418
	}
1419

1420 1421 1422 1423 1424 1425
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval)
		return retval;

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		deallocate_sdma_queue(dqm, q);
		pdd->sdma_past_activity_counter += sdma_val;
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}

static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;
	int r;

	dqm_lock(dqm);

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

	if (!mqd_mgr->get_wave_state) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);

dqm_unlock:
	dqm_unlock(dqm);
	return r;
}

static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
	bool found = false;

	retval = 0;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		decrement_queue_count(dqm, kq->queue->properties.type);
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			deallocate_sdma_queue(dqm, q);
		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
			deallocate_sdma_queue(dqm, q);

		if (q->properties.is_active) {
			decrement_queue_count(dqm, q->properties.type);
			if (q->properties.is_gws) {
				dqm->gws_queue_count--;
				qpd->mapped_gws_queue = false;
			}
		}

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	/* Lastly, free mqd resources.
	 * Do free_mqd() after dqm_unlock to avoid circular locking.
	 */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
		qpd->queue_count--;
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	}

	return retval;
}

static int init_mqd_managers(struct device_queue_manager *dqm)
{
	int i, j;
	struct mqd_manager *mqd_mgr;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!mqd_mgr) {
			pr_err("mqd manager [%d] initialization failed\n", i);
			goto out_free;
		}
		dqm->mqd_mgrs[i] = mqd_mgr;
	}

	return 0;

out_free:
	for (j = 0; j < i; j++) {
		kfree(dqm->mqd_mgrs[j]);
		dqm->mqd_mgrs[j] = NULL;
	}

	return -ENOMEM;
}

/* Allocate one hiq mqd (HWS) and all SDMA mqd in a contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
	int retval;
	struct kfd_dev *dev = dqm->dev;
	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
		get_num_all_sdma_engines(dqm) *
		dev->device_info->num_sdma_queues_per_engine +
		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;

	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
		(void *)&(mem_obj->cpu_ptr), false);

	return retval;
}

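/* Create a device queue manager for a KFD device: choose HWS or
 * non-HWS scheduling based on the ASIC and the sched_policy module
 * parameter, fill in the matching ops table and hook up the
 * ASIC-specific callbacks.
 */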
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
	case CHIP_ARCTURUS:
		device_queue_manager_init_v9(&dqm->asic_ops);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		device_queue_manager_init_v10_navi10(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (init_mqd_managers(dqm))
		goto out_free;

	if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

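/* Free the GTT buffer that holds the HIQ and SDMA mqd trunk */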
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
}

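/* Tear down a DQM in the reverse order of device_queue_manager_init() */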
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}

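/*
 * Called when a GPU VM fault is attributed to @pasid: look up the owning
 * process and evict its queues on this device.
 */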
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

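/* Deferred work: reset the GPU in response to a hardware exception */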
static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}

#if defined(CONFIG_DEBUG_FS)

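/*
 * Print a register dump as rows of "offset: value value ...", starting a new
 * row whenever the register offsets stop being contiguous (at most eight
 * values per row).
 */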
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

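/*
 * Dump the HQD registers of the HIQ, of every KFD-owned CP queue slot and of
 * every SDMA RLC queue on this device.
 */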
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	if (!dqm->sched_running) {
		seq_printf(m, " Device is stopped\n");

		return 0;
	}

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
					&dump, &n_regs);
	if (!r) {
		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
			   KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.cp_queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
				  pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
				  pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

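/*
 * Debugfs hook that re-issues the runlist. active_runlist is forced to true
 * so that execute_queues_cpsch() unmaps whatever the HWS currently has
 * mapped before mapping all active queues again.
 */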
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif