kfd_device_queue_manager.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i <  dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			      dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

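/*
 * Pick a free KFD VMID for the process, bind the process' PASID to that
 * VMID and program the per-VMID SH_MEM configuration registers. Only used
 * on the no-HWS path, where the VMID stays with the process for as long as
 * it has queues.
 */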
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
				dqm->dev->vm_info.vmid_num_kfd);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

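/*
 * No-HWS queue creation: the first queue of a process claims a VMID, then
 * the queue is given an HQD or SDMA slot and its MQD is created and loaded
 * directly by the driver.
 */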
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

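/*
 * Find a free HQD slot, starting at dqm->next_pipe_to_allocate and walking
 * the enabled pipes round-robin so queues spread evenly across pipes.
 * Returns -EBUSY when every slot is in use.
 */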
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));

			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			       q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid concurrent access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}

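/*
 * An MQD may only be rewritten while its queue is off the hardware: with
 * HWS the runlist is unmapped first and remapped afterwards; without HWS
 * an active queue is evicted from its HQD before the update and reloaded
 * afterwards.
 */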
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval != 0) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (sched_policy == KFD_SCHED_POLICY_NO_HWS &&
		   prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (sched_policy == KFD_SCHED_POLICY_NO_HWS &&
		 q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

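/* Lazily create and cache one mqd_manager per MQD type */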
static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
516 517 518
						vmid);
}

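/* Enable interrupts on every MEC1 pipe that has queues reserved for KFD */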
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("SDMA id is:    %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

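/*
 * Tell the HW scheduler which VMIDs and queues it owns. Only queues on the
 * first MEC are handed over, and the 64-bit queue_mask limits this to the
 * first 64 globally numbered queues.
 */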
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
						res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval)
		mutex_destroy(&dqm->lock);

	return retval;
}

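/*
 * Bring up the HW scheduler: initialize the packet manager, hand the
 * scheduling resources to the firmware, allocate the preemption fence in
 * GART memory and map any queues that already exist.
 */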
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

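/* Alternate newly created SDMA queues between the two SDMA engines */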
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

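/*
 * Poll *fence_addr until it reaches fence_value, calling schedule() between
 * reads; gives up with -ETIME once timeout_ms has elapsed.
 */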
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

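/*
 * Preempt queues off the hardware: SDMA queues are unmapped per engine,
 * then one UNMAP_QUEUES packet covers the compute queues matching the
 * filter, and completion is confirmed by fencing through
 * amdkfd_fence_wait_timeout().
 */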
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

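/*
 * Destroy every user mode queue an exiting process still owns and drop the
 * process from the DQM list (no-HWS variant).
 */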
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}


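/*
 * HWS variant of process teardown: retire the process' kernel (debug)
 * queues and user queues, unregister the process, unmap what is left on
 * the runlist and only then free the MQDs.
 */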
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			dqm->sdma_queue_count--;

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}