/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include "kfd_device_queue_manager.h"
#include "kfd_kernel_queue.h"
#include "kfd_priv.h"
#include "kfd_pm4_headers.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"

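/*
 * Advance the runlist IB write pointer, counted in dwords, by
 * increment_bytes. Writing past the end of the buffer is a bug.
 */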
static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
				unsigned int buffer_size_bytes)
{
	unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);

	BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
	*wptr = temp;
}

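/*
 * Build a PM4 type-3 packet header. Per the PM4 format, the count field
 * holds the number of dwords that follow the header, minus one.
 */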
static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32all = 0;
	header.opcode = opcode;
	header.count = packet_size/sizeof(uint32_t) - 2;
	header.type = PM4_TYPE_3;

	return header.u32all;
}

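/*
 * Compute the runlist IB size: one MAP_PROCESS packet per process plus
 * one MAP_QUEUES packet per queue, using the VI packet format on
 * Carrizo. When over-subscribed, reserve room for a chaining RUN_LIST
 * packet at the end of the IB.
 */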
static void pm_calc_rlib_size(struct packet_manager *pm,
				unsigned int *rlib_size,
				bool *over_subscription)
{
	unsigned int process_count, queue_count;
	unsigned int map_queue_size;

	BUG_ON(!pm || !rlib_size || !over_subscription);

	process_count = pm->dqm->processes_count;
	queue_count = pm->dqm->queue_count;

	/* check if there is over subscription*/
	*over_subscription = false;
	if ((process_count > 1) || queue_count > get_queues_num(pm->dqm)) {
		*over_subscription = true;
		pr_debug("Over subscribed runlist\n");
	}

	map_queue_size =
		(pm->dqm->dev->device_info->asic_family == CHIP_CARRIZO) ?
		sizeof(struct pm4_mes_map_queues) :
		sizeof(struct pm4_map_queues);
	/* calculate run list ib allocation size */
	*rlib_size = process_count * sizeof(struct pm4_map_process) +
		     queue_count * map_queue_size;

	/*
	 * Increase the allocation size in case we need a chained run list
	 * when over subscription
	 */
	if (*over_subscription)
		*rlib_size += sizeof(struct pm4_runlist);

	pr_debug("runlist ib size %d\n", *rlib_size);
}

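/*
 * Allocate and zero the runlist IB from the device's GTT sub-allocator
 * and return its CPU and GPU addresses along with the computed size.
 */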
static int pm_allocate_runlist_ib(struct packet_manager *pm,
				unsigned int **rl_buffer,
				uint64_t *rl_gpu_buffer,
				unsigned int *rl_buffer_size,
				bool *is_over_subscription)
{
	int retval;

	BUG_ON(!pm);
	BUG_ON(pm->allocated);
	BUG_ON(!is_over_subscription);

	pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);

	retval = kfd_gtt_sa_allocate(pm->dqm->dev, *rl_buffer_size,
					&pm->ib_buffer_obj);

	if (retval) {
		pr_err("Failed to allocate runlist IB\n");
		return retval;
	}

	*(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
	*rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;

	memset(*rl_buffer, 0, *rl_buffer_size);
	pm->allocated = true;
	return retval;
}

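/*
 * Write a RUN_LIST packet that points the scheduler at the IB at GPU
 * address 'ib'; with 'chain' set, execution continues into that IB.
 */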
static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_runlist *packet;

	BUG_ON(!pm || !buffer || !ib);

	packet = (struct pm4_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_runlist));
	packet->header.u32all = build_pm4_header(IT_RUN_LIST,
						sizeof(struct pm4_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}

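/*
 * Write a MAP_PROCESS packet carrying a process' PASID, memory
 * apertures and GDS allocation. A debug process enables the DIQ and
 * reports zero queues.
 */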
static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_map_process *packet;
	struct queue *cur;
	uint32_t num_queues;

	BUG_ON(!pm || !buffer || !qpd);

	packet = (struct pm4_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_map_process));

	packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	num_queues = 0;
	list_for_each_entry(cur, &qpd->queues_list, list)
		num_queues++;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : num_queues;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}

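/* Write a MAP_QUEUES packet in the VI (Carrizo) packet format. */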
static int pm_create_map_queue_vi(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	BUG_ON(!pm || !buffer || !q);

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.alloc_format =
		alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		pr_err("invalid queue type %d\n", q->properties.type);
		BUG();
		break;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

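/* Write a MAP_QUEUES packet in the pre-VI (e.g. Kaveri) packet format. */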
static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
				struct queue *q, bool is_static)
{
	struct pm4_map_queues *packet;
	bool use_static = is_static;

	BUG_ON(!pm || !buffer || !q);

	packet = (struct pm4_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_map_queues));

	packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
						sizeof(struct pm4_map_queues));
	packet->bitfields2.alloc_format =
				alloc_format__mes_map_queues__one_per_pipe;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;

	packet->bitfields2.vidmem = (q->properties.is_interop) ?
			vidmem__mes_map_queues__uses_video_memory :
			vidmem__mes_map_queues__uses_no_video_memory;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
				engine_sel__mes_map_queues__sdma0;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		BUG();
		break;
	}

	packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mes_map_queues_ordinals[0].bitfields3.is_static =
			(use_static) ? 1 : 0;

	packet->mes_map_queues_ordinals[0].mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->mes_map_queues_ordinals[0].wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->mes_map_queues_ordinals[0].wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

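/*
 * Build the complete runlist IB: for each process, a MAP_PROCESS packet
 * followed by a MAP_QUEUES packet for every active kernel and user
 * queue. On over-subscription, the IB ends with a RUN_LIST packet that
 * chains back to the start of the IB.
 */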
static int pm_create_runlist_ib(struct packet_manager *pm,
				struct list_head *queues,
				uint64_t *rl_gpu_addr,
				size_t *rl_size_bytes)
{
	unsigned int alloc_size_bytes;
	unsigned int *rl_buffer, rl_wptr, i;
	int retval, processes_mapped;
	struct device_process_node *cur;
	struct qcm_process_device *qpd;
	struct queue *q;
	struct kernel_queue *kq;
	bool is_over_subscription;

	BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);

	rl_wptr = retval = processes_mapped = 0;

	retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
				&alloc_size_bytes, &is_over_subscription);
	if (retval)
		return retval;

	*rl_size_bytes = alloc_size_bytes;

	pr_debug("Building runlist ib process count: %d queues count %d\n",
		pm->dqm->processes_count, pm->dqm->queue_count);

	/* build the run list ib packet */
	list_for_each_entry(cur, queues, list) {
		qpd = cur->qpd;
		/* build map process packet */
		if (processes_mapped >= pm->dqm->processes_count) {
			pr_debug("Not enough space left in runlist IB\n");
			pm_release_ib(pm);
			return -ENOMEM;
		}

		retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
		if (retval)
			return retval;

		processes_mapped++;
		inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
				alloc_size_bytes);

		list_for_each_entry(kq, &qpd->priv_queue_list, list) {
			if (!kq->queue->properties.is_active)
				continue;

			pr_debug("static_queue, mapping kernel q %d, is debug status %d\n",
				kq->queue->queue, qpd->is_debug);

			if (pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO)
				retval = pm_create_map_queue_vi(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			else
				retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						kq->queue,
						qpd->is_debug);
			if (retval)
				return retval;

			/* advance by the size of the packet variant written */
			inc_wptr(&rl_wptr,
				(pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO) ?
					sizeof(struct pm4_mes_map_queues) :
					sizeof(struct pm4_map_queues),
				alloc_size_bytes);
		}

		list_for_each_entry(q, &qpd->queues_list, list) {
			if (!q->properties.is_active)
				continue;

			pr_debug("static_queue, mapping user queue %d, is debug status %d\n",
				q->queue, qpd->is_debug);

			if (pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO)
				retval = pm_create_map_queue_vi(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);
			else
				retval = pm_create_map_queue(pm,
						&rl_buffer[rl_wptr],
						q,
						qpd->is_debug);

			if (retval)
				return retval;

			inc_wptr(&rl_wptr,
				(pm->dqm->dev->device_info->asic_family ==
					CHIP_CARRIZO) ?
					sizeof(struct pm4_mes_map_queues) :
					sizeof(struct pm4_map_queues),
				alloc_size_bytes);
		}
	}

	pr_debug("Finished map process and queues to runlist\n");

	if (is_over_subscription)
		pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
				alloc_size_bytes / sizeof(uint32_t), true);

	for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
		pr_debug("0x%2X ", rl_buffer[i]);
	pr_debug("\n");

	return 0;
}

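/* Initialize the packet manager and its HIQ-backed kernel queue. */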
int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	pm->dqm = dqm;
	mutex_init(&pm->lock);
	pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
	if (!pm->priv_queue) {
		mutex_destroy(&pm->lock);
		return -ENOMEM;
	}
	pm->allocated = false;

	return 0;
}

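/* Tear down the packet manager; the counterpart of pm_init(). */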
void pm_uninit(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_destroy(&pm->lock);
	kernel_queue_uninit(pm->priv_queue);
}

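/*
 * Send a SET_RESOURCES packet over the HIQ to tell the HW scheduler
 * which VMIDs, queue slots, GDS, GWS and OAC resources it may use.
 */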
int pm_send_set_resources(struct packet_manager *pm,
				struct scheduling_resources *res)
{
	struct pm4_set_resources *packet;

	BUG_ON(!pm || !res);

	mutex_lock(&pm->lock);
	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					sizeof(*packet) / sizeof(uint32_t),
					(unsigned int **)&packet);
	if (!packet) {
		mutex_unlock(&pm->lock);
		pr_err("Failed to allocate buffer on kernel queue\n");
		return -ENOMEM;
	}

	memset(packet, 0, sizeof(struct pm4_set_resources));
	packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return 0;
}

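/*
 * Build a runlist IB for dqm_queues and submit a RUN_LIST packet over
 * the HIQ that points the HW scheduler at it.
 */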
int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
{
	uint64_t rl_gpu_ib_addr;
	uint32_t *rl_buffer;
	size_t rl_ib_size, packet_size_dwords;
	int retval;

	BUG_ON(!pm || !dqm_queues);

	retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
					&rl_ib_size);
	if (retval)
		goto fail_create_runlist_ib;

	pr_debug("runlist IB address: 0x%llX\n", rl_gpu_ib_addr);

	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
	mutex_lock(&pm->lock);

	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
					packet_size_dwords, &rl_buffer);
	if (retval)
		goto fail_acquire_packet_buffer;

	retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
					rl_ib_size / sizeof(uint32_t), false);
	if (retval)
		goto fail_create_runlist;

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);

	return retval;

fail_create_runlist:
	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
fail_create_runlist_ib:
	pm_release_ib(pm);
	return retval;
}

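/*
 * Send a QUERY_STATUS packet that makes the scheduler write fence_value
 * to fence_address when the packet is processed, so callers can poll
 * that address to synchronize with the scheduler.
 */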
int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
			uint32_t fence_value)
{
	int retval;
	struct pm4_query_status *packet;

	BUG_ON(!pm || !fence_address);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_query_status) / sizeof(uint32_t),
			(unsigned int **)&packet);
	if (retval)
		goto fail_acquire_packet_buffer;

	packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	pm->priv_queue->ops.submit_packet(pm->priv_queue);
	mutex_unlock(&pm->lock);

	return 0;

fail_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

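/*
 * Send an UNMAP_QUEUES packet that preempts (or resets, if requested)
 * the queues selected by the filter: a single queue by doorbell offset,
 * all queues of a PASID, all active queues, or dynamic queues only.
 */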
int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
			enum kfd_preempt_type_filter mode,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	int retval;
	uint32_t *buffer;
	struct pm4_unmap_queues *packet;

	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	retval = pm->priv_queue->ops.acquire_packet_buffer(
			pm->priv_queue,
			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
			&buffer);
	if (retval)
		goto err_acquire_packet_buffer;

	packet = (struct pm4_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_unmap_queues));
	pr_debug("static_queue: unmapping queues: mode is %d , reset is %d , type is %d\n",
		mode, reset, type);
	packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		BUG();
		break;
	}

	if (reset)
		packet->bitfields2.action =
				action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
				action__mes_unmap_queues__preempt_queues;

	switch (mode) {
	case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
		break;
	case KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
				queue_sel__mes_unmap_queues__perform_request_on_dynamic_queues_only;
		break;
	default:
		BUG();
		break;
	}

	pm->priv_queue->ops.submit_packet(pm->priv_queue);

	mutex_unlock(&pm->lock);
	return 0;

err_acquire_packet_buffer:
	mutex_unlock(&pm->lock);
	return retval;
}

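/* Free the runlist IB, if one is currently allocated. */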
void pm_release_ib(struct packet_manager *pm)
{
	BUG_ON(!pm);

	mutex_lock(&pm->lock);
	if (pm->allocated) {
		kfd_gtt_sa_free(pm->dqm->dev, pm->ib_buffer_obj);
		pm->allocated = false;
	}
	mutex_unlock(&pm->lock);
}