/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

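/**
 * amdgpu_gfx_mec_queue_to_bit - Convert a MEC queue to a flat bit index
 *
 * @adev: amdgpu_device pointer
 * @mec: MEC index
 * @pipe: pipe index within the MEC
 * @queue: queue index within the pipe
 *
 * Maps a (mec, pipe, queue) tuple onto the flat bit position used by
 * adev->gfx.mec.queue_bitmap.
 */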
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
				int pipe, int queue)
{
	int bit = 0;

	bit += mec * adev->gfx.mec.num_pipe_per_mec
		* adev->gfx.mec.num_queue_per_pipe;
	bit += pipe * adev->gfx.mec.num_queue_per_pipe;
	bit += queue;

	return bit;
}

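/**
 * amdgpu_queue_mask_bit_to_mec_queue - Convert a flat bit index back to a MEC queue
 *
 * @adev: amdgpu_device pointer
 * @bit: flat bit position in the MEC queue bitmap
 * @mec: pointer to receive the MEC index
 * @pipe: pointer to receive the pipe index within the MEC
 * @queue: pointer to receive the queue index within the pipe
 *
 * Inverse of amdgpu_gfx_mec_queue_to_bit().
 */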
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
				 int *mec, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.mec.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
		% adev->gfx.mec.num_pipe_per_mec;
	*mec = (bit / adev->gfx.mec.num_queue_per_pipe)
	       / adev->gfx.mec.num_pipe_per_mec;

}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
				     int mec, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
			adev->gfx.mec.queue_bitmap);
}

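/**
 * amdgpu_gfx_me_queue_to_bit - Convert an ME (graphics) queue to a flat bit index
 *
 * @adev: amdgpu_device pointer
 * @me: ME index
 * @pipe: pipe index within the ME
 * @queue: queue index within the pipe
 *
 * Graphics counterpart of amdgpu_gfx_mec_queue_to_bit(); the result indexes
 * adev->gfx.me.queue_bitmap.
 */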
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
			       int me, int pipe, int queue)
{
	int bit = 0;

	bit += me * adev->gfx.me.num_pipe_per_me
		* adev->gfx.me.num_queue_per_pipe;
	bit += pipe * adev->gfx.me.num_queue_per_pipe;
	bit += queue;

	return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
				int *me, int *pipe, int *queue)
{
	*queue = bit % adev->gfx.me.num_queue_per_pipe;
	*pipe = (bit / adev->gfx.me.num_queue_per_pipe)
		% adev->gfx.me.num_pipe_per_me;
	*me = (bit / adev->gfx.me.num_queue_per_pipe)
		/ adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
				    int me, int pipe, int queue)
{
	return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
			adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
	int i;

	i = ffs(adev->gfx.scratch.free_mask);
	if (i != 0 && i <= adev->gfx.scratch.num_reg) {
		i--;
		adev->gfx.scratch.free_mask &= ~(1u << i);
		*reg = adev->gfx.scratch.reg_base + i;
		return 0;
	}
	return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
	adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
	unsigned se, sh, cu;
	const char *p;

	memset(mask, 0, sizeof(*mask) * max_se * max_sh);

	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
		return;

	p = amdgpu_disable_cu;
	for (;;) {
		char *next;
		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
		if (ret < 3) {
			DRM_ERROR("amdgpu: could not parse disable_cu\n");
			return;
		}

		if (se < max_se && sh < max_sh && cu < 16) {
			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
			mask[se * max_sh + sh] |= 1u << cu;
		} else {
			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
				  se, sh, cu);
		}

		next = strchr(p, ',');
		if (!next)
			break;
		p = next + 1;
	}
}

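/* Decide whether compute queues should be spread across all MEC pipes.
 * The amdgpu_compute_multipipe module parameter, when set, overrides the
 * default policy.
 */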
static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
	if (amdgpu_compute_multipipe != -1) {
		DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
			 amdgpu_compute_multipipe);
		return amdgpu_compute_multipipe == 1;
	}

	/* FIXME: spreading the queues across pipes causes perf regressions
	 * on POLARIS11 compute workloads */
	if (adev->asic_type == CHIP_POLARIS11)
		return false;

	return adev->gfx.mec.num_mec > 1;
}

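/**
 * amdgpu_gfx_is_high_priority_compute_queue - Check for the high priority compute queue
 *
 * @adev: amdgpu_device pointer
 * @ring: compute ring to check
 *
 * Returns true if @ring is the first compute ring and more than one compute
 * ring is available, i.e. the queue reserved for high priority compute.
 */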
bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
					       struct amdgpu_ring *ring)
{
	/* Policy: use 1st queue as high priority compute queue if we
	 * have more than one compute queue.
	 */
	if (adev->gfx.num_compute_rings > 1 &&
	    ring == &adev->gfx.compute_ring[0])
		return true;

	return false;
}

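/**
 * amdgpu_gfx_compute_queue_acquire - Select the compute queues owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the compute (MEC) queues the driver will use in
 * adev->gfx.mec.queue_bitmap, either spread evenly across the pipes of MEC1
 * or allocated sequentially from the first pipe, depending on the multipipe
 * policy.
 */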
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, pipe;
	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
				     adev->gfx.mec.num_queue_per_pipe,
				     adev->gfx.num_compute_rings);

	if (multipipe_policy) {
		/* policy: spread queues evenly across all pipes on MEC1 only */
		for (i = 0; i < max_queues_per_mec; i++) {
			pipe = i % adev->gfx.mec.num_pipe_per_mec;
			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
				adev->gfx.mec.num_queue_per_pipe;

			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
					adev->gfx.mec.queue_bitmap);
		}
	} else {
		/* policy: amdgpu owns all queues in the given pipe */
		for (i = 0; i < max_queues_per_mec; ++i)
			set_bit(i, adev->gfx.mec.queue_bitmap);
	}

	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}

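/**
 * amdgpu_gfx_graphics_queue_acquire - Select the graphics queues owned by amdgpu
 *
 * @adev: amdgpu_device pointer
 *
 * Marks the graphics (ME) queues the driver will use in
 * adev->gfx.me.queue_bitmap and updates adev->gfx.num_gfx_rings accordingly.
 */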
void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
	int i, queue, me;

	for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
		queue = i % adev->gfx.me.num_queue_per_pipe;
		me = (i / adev->gfx.me.num_queue_per_pipe)
		      / adev->gfx.me.num_pipe_per_me;

		if (me >= adev->gfx.me.num_me)
			break;
		/* policy: amdgpu owns the first queue per pipe at this stage;
		 * this will be extended to multiple queues per pipe later */
		if (me == 0 && queue < 1)
			set_bit(i, adev->gfx.me.queue_bitmap);
	}

	/* update the number of active graphics rings */
	adev->gfx.num_gfx_rings =
		bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

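/* Pick an MEC queue that is not already claimed in the compute queue bitmap
 * for the KIQ ring, subject to the hardware constraints noted below.
 * Returns 0 on success or -EINVAL if no suitable queue is found.
 */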
static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
				  struct amdgpu_ring *ring)
{
	int queue_bit;
	int mec, pipe, queue;

	queue_bit = adev->gfx.mec.num_mec
		    * adev->gfx.mec.num_pipe_per_mec
		    * adev->gfx.mec.num_queue_per_pipe;

	while (queue_bit-- >= 0) {
		if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
			continue;

		amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

		/*
		 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
		 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
		 *    can only be issued on queue 0.
		 */
		if ((mec == 1 && pipe > 1) || queue != 0)
			continue;

		ring->me = mec + 1;
		ring->pipe = pipe;
		ring->queue = queue;

		return 0;
	}

	dev_err(adev->dev, "Failed to find a queue for KIQ\n");
	return -EINVAL;
}

int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	int r = 0;

	spin_lock_init(&kiq->ring_lock);

	ring->adev = NULL;
	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = adev->doorbell_index.kiq;

	r = amdgpu_gfx_kiq_acquire(adev, ring);
	if (r)
		return r;

	ring->eop_gpu_addr = kiq->eop_gpu_addr;
	ring->no_scheduler = true;
	sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
	r = amdgpu_ring_init(adev, ring, 1024,
			     irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
			     AMDGPU_RING_PRIO_DEFAULT);
	if (r)
		dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

	return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
	amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size)
{
	int r;
	u32 *hpd;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

	r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
				    &kiq->eop_gpu_addr, (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
		return r;
	}

	memset(hpd, 0, hpd_size);

	r = amdgpu_bo_reserve(kiq->eop_obj, true);
	if (unlikely(r != 0))
		dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
	amdgpu_bo_kunmap(kiq->eop_obj);
	amdgpu_bo_unreserve(kiq->eop_obj);

	return 0;
}

/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
			   unsigned mqd_size)
{
	struct amdgpu_ring *ring = NULL;
	int r, i;

	/* create MQD for KIQ */
	ring = &adev->gfx.kiq.ring;
	if (!ring->mqd_obj) {
		/* Originally the KIQ MQD was placed in the GTT domain, but for SRIOV
		 * the VRAM domain is a must: otherwise the hypervisor's SAVE_VF fails
		 * after the driver is unloaded, because the MQD has been deallocated
		 * and the GART unbound. To avoid divergence, use the VRAM domain for
		 * the KIQ MQD on both SRIOV and bare metal.
		 */
		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
		if (r) {
			dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
			return r;
		}

		/* prepare MQD backup */
		adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
		if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
			dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
	}

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		/* create MQD for each KGQ */
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			if (!ring->mqd_obj) {
				r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
							    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
							    &ring->mqd_gpu_addr, &ring->mqd_ptr);
				if (r) {
					dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
					return r;
				}

				/* prepare MQD backup */
				adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
				if (!adev->gfx.me.mqd_backup[i])
					dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
			}
		}
	}

	/* create MQD for each KCQ */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		if (!ring->mqd_obj) {
			r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
						    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
						    &ring->mqd_gpu_addr, &ring->mqd_ptr);
			if (r) {
				dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
				return r;
			}

			/* prepare MQD backup */
			adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
			if (!adev->gfx.mec.mqd_backup[i])
				dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
		}
	}

	return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = NULL;
	int i;

	if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
		for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
			ring = &adev->gfx.gfx_ring[i];
			kfree(adev->gfx.me.mqd_backup[i]);
			amdgpu_bo_free_kernel(&ring->mqd_obj,
					      &ring->mqd_gpu_addr,
					      &ring->mqd_ptr);
		}
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		kfree(adev->gfx.mec.mqd_backup[i]);
		amdgpu_bo_free_kernel(&ring->mqd_obj,
				      &ring->mqd_gpu_addr,
				      &ring->mqd_ptr);
	}

	ring = &adev->gfx.kiq.ring;
	kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
	amdgpu_bo_free_kernel(&ring->mqd_obj,
			      &ring->mqd_gpu_addr,
			      &ring->mqd_ptr);
}

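/**
 * amdgpu_gfx_disable_kcq - Unmap all kernel compute queues via the KIQ
 *
 * @adev: amdgpu_device pointer
 *
 * Issues an unmap (RESET_QUEUES) packet for every kernel compute ring on the
 * KIQ ring and waits for the KIQ to process it.
 */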
int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &kiq->ring;
	int i;

	if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
		return -EINVAL;

	if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
					adev->gfx.num_compute_rings))
		return -ENOMEM;

	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
					   RESET_QUEUES, 0, 0);

	return amdgpu_ring_test_helper(kiq_ring);
}

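/* Translate a MEC queue bitmap bit into the bit position used in the queue
 * mask handed to the KIQ SET_RESOURCES packet (8 queues per pipe, 4 pipes
 * per MEC).
 */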
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit)
{
	int mec, pipe, queue;
	int set_resource_bit = 0;

	amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

	set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

	return set_resource_bit;
}

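/**
 * amdgpu_gfx_enable_kcq - Map all kernel compute queues via the KIQ
 *
 * @adev: amdgpu_device pointer
 *
 * Builds the queue mask from the MEC queue bitmap, issues a SET_RESOURCES
 * packet followed by a map packet for every kernel compute ring, and waits
 * for the KIQ to process them.
 */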
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
	uint64_t queue_mask = 0;
	int r, i;

	if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
		return -EINVAL;

	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
		if (!test_bit(i, adev->gfx.mec.queue_bitmap))
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of queue_mask needs updating */
		if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
			DRM_ERROR("Invalid KCQ enabled: %d\n", i);
			break;
		}

		queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
	}

	DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
							kiq_ring->queue);

	r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
					adev->gfx.num_compute_rings +
					kiq->pmf->set_resources_size);
	if (r) {
		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
		return r;
	}

	kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

	r = amdgpu_ring_test_helper(kiq_ring);
	if (r)
		DRM_ERROR("KCQ enable failed\n");

	return r;
}

/* amdgpu_gfx_off_ctrl - Handle GFX OFF feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the GFX OFF feature, false to disable it
 *
 * 1. The GFX OFF feature is enabled by the GFX IP after GFX CG/PG has been enabled.
 * 2. Other clients can send a request to disable the GFX OFF feature; such requests must be honored.
 * 3. Other clients can cancel their request to disable the GFX OFF feature.
 * 4. Other clients must not request enabling the GFX OFF feature before first requesting to disable it.
 */

void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	if (!enable)
		adev->gfx.gfx_off_req_count++;
	else if (adev->gfx.gfx_off_req_count > 0)
		adev->gfx.gfx_off_req_count--;

	if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
	} else if (!enable && adev->gfx.gfx_off_state) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
			adev->gfx.gfx_off_state = false;

			if (adev->gfx.funcs->init_spm_golden) {
				dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
				amdgpu_gfx_init_spm_golden(adev);
			}
		}
	}

	mutex_unlock(&adev->gfx.gfx_off_mutex);
}

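/* Query the current GFXOFF status from the SMU. The result is returned in
 * *value; the call is serialized against amdgpu_gfx_off_ctrl() by the
 * gfx_off_mutex.
 */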
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{

	int r = 0;

	mutex_lock(&adev->gfx.gfx_off_mutex);

	r = smu_get_status_gfxoff(adev, value);

	mutex_unlock(&adev->gfx.gfx_off_mutex);

	return r;
}

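/**
 * amdgpu_gfx_ras_late_init - Late init for GFX RAS support
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates and registers the GFX ras_if, queries or resets the RAS error
 * status, and enables the CP ECC error interrupt when GFX RAS is supported;
 * the ras_if is freed again when it is not.
 */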
int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_fs_if fs_info = {
		.sysfs_name = "gfx_err_count",
	};
	struct ras_ih_if ih_info = {
		.cb = amdgpu_gfx_process_ras_data_cb,
	};
	struct ras_query_if info = { 0 };

	if (!adev->gfx.ras_if) {
		adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gfx.ras_if)
			return -ENOMEM;
		adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
		adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gfx.ras_if->sub_block_index = 0;
		strcpy(adev->gfx.ras_if->name, "gfx");
	}
	fs_info.head = ih_info.head = *adev->gfx.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
				 &fs_info, &ih_info);
	if (r)
		goto free;

	if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
		if (adev->gmc.xgmi.connected_to_cpu) {
			info.head = *adev->gfx.ras_if;
			amdgpu_ras_query_error_status(adev, &info);
		} else {
			amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
		}

		r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
		if (r)
			goto late_fini;
	} else {
		/* free gfx ras_if if ras is not supported */
		r = 0;
		goto free;
	}

	return 0;
late_fini:
	amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
	kfree(adev->gfx.ras_if);
	adev->gfx.ras_if = NULL;
	return r;
}

void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
			adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
			.cb = amdgpu_gfx_process_ras_data_cb,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry)
{
	/* TODO: a UE will trigger an interrupt.
	 *
	 * When "Full RAS" is enabled, the per-IP interrupt sources should
	 * be disabled and the driver should only look for the aggregated
	 * interrupt via sync flood.
	 */
	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
		kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
		if (adev->gfx.funcs->query_ras_error_count)
			adev->gfx.funcs->query_ras_error_count(adev, err_data);
		amdgpu_ras_reset_gpu(adev);
	}
	return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	struct ras_common_if *ras_if = adev->gfx.ras_if;
	struct ras_dispatch_if ih_data = {
		.entry = entry,
	};

	if (!ras_if)
		return 0;

	ih_data.head = *ras_if;

	DRM_ERROR("CP ECC ERROR IRQ\n");
	amdgpu_ras_interrupt_dispatch(adev, &ih_data);
	return 0;
}

uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq, reg_val_offs = 0, value = 0;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	if (adev->in_pci_err_recovery)
		return 0;

	BUG_ON(!ring->funcs->emit_rreg);

	spin_lock_irqsave(&kiq->ring_lock, flags);
	if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
		pr_err("critical bug! too many kiq readers\n");
		goto failed_unlock;
	}
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_read;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_read;

	mb();
	value = adev->wb.wb[reg_val_offs];
	amdgpu_device_wb_free(adev, reg_val_offs);
	return value;

failed_undo:
	amdgpu_ring_undo(ring);
failed_unlock:
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
	if (reg_val_offs)
		amdgpu_device_wb_free(adev, reg_val_offs);
	dev_err(adev->dev, "failed to read reg:%x\n", reg);
	return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;

	BUG_ON(!ring->funcs->emit_wreg);

	if (adev->in_pci_err_recovery)
		return;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_wreg(ring, reg, v);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* Don't wait any longer in the GPU reset case, because doing so may
	 * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
	 * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
	 * never return if we keep waiting in virt_kiq_rreg, which causes
	 * gpu_recover() to hang there.
	 *
	 * Also don't wait any longer when called from IRQ context.
	 */
	if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
		goto failed_kiq_write;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {

		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq_write;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
	dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
	if (amdgpu_num_kcq == -1) {
		return 8;
	} else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
		dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
		return 8;
	}
	return amdgpu_num_kcq;
}

/* amdgpu_gfx_state_change_set - Handle a gfx power state change request
 *
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */

void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
	if (is_support_sw_smu(adev)) {
		smu_gfx_state_change_set(&adev->smu, state);
	} else {
		mutex_lock(&adev->pm.mutex);
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->gfx_state_change_set)
			((adev)->powerplay.pp_funcs->gfx_state_change_set(
				(adev)->powerplay.pp_handle, state));
		mutex_unlock(&adev->pm.mutex);
	}
}