/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/power_supply.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/slab.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/amdgpu_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_i2c.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amd_pcie.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "si.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "cik.h"
#endif
#include "vi.h"
#include "soc15.h"
#include "nv.h"
#include "bif/bif_4_1_d.h"
#include <linux/pci.h>
#include <linux/firmware.h>
#include "amdgpu_vf_error.h"

#include "amdgpu_amdkfd.h"
#include "amdgpu_pm.h"

#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "amdgpu_pmu.h"
#include "amdgpu_fru_eeprom.h"

#include <linux/suspend.h>
#include <drm/task_barrier.h>
#include <linux/pm_runtime.h>

MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/picasso_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/raven2_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/arcturus_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin");
MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin");

#define AMDGPU_RESUME_MS		2000

const char *amdgpu_asic_name[] = {
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"TOPAZ",
	"TONGA",
	"FIJI",
	"CARRIZO",
	"STONEY",
	"POLARIS10",
	"POLARIS11",
	"POLARIS12",
	"VEGAM",
	"VEGA10",
	"VEGA12",
	"VEGA20",
	"RAVEN",
	"ARCTURUS",
	"RENOIR",
	"ALDEBARAN",
	"NAVI10",
	"NAVI14",
	"NAVI12",
	"SIENNA_CICHLID",
	"NAVY_FLOUNDER",
	"VANGOGH",
	"DIMGREY_CAVEFISH",
	"LAST",
};

/**
 * DOC: pcie_replay_count
 *
 * The amdgpu driver provides a sysfs API for reporting the total number
 * of PCIe replays (NAKs).
 * The file pcie_replay_count is used for this and returns the total
 * number of replays as the sum of the NAKs generated and NAKs received.
 */

static ssize_t amdgpu_device_get_pcie_replay_count(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint64_t cnt = amdgpu_asic_get_pcie_replay_count(adev);

	return snprintf(buf, PAGE_SIZE, "%llu\n", cnt);
}

static DEVICE_ATTR(pcie_replay_count, S_IRUGO,
		amdgpu_device_get_pcie_replay_count, NULL);
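
/*
 * Usage sketch (illustrative, assuming the GPU is card0): the attribute is
 * read like any other sysfs file, e.g.
 *
 *	cat /sys/class/drm/card0/device/pcie_replay_count
 */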

static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);

/**
 * DOC: product_name
 *
 * The amdgpu driver provides a sysfs API for reporting the product name
 * for the device.
 * The file product_name is used for this and returns the product name
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_name(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_name);
}

static DEVICE_ATTR(product_name, S_IRUGO,
		amdgpu_device_get_product_name, NULL);

/**
 * DOC: product_number
 *
 * The amdgpu driver provides a sysfs API for reporting the part number
 * for the device.
 * The file product_number is used for this and returns the part number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_product_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->product_number);
}

static DEVICE_ATTR(product_number, S_IRUGO,
		amdgpu_device_get_product_number, NULL);

/**
 * DOC: serial_number
 *
 * The amdgpu driver provides a sysfs API for reporting the serial number
 * for the device.
 * The file serial_number is used for this and returns the serial number
 * as returned from the FRU.
 * NOTE: This is only available for certain server cards
 */

static ssize_t amdgpu_device_get_serial_number(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return snprintf(buf, PAGE_SIZE, "%s\n", adev->serial);
}

static DEVICE_ATTR(serial_number, S_IRUGO,
		amdgpu_device_get_serial_number, NULL);
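
/*
 * Usage sketch (illustrative, assuming the GPU is card0 and the board
 * carries a FRU EEPROM): the three FRU-backed attributes sit side by side
 * under the PCI device node, e.g.
 *
 *	cat /sys/class/drm/card0/device/product_name
 *	cat /sys/class/drm/card0/device/product_number
 *	cat /sys/class/drm/card0/device/serial_number
 */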

/**
 * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ATPX power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_px(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
		return true;
	return false;
}

/**
 * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
bool amdgpu_device_supports_boco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (adev->has_pr3 ||
	    ((adev->flags & AMD_IS_PX) && amdgpu_is_atpx_hybrid()))
		return true;
	return false;
}

/**
 * amdgpu_device_supports_baco - Does the device support BACO
 *
 * @dev: drm_device pointer
 *
 * Returns true if the device supports BACO,
 * otherwise return false.
 */
bool amdgpu_device_supports_baco(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);

	return amdgpu_asic_supports_baco(adev);
}

/*
 * VRAM access helper functions
 */

/**
 * amdgpu_device_vram_access - read/write a buffer in vram
 *
 * @adev: amdgpu_device pointer
 * @pos: offset of the buffer in vram
 * @buf: virtual address of the buffer in system memory
 * @size: read/write size in bytes; the buffer at @buf must be at least @size bytes
 * @write: true - write to vram, otherwise - read from vram
 */
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
			       uint32_t *buf, size_t size, bool write)
{
	unsigned long flags;
	uint32_t hi = ~0;
	uint64_t last;


#ifdef CONFIG_64BIT
	last = min(pos + size, adev->gmc.visible_vram_size);
	if (last > pos) {
		void __iomem *addr = adev->mman.aper_base_kaddr + pos;
		size_t count = last - pos;

		if (write) {
			memcpy_toio(addr, buf, count);
			mb();
			amdgpu_asic_flush_hdp(adev, NULL);
		} else {
			amdgpu_asic_invalidate_hdp(adev, NULL);
			mb();
			memcpy_fromio(buf, addr, count);
		}

		if (count == size)
			return;

		pos += count;
		buf += count / 4;
		size -= count;
	}
#endif

	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
	for (last = pos + size; pos < last; pos += 4) {
		uint32_t tmp = pos >> 31;

		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
		if (tmp != hi) {
			WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
			hi = tmp;
		}
		if (write)
			WREG32_NO_KIQ(mmMM_DATA, *buf++);
		else
			*buf++ = RREG32_NO_KIQ(mmMM_DATA);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
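
/*
 * Usage sketch (illustrative, not part of the driver): reading the first
 * 32 bytes of VRAM into a dword buffer. The helper transparently picks the
 * CPU-visible BAR fast path or the MM_INDEX/MM_DATA fallback above.
 *
 *	uint32_t data[8];
 *
 *	amdgpu_device_vram_access(adev, 0, data, sizeof(data), false);
 */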

/*
 * register access helper functions.
 */

/* Check if hw access should be skipped because of hotplug or device error */
bool amdgpu_device_skip_hw_access(struct amdgpu_device *adev)
{
	if (adev->in_pci_err_recovery)
		return true;

#ifdef CONFIG_LOCKDEP
	/*
	 * This is a bit complicated to understand, so worth a comment. What we assert
	 * here is that the GPU reset is not running on another thread in parallel.
	 *
	 * For this we trylock the read side of the reset semaphore; if that succeeds,
	 * we know that the reset is not running in parallel.
	 *
	 * If the trylock fails we assert that we are either already holding the read
	 * side of the lock or are the reset thread itself and hold the write side of
	 * the lock.
	 */
	if (in_task()) {
		if (down_read_trylock(&adev->reset_sem))
			up_read(&adev->reset_sem);
		else
			lockdep_assert_held(&adev->reset_sem);
	}
#endif
	return false;
}

/**
 * amdgpu_device_rreg - read a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @acc_flags: access flags which require special behavior
 *
 * Returns the 32 bit value from the offset specified.
 */
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
			    uint32_t reg, uint32_t acc_flags)
{
	uint32_t ret;

	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			ret = amdgpu_kiq_rreg(adev, reg);
			up_read(&adev->reset_sem);
		} else {
			ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		ret = adev->pcie_rreg(adev, reg * 4);
	}

	trace_amdgpu_device_rreg(adev->pdev->device, reg, ret);

	return ret;
}

/*
 * MMIO register read with byte offset helper functions
 * @offset: byte offset from MMIO start
 */

/**
 * amdgpu_mm_rreg8 - read a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 *
 * Returns the 8 bit value from the offset specified.
 */
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (offset < adev->rmmio_size)
		return (readb(adev->rmmio + offset));
	BUG();
}

/*
 * MMIO register write with byte offset helper functions
 * @offset: byte offset from MMIO start
 * @value: the value to be written to the register
 */
/**
 * amdgpu_mm_wreg8 - write a memory mapped IO register
 *
 * @adev: amdgpu_device pointer
 * @offset: byte aligned register offset
 * @value: 8 bit value to write
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (offset < adev->rmmio_size)
		writeb(value, adev->rmmio + offset);
	else
		BUG();
}

/**
 * amdgpu_device_wreg - write to a memory mapped IO or indirect register
 *
 * @adev: amdgpu_device pointer
 * @reg: dword aligned register offset
 * @v: 32 bit value to write to the register
 * @acc_flags: access flags which require special behavior
 *
 * Writes the value specified to the offset specified.
 */
void amdgpu_device_wreg(struct amdgpu_device *adev,
			uint32_t reg, uint32_t v,
			uint32_t acc_flags)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if ((reg * 4) < adev->rmmio_size) {
		if (!(acc_flags & AMDGPU_REGS_NO_KIQ) &&
		    amdgpu_sriov_runtime(adev) &&
		    down_read_trylock(&adev->reset_sem)) {
			amdgpu_kiq_wreg(adev, reg, v);
			up_read(&adev->reset_sem);
		} else {
			writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
		}
	} else {
		adev->pcie_wreg(adev, reg * 4, v);
	}

	trace_amdgpu_device_wreg(adev->pdev->device, reg, v);
}
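
/*
 * Usage sketch (illustrative; SOME_REG and SOME_ENABLE_BIT are made-up
 * names): a read-modify-write built from the two helpers above. Offsets
 * beyond rmmio_size transparently take the indirect PCIE path.
 *
 *	uint32_t tmp;
 *
 *	tmp = amdgpu_device_rreg(adev, SOME_REG, 0);
 *	tmp |= SOME_ENABLE_BIT;
 *	amdgpu_device_wreg(adev, SOME_REG, tmp, 0);
 */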

/*
 * amdgpu_mm_wreg_mmio_rlc - write a register either via MMIO or via the RLC path if in range
 *
 * This function is invoked only for debugfs register access.
 */
void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
			     uint32_t reg, uint32_t v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (amdgpu_sriov_fullaccess(adev) &&
	    adev->gfx.rlc.funcs &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range) {
		if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
			return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
	} else {
		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
	}
}

/**
 * amdgpu_mm_rdoorbell - read a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (CIK).
 */
u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return readl(adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell - write a doorbell dword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (CIK).
 */
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		writel(v, adev->doorbell.ptr + index);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}

/**
 * amdgpu_mm_rdoorbell64 - read a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 *
 * Returns the value in the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index)
{
	if (amdgpu_device_skip_hw_access(adev))
		return 0;

	if (index < adev->doorbell.num_doorbells) {
		return atomic64_read((atomic64_t *)(adev->doorbell.ptr + index));
	} else {
		DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index);
		return 0;
	}
}

/**
 * amdgpu_mm_wdoorbell64 - write a doorbell Qword
 *
 * @adev: amdgpu_device pointer
 * @index: doorbell index
 * @v: value to write
 *
 * Writes @v to the doorbell aperture at the
 * requested doorbell index (VEGA10+).
 */
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v)
{
	if (amdgpu_device_skip_hw_access(adev))
		return;

	if (index < adev->doorbell.num_doorbells) {
		atomic64_set((atomic64_t *)(adev->doorbell.ptr + index), v);
	} else {
		DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index);
	}
}
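
/*
 * Usage sketch (illustrative): a ring backend typically records its doorbell
 * index at init time and then kicks the engine by publishing the new write
 * pointer through one of the helpers above, e.g.
 *
 *	amdgpu_mm_wdoorbell64(adev, ring->doorbell_index, ring->wptr);
 */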

/**
 * amdgpu_device_indirect_rreg - read an indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev,
				u32 pcie_index, u32 pcie_data,
				u32 reg_addr)
{
	unsigned long flags;
	u32 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_rreg64 - read a 64bits indirect register
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register address to read from
 *
 * Returns the value of indirect register @reg_addr
 */
u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev,
				  u32 pcie_index, u32 pcie_data,
				  u32 reg_addr)
{
	unsigned long flags;
	u64 r;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* read low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	r = readl(pcie_data_offset);
	/* read high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	r |= ((u64)readl(pcie_data_offset) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);

	return r;
}

/**
 * amdgpu_device_indirect_wreg - write an indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg(struct amdgpu_device *adev,
				 u32 pcie_index, u32 pcie_data,
				 u32 reg_addr, u32 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel(reg_data, pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

/**
 * amdgpu_device_indirect_wreg64 - write a 64bits indirect register address
 *
 * @adev: amdgpu_device pointer
 * @pcie_index: mmio register offset
 * @pcie_data: mmio register offset
 * @reg_addr: indirect register offset
 * @reg_data: indirect register data
 *
 */
void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
				   u32 pcie_index, u32 pcie_data,
				   u32 reg_addr, u64 reg_data)
{
	unsigned long flags;
	void __iomem *pcie_index_offset;
	void __iomem *pcie_data_offset;

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	pcie_index_offset = (void __iomem *)adev->rmmio + pcie_index * 4;
	pcie_data_offset = (void __iomem *)adev->rmmio + pcie_data * 4;

	/* write low 32 bits */
	writel(reg_addr, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data & 0xffffffffULL), pcie_data_offset);
	readl(pcie_data_offset);
	/* write high 32 bits */
	writel(reg_addr + 4, pcie_index_offset);
	readl(pcie_index_offset);
	writel((u32)(reg_data >> 32), pcie_data_offset);
	readl(pcie_data_offset);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
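
/*
 * Usage sketch (illustrative; soc_pcie_rreg is a hypothetical wrapper
 * modeled on how the SOC code wires these helpers up): an asic looks up its
 * PCIE index/data offsets and installs a thin wrapper as adev->pcie_rreg,
 * so out-of-range MMIO accesses fall through to the indirect path.
 *
 *	static u32 soc_pcie_rreg(struct amdgpu_device *adev, u32 reg)
 *	{
 *		u32 pcie_index = adev->nbio.funcs->get_pcie_index_offset(adev);
 *		u32 pcie_data = adev->nbio.funcs->get_pcie_data_offset(adev);
 *
 *		return amdgpu_device_indirect_rreg(adev, pcie_index, pcie_data, reg);
 *	}
 */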

/**
 * amdgpu_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_invalid_rreg64 - dummy 64 bit reg read function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
	BUG();
	return 0;
}

/**
 * amdgpu_invalid_wreg64 - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
{
	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
		  reg, v);
	BUG();
}

/**
 * amdgpu_block_invalid_rreg - dummy reg read function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 *
 * Dummy register read function.  Used for register blocks
 * that certain asics don't have (all asics).
 * Returns the value in the register.
 */
static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
					  uint32_t block, uint32_t reg)
{
	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
		  reg, block);
	BUG();
	return 0;
}

/**
 * amdgpu_block_invalid_wreg - dummy reg write function
 *
 * @adev: amdgpu_device pointer
 * @block: offset of instance
 * @reg: offset of register
 * @v: value to write to the register
 *
 * Dummy register write function.  Used for register blocks
 * that certain asics don't have (all asics).
 */
static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
				      uint32_t block,
				      uint32_t reg, uint32_t v)
{
	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
		  reg, block, v);
	BUG();
}

/**
 * amdgpu_device_asic_init - Wrapper for atom asic_init
 *
 * @adev: amdgpu_device pointer
 *
 * Does any asic specific work and then calls atom asic init.
 */
static int amdgpu_device_asic_init(struct amdgpu_device *adev)
{
	amdgpu_asic_pre_asic_init(adev);

	return amdgpu_atom_asic_init(adev->mode_info.atom_context);
}

/**
 * amdgpu_device_vram_scratch_init - allocate the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Allocates a scratch page of VRAM for use by various things in the
 * driver.
 */
static int amdgpu_device_vram_scratch_init(struct amdgpu_device *adev)
{
	return amdgpu_bo_create_kernel(adev, AMDGPU_GPU_PAGE_SIZE,
				       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
				       &adev->vram_scratch.robj,
				       &adev->vram_scratch.gpu_addr,
				       (void **)&adev->vram_scratch.ptr);
}

/**
 * amdgpu_device_vram_scratch_fini - Free the VRAM scratch page
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the VRAM scratch page.
 */
static void amdgpu_device_vram_scratch_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->vram_scratch.robj, NULL, NULL);
}

/**
 * amdgpu_device_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @registers: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size)
{
	u32 tmp, reg, and_mask, or_mask;
	int i;

	if (array_size % 3)
		return;

	for (i = 0; i < array_size; i += 3) {
		reg = registers[i + 0];
		and_mask = registers[i + 1];
		or_mask = registers[i + 2];

		if (and_mask == 0xffffffff) {
			tmp = or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~and_mask;
			if (adev->family >= AMDGPU_FAMILY_AI)
				tmp |= (or_mask & and_mask);
			else
				tmp |= or_mask;
		}
		WREG32(reg, tmp);
	}
}
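
/*
 * Usage sketch (illustrative; the offsets and masks are made up): entries
 * are consumed as {reg, and_mask, or_mask} triples, where an and_mask of
 * 0xffffffff means "overwrite the whole register".
 *
 *	static const u32 fake_golden_settings[] = {
 *		0x1234, 0xffffffff, 0x00000001,
 *		0x5678, 0x0000ff00, 0x00003400,
 *	};
 *
 *	amdgpu_device_program_register_sequence(adev, fake_golden_settings,
 *						ARRAY_SIZE(fake_golden_settings));
 */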

/**
 * amdgpu_device_pci_config_reset - reset the GPU
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using the pci config reset sequence.
 * Only applicable to asics prior to vega10.
 */
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev)
{
	pci_write_config_dword(adev->pdev, 0x7c, AMDGPU_ASIC_RESET_DATA);
}

/**
 * amdgpu_device_pci_reset - reset the GPU using generic PCI means
 *
 * @adev: amdgpu_device pointer
 *
 * Resets the GPU using generic pci reset interfaces (FLR, SBR, etc.).
 */
int amdgpu_device_pci_reset(struct amdgpu_device *adev)
{
	return pci_reset_function(adev->pdev);
}

/*
 * GPU doorbell aperture helper functions.
 */
/**
 * amdgpu_device_doorbell_init - Init doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int amdgpu_device_doorbell_init(struct amdgpu_device *adev)
{

	/* No doorbell on SI hardware generation */
	if (adev->asic_type < CHIP_BONAIRE) {
		adev->doorbell.base = 0;
		adev->doorbell.size = 0;
		adev->doorbell.num_doorbells = 0;
		adev->doorbell.ptr = NULL;
		return 0;
	}

	if (pci_resource_flags(adev->pdev, 2) & IORESOURCE_UNSET)
		return -EINVAL;

	amdgpu_asic_init_doorbell_index(adev);

	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);

	adev->doorbell.num_doorbells = min_t(u32, adev->doorbell.size / sizeof(u32),
					     adev->doorbell_index.max_assignment+1);
	if (adev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* For Vega, reserve and map two pages on doorbell BAR since SDMA
	 * paging queue doorbell uses the second page. The
	 * AMDGPU_DOORBELL64_MAX_ASSIGNMENT definition assumes all the
	 * doorbells are in the first page. So with the paging queue enabled,
	 * num_doorbells must grow by one extra page (0x400 dwords)
	 */
	if (adev->asic_type >= CHIP_VEGA10)
		adev->doorbell.num_doorbells += 0x400;

	adev->doorbell.ptr = ioremap(adev->doorbell.base,
				     adev->doorbell.num_doorbells *
				     sizeof(u32));
	if (adev->doorbell.ptr == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * amdgpu_device_doorbell_fini - Tear down doorbell driver information.
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down doorbell driver information (CIK)
 */
static void amdgpu_device_doorbell_fini(struct amdgpu_device *adev)
{
	iounmap(adev->doorbell.ptr);
	adev->doorbell.ptr = NULL;
}

/*
 * amdgpu_device_wb_*()
 * Writeback is the method by which the GPU updates special pages in memory
 * with the status of certain GPU events (fences, ring pointers, etc.).
 */

/**
 * amdgpu_device_wb_fini - Disable Writeback and free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
static void amdgpu_device_wb_fini(struct amdgpu_device *adev)
{
	if (adev->wb.wb_obj) {
		amdgpu_bo_free_kernel(&adev->wb.wb_obj,
				      &adev->wb.gpu_addr,
				      (void **)&adev->wb.wb);
		adev->wb.wb_obj = NULL;
	}
}

/**
 * amdgpu_device_wb_init - Init Writeback driver info and allocate memory
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes writeback and allocates writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
static int amdgpu_device_wb_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->wb.wb_obj == NULL) {
		/* AMDGPU_MAX_WB * sizeof(uint32_t) * 8 = AMDGPU_MAX_WB 256bit slots */
		r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * sizeof(uint32_t) * 8,
					    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
					    &adev->wb.wb_obj, &adev->wb.gpu_addr,
					    (void **)&adev->wb.wb);
		if (r) {
			dev_warn(adev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}

		adev->wb.num_wb = AMDGPU_MAX_WB;
		memset(&adev->wb.used, 0, sizeof(adev->wb.used));

		/* clear wb memory */
		memset((char *)adev->wb.wb, 0, AMDGPU_MAX_WB * sizeof(uint32_t) * 8);
	}

	return 0;
}

/**
 * amdgpu_device_wb_get - Allocate a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Allocate a wb slot for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb)
{
	unsigned long offset = find_first_zero_bit(adev->wb.used, adev->wb.num_wb);

	if (offset < adev->wb.num_wb) {
		__set_bit(offset, adev->wb.used);
		*wb = offset << 3; /* convert to dw offset */
		return 0;
	} else {
		return -EINVAL;
	}
}

/**
 * amdgpu_device_wb_free - Free a wb entry
 *
 * @adev: amdgpu_device pointer
 * @wb: wb index
 *
 * Free a wb slot allocated for use by the driver (all asics)
 */
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb)
{
	wb >>= 3;
	if (wb < adev->wb.num_wb)
		__clear_bit(wb, adev->wb.used);
}
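
/*
 * Usage sketch (illustrative): a consumer grabs a slot at init, derives the
 * CPU and GPU addresses from the returned dword offset, and releases the
 * slot on teardown.
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		volatile uint32_t *cpu_addr = &adev->wb.wb[wb];
 *		uint64_t gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *
 *		(the GPU writes status to gpu_addr; the driver polls *cpu_addr)
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */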

/**
 * amdgpu_device_resize_fb_bar - try to resize FB BAR
 *
 * @adev: amdgpu_device pointer
 *
 * Try to resize FB BAR to make all VRAM CPU accessible. We try very hard not
 * to fail, but if any of the BARs is not accessible after the resize we abort
 * driver loading by returning -ENODEV.
 */
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
{
	int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size);
	struct pci_bus *root;
	struct resource *res;
	unsigned i;
	u16 cmd;
	int r;

	/* Bypass for VF */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* skip if the bios has already enabled large BAR */
	if (adev->gmc.real_vram_size &&
	    (pci_resource_len(adev->pdev, 0) >= adev->gmc.real_vram_size))
		return 0;

	/* Check if the root BUS has 64bit memory resources */
	root = adev->pdev->bus;
	while (root->parent)
		root = root->parent;

	pci_bus_for_each_resource(root, res, i) {
		if (res && res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) &&
		    res->start > 0x100000000ull)
			break;
	}

	/* Trying to resize is pointless without a root hub window above 4GB */
	if (!res)
		return 0;

	/* Limit the BAR size to what is available */
	rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1,
			rbar_size);

	/* Disable memory decoding while we change the BAR addresses and size */
	pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd);
	pci_write_config_word(adev->pdev, PCI_COMMAND,
			      cmd & ~PCI_COMMAND_MEMORY);

	/* Free the VRAM and doorbell BAR, we most likely need to move both. */
	amdgpu_device_doorbell_fini(adev);
	if (adev->asic_type >= CHIP_BONAIRE)
		pci_release_resource(adev->pdev, 2);

	pci_release_resource(adev->pdev, 0);

	r = pci_resize_resource(adev->pdev, 0, rbar_size);
	if (r == -ENOSPC)
		DRM_INFO("Not enough PCI address space for a large BAR.");
	else if (r && r != -ENOTSUPP)
		DRM_ERROR("Problem resizing BAR0 (%d).", r);

	pci_assign_unassigned_bus_resources(adev->pdev->bus);

	/* When the doorbell or fb BAR isn't available we have no chance of
	 * using the device.
	 */
	r = amdgpu_device_doorbell_init(adev);
	if (r || (pci_resource_flags(adev->pdev, 0) & IORESOURCE_UNSET))
		return -ENODEV;

	pci_write_config_word(adev->pdev, PCI_COMMAND, cmd);

	return 0;
}

/*
 * GPU helper functions.
 */
/**
 * amdgpu_device_need_post - check if the hw needs post or not
 *
 * @adev: amdgpu_device pointer
 *
 * Check if the asic has been initialized (all asics) at driver startup,
 * or if a post is needed because a hw reset was performed.
 * Returns true if post is needed or false if not.
 */
bool amdgpu_device_need_post(struct amdgpu_device *adev)
{
	uint32_t reg;

	if (amdgpu_sriov_vf(adev))
		return false;

	if (amdgpu_passthrough(adev)) {
		/* for FIJI: In the whole-GPU pass-through virtualization case, after a VM
		 * reboot some old SMC firmware still needs the driver to do a vPost,
		 * otherwise the GPU hangs. SMC firmware versions above 22.15 don't have
		 * this flaw, so we force a vPost only for SMC versions below 22.15.
		 */
		if (adev->asic_type == CHIP_FIJI) {
			int err;
			uint32_t fw_ver;
			err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev);
			/* force vPost if an error occurred */
			if (err)
				return true;

			fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
			if (fw_ver < 0x00160e00)
				return true;
		}
	}

	/* Don't post if we need to reset whole hive on init */
	if (adev->gmc.xgmi.pending_reset)
		return false;

	if (adev->has_hw_reset) {
		adev->has_hw_reset = false;
		return true;
	}

	/* bios scratch used on CIK+ */
	if (adev->asic_type >= CHIP_BONAIRE)
		return amdgpu_atombios_scratch_need_asic_init(adev);

	/* check MEM_SIZE for older asics */
	reg = amdgpu_asic_get_config_memsize(adev);

	if ((reg != 0) && (reg != 0xffffffff))
		return false;

	return true;
}

/* if we get transitioned to only one device, take VGA back */
/**
 * amdgpu_device_vga_set_decode - enable/disable vga decode
 *
 * @cookie: amdgpu_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
{
	struct amdgpu_device *adev = cookie;
	amdgpu_asic_set_vga_state(adev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * amdgpu_device_check_block_size - validate the vm block size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm block size specified via module parameter.
 * The vm block size defines number of bits in page table versus page directory,
 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
 * page table and the remaining bits are in the page directory.
 */
static void amdgpu_device_check_block_size(struct amdgpu_device *adev)
{
	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (amdgpu_vm_block_size == -1)
		return;

	if (amdgpu_vm_block_size < 9) {
		dev_warn(adev->dev, "VM page table size (%d) too small\n",
			 amdgpu_vm_block_size);
		amdgpu_vm_block_size = -1;
	}
}

/**
 * amdgpu_device_check_vm_size - validate the vm size
 *
 * @adev: amdgpu_device pointer
 *
 * Validates the vm size in GB specified via module parameter.
 * The VM size is the size of the GPU virtual memory space in GB.
 */
static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
{
	/* no need to check the default value */
	if (amdgpu_vm_size == -1)
		return;

	if (amdgpu_vm_size < 1) {
		dev_warn(adev->dev, "VM size (%d) too small, min is 1GB\n",
			 amdgpu_vm_size);
		amdgpu_vm_size = -1;
	}
}

static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
{
	struct sysinfo si;
	bool is_os_64 = (sizeof(void *) == 8);
	uint64_t total_memory;
	uint64_t dram_size_seven_GB = 0x1B8000000;
	uint64_t dram_size_three_GB = 0xB8000000;

	if (amdgpu_smu_memory_pool_size == 0)
		return;

	if (!is_os_64) {
		DRM_WARN("Not 64-bit OS, feature not supported\n");
		goto def_value;
	}
	si_meminfo(&si);
	total_memory = (uint64_t)si.totalram * si.mem_unit;

	if ((amdgpu_smu_memory_pool_size == 1) ||
		(amdgpu_smu_memory_pool_size == 2)) {
		if (total_memory < dram_size_three_GB)
			goto def_value1;
	} else if ((amdgpu_smu_memory_pool_size == 4) ||
		(amdgpu_smu_memory_pool_size == 8)) {
		if (total_memory < dram_size_seven_GB)
			goto def_value1;
	} else {
		DRM_WARN("Smu memory pool size not supported\n");
		goto def_value;
	}
	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;

	return;

def_value1:
	DRM_WARN("Not enough system memory\n");
def_value:
	adev->pm.smu_prv_buffer_size = 0;
}

/**
 * amdgpu_device_check_arguments - validate module params
 *
 * @adev: amdgpu_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
{
	if (amdgpu_sched_jobs < 4) {
		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = 4;
	} else if (!is_power_of_2(amdgpu_sched_jobs)) {
		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
			 amdgpu_sched_jobs);
		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
	}

	if (amdgpu_gart_size != -1 && amdgpu_gart_size < 32) {
		/* gart size must be greater or equal to 32M */
		dev_warn(adev->dev, "gart size (%d) too small\n",
			 amdgpu_gart_size);
		amdgpu_gart_size = -1;
	}

	if (amdgpu_gtt_size != -1 && amdgpu_gtt_size < 32) {
		/* gtt size must be greater or equal to 32M */
		dev_warn(adev->dev, "gtt size (%d) too small\n",
				 amdgpu_gtt_size);
		amdgpu_gtt_size = -1;
	}

	/* valid range is between 4 and 9 inclusive */
	if (amdgpu_vm_fragment_size != -1 &&
	    (amdgpu_vm_fragment_size > 9 || amdgpu_vm_fragment_size < 4)) {
		dev_warn(adev->dev, "valid range is between 4 and 9\n");
		amdgpu_vm_fragment_size = -1;
	}

	if (amdgpu_sched_hw_submission < 2) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be at least 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = 2;
	} else if (!is_power_of_2(amdgpu_sched_hw_submission)) {
		dev_warn(adev->dev, "sched hw submission jobs (%d) must be a power of 2\n",
			 amdgpu_sched_hw_submission);
		amdgpu_sched_hw_submission = roundup_pow_of_two(amdgpu_sched_hw_submission);
	}

	amdgpu_device_check_smu_prv_buffer_size(adev);

	amdgpu_device_check_vm_size(adev);

	amdgpu_device_check_block_size(adev);

	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type);

	amdgpu_gmc_tmz_set(adev);

	amdgpu_gmc_noretry_set(adev);

	return 0;
}
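
/*
 * Usage sketch (illustrative): the values validated above all come from
 * amdgpu module parameters, so an out-of-range request such as
 *
 *	modprobe amdgpu sched_jobs=3 vm_size=0
 *
 * is warned about and clamped back to a sane default here instead of
 * failing the probe.
 */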

/**
 * amdgpu_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver.  Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
					enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	int r;

	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		r = pci_enable_device(pdev);
		if (r)
			DRM_WARN("pci_enable_device failed (%d)\n", r);
		amdgpu_device_resume(dev, true);

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		amdgpu_device_suspend(dev, true);
		amdgpu_device_cache_pci_state(pdev);
		/* Shut down the device */
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * amdgpu_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver.  Check of the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	* FIXME: open_count is protected by drm_global_mutex but that would lead to
	* locking inversion with the driver load path. And the access here is
	* completely racy anyway. So don't bother with locking for now.
	*/
	return atomic_read(&dev->open_count) == 0;
}

static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
	.set_gpu_state = amdgpu_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = amdgpu_switcheroo_can_switch,
};

/**
 * amdgpu_device_ip_set_clockgating_state - set the CG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: clockgating state (gate or ungate)
 *
 * Sets the requested clockgating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_clockgating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}
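
/*
 * Usage sketch (illustrative): a caller names the IP type and the target
 * state, and the loop above fans the request out to every matching
 * instance, e.g.
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 */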

/**
 * amdgpu_device_ip_set_powergating_state - set the PG state
 *
 * @dev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 * @state: powergating state (gate or ungate)
 *
 * Sets the requested powergating state for all instances of
 * the hardware IP specified.
 * Returns the error code from the last instance.
 */
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state)
{
	struct amdgpu_device *adev = dev;
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type != block_type)
			continue;
		if (!adev->ip_blocks[i].version->funcs->set_powergating_state)
			continue;
		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
			(void *)adev, state);
		if (r)
			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
	}
	return r;
}

/**
 * amdgpu_device_ip_get_clockgating_state - get the CG state
 *
 * @adev: amdgpu_device pointer
 * @flags: clockgating feature flags
 *
 * Walks the list of IPs on the device and updates the clockgating
 * flags for each IP.
 * Updates @flags with the feature flags for each hardware IP where
 * clockgating is enabled.
 */
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->get_clockgating_state)
			adev->ip_blocks[i].version->funcs->get_clockgating_state((void *)adev, flags);
	}
}

/**
 * amdgpu_device_ip_wait_for_idle - wait for idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Waits for the requested hardware IP to be idle.
 * Returns 0 for success or a negative error code on failure.
 */
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type) {
			r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev);
1590 1591 1592 1593 1594 1595 1596 1597 1598
			if (r)
				return r;
			break;
		}
	}
	return 0;

}

/**
 * amdgpu_device_ip_is_idle - is the hardware IP idle
 *
 * @adev: amdgpu_device pointer
 * @block_type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Check if the hardware IP is idle or not.
 * Returns true if the IP is idle, false if not.
 */
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->type == block_type)
			return adev->ip_blocks[i].version->funcs->is_idle((void *)adev);
	}
	return true;

}

/**
 * amdgpu_device_ip_get_ip_block - get a hw IP pointer
 *
 * @adev: amdgpu_device pointer
 * @type: Type of hardware IP (SMU, GFX, UVD, etc.)
 *
 * Returns a pointer to the hardware IP block structure
 * if it exists for the asic, otherwise NULL.
 */
struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type)
{
	int i;

	for (i = 0; i < adev->num_ip_blocks; i++)
		if (adev->ip_blocks[i].version->type == type)
			return &adev->ip_blocks[i];

	return NULL;
}

/**
 * amdgpu_device_ip_block_version_cmp
 *
 * @adev: amdgpu_device pointer
 * @type: enum amd_ip_block_type
 * @major: major version
 * @minor: minor version
 *
 * return 0 if equal or greater
 * return 1 if smaller or the ip_block doesn't exist
 */
int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor)
{
	struct amdgpu_ip_block *ip_block = amdgpu_device_ip_get_ip_block(adev, type);

	if (ip_block && ((ip_block->version->major > major) ||
			((ip_block->version->major == major) &&
			(ip_block->version->minor >= minor))))
		return 0;

	return 1;
}
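
/*
 * Usage sketch (illustrative): gating a code path on a minimum IP version,
 * here GFX 8.1 or newer:
 *
 *	if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_GFX,
 *						8, 1)) {
 *		(GFX IP is at least version 8.1)
 *	}
 */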

/**
 * amdgpu_device_ip_block_add
 *
 * @adev: amdgpu_device pointer
 * @ip_block_version: pointer to the IP to add
 *
 * Adds the IP block driver information to the collection of IPs
 * on the asic.
 */
int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version)
{
	if (!ip_block_version)
		return -EINVAL;

	DRM_INFO("add ip block number %d <%s>\n", adev->num_ip_blocks,
		  ip_block_version->funcs->name);

	adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version;

	return 0;
}

/**
 * amdgpu_device_enable_virtual_display - enable virtual display feature
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the virtual display feature if the user has enabled it via
 * the module parameter virtual_display.  This feature provides a virtual
 * display hardware on headless boards or in virtualized environments.
 * This function parses and validates the configuration string specified by
 * the user and configures the virtual display configuration (number of
 * virtual connectors, crtcs, etc.) specified.
 */
static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
{
	adev->enable_virtual_display = false;

	if (amdgpu_virtual_display) {
		const char *pci_address_name = pci_name(adev->pdev);
		char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname;

		pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL);
		pciaddstr_tmp = pciaddstr;
		while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) {
			pciaddname = strsep(&pciaddname_tmp, ",");
			if (!strcmp("all", pciaddname)
			    || !strcmp(pci_address_name, pciaddname)) {
				long num_crtc;
				int res = -1;

				adev->enable_virtual_display = true;

				if (pciaddname_tmp)
					res = kstrtol(pciaddname_tmp, 10,
						      &num_crtc);

				if (!res) {
					if (num_crtc < 1)
						num_crtc = 1;
					if (num_crtc > 6)
						num_crtc = 6;
					adev->mode_info.num_crtc = num_crtc;
				} else {
					adev->mode_info.num_crtc = 1;
				}
				break;
			}
		}

		DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
			 amdgpu_virtual_display, pci_address_name,
			 adev->enable_virtual_display, adev->mode_info.num_crtc);

		kfree(pciaddstr);
	}
}
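
/*
 * Usage sketch (illustrative, with a made-up PCI address): the string parsed
 * above comes straight from the virtual_display module parameter, e.g.
 *
 *	modprobe amdgpu virtual_display=0000:04:00.0,2
 *
 * enables two virtual crtcs on the device at 0000:04:00.0, while
 *
 *	modprobe amdgpu virtual_display=all
 *
 * enables a single virtual crtc on every amdgpu device.
 */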

/**
 * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Parses the asic configuration parameters specified in the gpu info
 * firmware and makes them availale to the driver for use in configuring
 * the asic.
 * Returns 0 on success, -EINVAL on failure.
 */
1759 1760 1761
static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[40];
	int err;
	const struct gpu_info_firmware_header_v1_0 *hdr;

	adev->firmware.gpu_info_fw = NULL;

	if (adev->mman.discovery_bin) {
		amdgpu_discovery_get_gfx_info(adev);

		/*
		 * FIXME: The bounding box is still needed by Navi12, so
		 * temporarily read it from gpu_info firmware. Should be dropped
		 * when DAL no longer needs it.
		 */
		if (adev->asic_type != CHIP_NAVI12)
			return 0;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_VEGA20:
	case CHIP_ALDEBARAN:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	default:
		return 0;
	case CHIP_VEGA10:
		chip_name = "vega10";
		break;
	case CHIP_VEGA12:
		chip_name = "vega12";
		break;
	case CHIP_RAVEN:
		if (adev->apu_flags & AMD_APU_IS_RAVEN2)
			chip_name = "raven2";
		else if (adev->apu_flags & AMD_APU_IS_PICASSO)
			chip_name = "picasso";
		else
			chip_name = "raven";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_RENOIR:
		if (adev->apu_flags & AMD_APU_IS_RENOIR)
			chip_name = "renoir";
		else
			chip_name = "green_sardine";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	case CHIP_VANGOGH:
		chip_name = "vangogh";
		break;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name);
	err = request_firmware(&adev->firmware.gpu_info_fw, fw_name, adev->dev);
	if (err) {
		dev_err(adev->dev,
			"Failed to load gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}
	err = amdgpu_ucode_validate(adev->firmware.gpu_info_fw);
	if (err) {
		dev_err(adev->dev,
			"Failed to validate gpu_info firmware \"%s\"\n",
			fw_name);
		goto out;
	}

	hdr = (const struct gpu_info_firmware_header_v1_0 *)adev->firmware.gpu_info_fw->data;
	amdgpu_ucode_print_gpu_info_hdr(&hdr->header);

	switch (hdr->version_major) {
	case 1:
	{
		const struct gpu_info_firmware_v1_0 *gpu_info_fw =
			(const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data +
								le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		/*
		 * Should be dropped when DAL no longer needs it.
		 */
		if (adev->asic_type == CHIP_NAVI12)
			goto parse_soc_bounding_box;

		adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se);
		adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh);
		adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se);
		adev->gfx.config.max_backends_per_se = le32_to_cpu(gpu_info_fw->gc_num_rb_per_se);
		adev->gfx.config.max_texture_channel_caches =
			le32_to_cpu(gpu_info_fw->gc_num_tccs);
		adev->gfx.config.max_gprs = le32_to_cpu(gpu_info_fw->gc_num_gprs);
		adev->gfx.config.max_gs_threads = le32_to_cpu(gpu_info_fw->gc_num_max_gs_thds);
		adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gpu_info_fw->gc_gs_table_depth);
		adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gpu_info_fw->gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			le32_to_cpu(gpu_info_fw->gc_double_offchip_lds_buffer);
		adev->gfx.cu_info.wave_front_size = le32_to_cpu(gpu_info_fw->gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd =
			le32_to_cpu(gpu_info_fw->gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu =
			le32_to_cpu(gpu_info_fw->gc_max_scratch_slots_per_cu);
		adev->gfx.cu_info.lds_size = le32_to_cpu(gpu_info_fw->gc_lds_size);
		if (hdr->version_minor >= 1) {
			const struct gpu_info_firmware_v1_1 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_1 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->gfx.config.num_sc_per_sh =
				le32_to_cpu(gpu_info_fw->num_sc_per_sh);
			adev->gfx.config.num_packer_per_sc =
				le32_to_cpu(gpu_info_fw->num_packer_per_sc);
		}

parse_soc_bounding_box:
		/*
		 * soc bounding box info is not integrated in the discovery table,
		 * so we always need to parse it from the gpu info firmware if needed.
		 */
		if (hdr->version_minor == 2) {
			const struct gpu_info_firmware_v1_2 *gpu_info_fw =
				(const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data +
									le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			adev->dm.soc_bounding_box = &gpu_info_fw->soc_bounding_box;
		}
		break;
	}
	default:
		dev_err(adev->dev,
			"Unsupported gpu_info table %d\n", hdr->header.ucode_version);
		err = -EINVAL;
		goto out;
	}
out:
	return err;
}

/**
 * amdgpu_device_ip_early_init - run early init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Early initialization pass for hardware IPs.  The hardware IPs that make
 * up each asic are discovered and each IP's early_init callback is run.  This
 * is the first stage in initializing the asic.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
{
	int i, r;

	amdgpu_device_enable_virtual_display(adev);

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_request_full_gpu(adev, true);
		if (r)
			return r;
	}

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_VERDE:
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		adev->family = AMDGPU_FAMILY_SI;
		r = si_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_KV;
		else
			adev->family = AMDGPU_FAMILY_CI;

		r = cik_set_ip_blocks(adev);
		if (r)
			return r;
		break;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_CZ;
		else
			adev->family = AMDGPU_FAMILY_VI;

		r = vi_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
	case CHIP_ALDEBARAN:
		if (adev->flags & AMD_IS_APU)
			adev->family = AMDGPU_FAMILY_RV;
		else
			adev->family = AMDGPU_FAMILY_AI;

		r = soc15_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
		if (adev->asic_type == CHIP_VANGOGH)
			adev->family = AMDGPU_FAMILY_VGH;
		else
			adev->family = AMDGPU_FAMILY_NV;

		r = nv_set_ip_blocks(adev);
		if (r)
			return r;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	amdgpu_amdkfd_device_probe(adev);

	adev->pm.pp_feature = amdgpu_pp_feature_mask;
	if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS)
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		adev->pm.pp_feature &= ~PP_OVERDRIVE_MASK;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
			DRM_ERROR("disabled ip block: %d <%s>\n",
				  i, adev->ip_blocks[i].version->funcs->name);
			adev->ip_blocks[i].status.valid = false;
		} else {
			if (adev->ip_blocks[i].version->funcs->early_init) {
				r = adev->ip_blocks[i].version->funcs->early_init((void *)adev);
				if (r == -ENOENT) {
					adev->ip_blocks[i].status.valid = false;
				} else if (r) {
					DRM_ERROR("early_init of IP block <%s> failed %d\n",
						  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				} else {
					adev->ip_blocks[i].status.valid = true;
				}
			} else {
				adev->ip_blocks[i].status.valid = true;
			}
		}
		/* get the vbios after the asic_funcs are set up */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON) {
			r = amdgpu_device_parse_gpu_info_fw(adev);
			if (r)
				return r;

			/* Read BIOS */
			if (!amdgpu_get_bios(adev))
				return -EINVAL;

			r = amdgpu_atombios_init(adev);
			if (r) {
				dev_err(adev->dev, "amdgpu_atombios_init failed\n");
				amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_INIT_FAIL, 0, 0);
				return r;
			}
		}
	}

	adev->cg_flags &= amdgpu_cg_mask;
	adev->pg_flags &= amdgpu_pg_mask;

	return 0;
}
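
/*
 * Module-parameter sketch (illustrative): amdgpu_ip_block_mask is consumed
 * bit-per-block in the loop above, so clearing bit i marks the i-th
 * registered IP block invalid.  Assuming the default mask of all ones,
 * disabling the block at index 4 would look like:
 *
 *	amdgpu.ip_block_mask=0xffffffef
 */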

static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    (amdgpu_sriov_vf(adev) && (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)) ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
			r = adev->ip_blocks[i].version->funcs->hw_init(adev);
			if (r) {
				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.sw)
			continue;
		if (adev->ip_blocks[i].status.hw)
			continue;
		r = adev->ip_blocks[i].version->funcs->hw_init(adev);
		if (r) {
			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
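
/*
 * Ordering sketch (this mirrors amdgpu_device_ip_init() below): phase 1
 * brings up only COMMON, IH and, under SR-IOV, PSP so that microcode can be
 * loaded before the remaining blocks are started in phase 2:
 *
 *	r = amdgpu_device_ip_hw_init_phase1(adev);
 *	...
 *	r = amdgpu_device_fw_loading(adev);
 *	...
 *	r = amdgpu_device_ip_hw_init_phase2(adev);
 */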

static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
{
	int r = 0;
	int i;
	uint32_t smu_version;

	if (adev->asic_type >= CHIP_VEGA10) {
		for (i = 0; i < adev->num_ip_blocks; i++) {
			if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_PSP)
				continue;

			if (!adev->ip_blocks[i].status.sw)
				continue;

			/* no need to do the fw loading again if already done */
			if (adev->ip_blocks[i].status.hw == true)
				break;

			if (amdgpu_in_reset(adev) || adev->in_suspend) {
				r = adev->ip_blocks[i].version->funcs->resume(adev);
				if (r) {
					DRM_ERROR("resume of IP block <%s> failed %d\n",
							  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			} else {
				r = adev->ip_blocks[i].version->funcs->hw_init(adev);
				if (r) {
					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
							  adev->ip_blocks[i].version->funcs->name, r);
					return r;
				}
			}

			adev->ip_blocks[i].status.hw = true;
			break;
		}
	}

	if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA)
		r = amdgpu_pm_load_smu_firmware(adev, &smu_version);

	return r;
}

/**
 * amdgpu_device_ip_init - run init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main initialization pass for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the sw_init and hw_init callbacks
 * are run.  sw_init initializes the software state associated with each IP
 * and hw_init initializes the hardware associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_init(struct amdgpu_device *adev)
{
	int i, r;

	r = amdgpu_ras_init(adev);
	if (r)
		return r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev);
		if (r) {
			DRM_ERROR("sw_init of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			goto init_failed;
		}
		adev->ip_blocks[i].status.sw = true;

		/* need to do gmc hw init early so we can allocate gpu mem */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			r = amdgpu_device_vram_scratch_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
				goto init_failed;
			}
			r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev);
			if (r) {
				DRM_ERROR("hw_init %d failed %d\n", i, r);
				goto init_failed;
			}
			r = amdgpu_device_wb_init(adev);
			if (r) {
				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
				goto init_failed;
			}
			adev->ip_blocks[i].status.hw = true;

			/* right after GMC hw init, we create CSA */
			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
								AMDGPU_GEM_DOMAIN_VRAM,
								AMDGPU_CSA_SIZE);
				if (r) {
					DRM_ERROR("allocate CSA failed %d\n", r);
					goto init_failed;
				}
			}
		}
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_init_data_exchange(adev);

	r = amdgpu_ib_pool_init(adev);
	if (r) {
		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
		goto init_failed;
	}

	r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete */
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase1(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto init_failed;

	r = amdgpu_device_ip_hw_init_phase2(adev);
	if (r)
		goto init_failed;

	/*
	 * retired pages will be loaded from eeprom and reserved here,
	 * it should be called after amdgpu_device_ip_hw_init_phase2 since
	 * for some ASICs the RAS EEPROM code relies on the SMU being fully
	 * functional for I2C communication, which is only true at this point.
	 *
	 * amdgpu_ras_recovery_init may fail, but the upper layer only cares
	 * about failures from a bad gpu situation and stops the amdgpu init
	 * process accordingly. For other failed cases, it will still release
	 * all the resources and print an error message, rather than return a
	 * negative value to the upper level.
	 *
	 * Note: theoretically, this should be called before all vram allocations
	 * to protect retired pages from abuse.
	 */
	r = amdgpu_ras_recovery_init(adev);
	if (r)
		goto init_failed;

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_add_device(adev);

	/* Don't init kfd if the whole hive needs to be reset during init */
	if (!adev->gmc.xgmi.pending_reset)
		amdgpu_amdkfd_device_init(adev);

	amdgpu_fru_get_product_info(adev);

init_failed:
	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, true);

	return r;
}

/**
 * amdgpu_device_fill_reset_magic - writes reset magic to gart pointer
 *
 * @adev: amdgpu_device pointer
 *
 * Writes a reset magic value to the gart pointer in VRAM.  The driver calls
 * this function before a GPU reset.  If the value is retained after a
 * GPU reset, VRAM has not been lost.  Some GPU resets may destroy VRAM contents.
 */
static void amdgpu_device_fill_reset_magic(struct amdgpu_device *adev)
{
	memcpy(adev->reset_magic, adev->gart.ptr, AMDGPU_RESET_MAGIC_NUM);
}

/**
 * amdgpu_device_check_vram_lost - check if vram is valid
 *
 * @adev: amdgpu_device pointer
 *
 * Checks the reset magic value written to the gart pointer in VRAM.
 * The driver calls this after a GPU reset to see if the contents of
 * VRAM are lost or not.
 * returns true if vram is lost, false if not.
 */
static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
{
	if (memcmp(adev->gart.ptr, adev->reset_magic,
			AMDGPU_RESET_MAGIC_NUM))
		return true;

	if (!amdgpu_in_reset(adev))
		return false;

	/*
	 * For all ASICs with baco/mode1 reset, the VRAM is
	 * always assumed to be lost.
	 */
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
	case AMD_RESET_METHOD_MODE1:
		return true;
	default:
		return false;
	}
}
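
/*
 * Usage sketch (illustrative): a reset path pairs the two helpers so VRAM
 * contents are only treated as lost when the magic was clobbered:
 *
 *	amdgpu_device_fill_reset_magic(adev);	// done at late init
 *	...					// asic reset happens
 *	if (amdgpu_device_check_vram_lost(adev))
 *		recover_bo_contents(adev);	// hypothetical recovery step
 */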

/**
 * amdgpu_device_set_cg_state - set clockgating for amdgpu device
 *
 * @adev: amdgpu_device pointer
 * @state: clockgating state (gate or ungate)
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * set_clockgating_state callbacks are run.
 * Late initialization pass: enables clockgating for hardware IPs.
 * Fini or suspend pass: disables clockgating for hardware IPs.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
						enum amd_clockgating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip CG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
			/* enable clockgating to save power */
			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
										     state);
			if (r) {
				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}

	return 0;
}
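
/*
 * Index arithmetic sketch (worked example): with num_ip_blocks == 3, gating
 * visits blocks 0, 1, 2 in init order, while ungating maps j = 0, 1, 2 to
 * i = 2, 1, 0, so clockgating is torn down in the reverse of the order in
 * which it was enabled.
 */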

static int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state)
{
	int i, j, r;

	if (amdgpu_emu_mode == 1)
		return 0;

	for (j = 0; j < adev->num_ip_blocks; j++) {
		i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		/* skip PG for VCE/UVD, it's handled specially */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_JPEG &&
		    adev->ip_blocks[i].version->funcs->set_powergating_state) {
			/* enable powergating to save power */
			r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev,
											state);
			if (r) {
				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
	}
	return 0;
}

static int amdgpu_device_enable_mgpu_fan_boost(void)
{
	struct amdgpu_gpu_instance *gpu_ins;
	struct amdgpu_device *adev;
	int i, ret = 0;

	mutex_lock(&mgpu_info.mutex);

	/*
	 * MGPU fan boost feature should be enabled
	 * only when there are two or more dGPUs in
	 * the system
	 */
	if (mgpu_info.num_dgpu < 2)
		goto out;

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		gpu_ins = &(mgpu_info.gpu_ins[i]);
		adev = gpu_ins->adev;
		if (!(adev->flags & AMD_IS_APU) &&
		    !gpu_ins->mgpu_fan_enabled) {
			ret = amdgpu_dpm_enable_mgpu_fan_boost(adev);
			if (ret)
				break;

			gpu_ins->mgpu_fan_enabled = 1;
		}
	}

out:
	mutex_unlock(&mgpu_info.mutex);

	return ret;
}

/**
 * amdgpu_device_ip_late_init - run late init for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Late initialization pass for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the late_init callbacks are run.
 * late_init covers any special initialization that an IP requires
 * after all of them have been initialized or something that needs to happen
 * late in the init process.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
{
	struct amdgpu_gpu_instance *gpu_instance;
	int i = 0, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_init) {
			r = adev->ip_blocks[i].version->funcs->late_init((void *)adev);
			if (r) {
				DRM_ERROR("late_init of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
		}
		adev->ip_blocks[i].status.late_initialized = true;
	}

	amdgpu_ras_set_error_query_ready(adev, true);

	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);

	amdgpu_device_fill_reset_magic(adev);

	r = amdgpu_device_enable_mgpu_fan_boost();
	if (r)
		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);

	/* For XGMI + passthrough configuration on arcturus, enable light SBR */
	if (adev->asic_type == CHIP_ARCTURUS &&
	    amdgpu_passthrough(adev) &&
	    adev->gmc.xgmi.num_physical_nodes > 1)
		smu_set_light_sbr(&adev->smu, true);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		mutex_lock(&mgpu_info.mutex);

		/*
		 * Reset device p-state to low as this was booted with high.
		 *
		 * This should be performed only after all devices from the same
		 * hive get initialized.
		 *
		 * However, it's unknown in advance how many devices are in the
		 * hive, as this is counted one by one during device
		 * initialization.
		 *
		 * So, we wait for all XGMI interlinked devices to be initialized.
		 * This may bring some delays as those devices may come from
		 * different hives. But that should be OK.
		 */
		if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) {
			for (i = 0; i < mgpu_info.num_gpu; i++) {
				gpu_instance = &(mgpu_info.gpu_ins[i]);
				if (gpu_instance->adev->flags & AMD_IS_APU)
					continue;

				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
						AMDGPU_XGMI_PSTATE_MIN);
				if (r) {
					DRM_ERROR("pstate setting failed (%d).\n", r);
					break;
				}
			}
		}

		mutex_unlock(&mgpu_info.mutex);
	}

	return 0;
}

/**
 * amdgpu_device_ip_fini - run fini for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main teardown pass for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the hw_fini and sw_fini callbacks
 * are run.  hw_fini tears down the hardware associated with each IP
 * and sw_fini tears down any software state associated with each IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
{
	int i, r;

	if (amdgpu_sriov_vf(adev) && adev->virt.ras_init_done)
		amdgpu_virt_release_ras_err_handler_data(adev);

	amdgpu_ras_pre_fini(adev);

	if (adev->gmc.xgmi.num_physical_nodes > 1)
		amdgpu_xgmi_remove_device(adev);

	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);

	amdgpu_amdkfd_device_fini(adev);

	/* need to disable SMC first */
	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
			/* XXX handle errors */
			if (r) {
				DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
			}
			adev->ip_blocks[i].status.hw = false;
			break;
		}
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.hw)
			continue;

		r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}

		adev->ip_blocks[i].status.hw = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.sw)
			continue;

		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
			amdgpu_ucode_free_bo(adev);
			amdgpu_free_static_csa(&adev->virt.csa_obj);
			amdgpu_device_wb_fini(adev);
			amdgpu_device_vram_scratch_fini(adev);
			amdgpu_ib_pool_fini(adev);
		}

		r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
		/* XXX handle errors */
		if (r) {
			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.sw = false;
		adev->ip_blocks[i].status.valid = false;
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.late_initialized)
			continue;
		if (adev->ip_blocks[i].version->funcs->late_fini)
			adev->ip_blocks[i].version->funcs->late_fini((void *)adev);
		adev->ip_blocks[i].status.late_initialized = false;
	}

	amdgpu_ras_fini(adev);

	if (amdgpu_sriov_vf(adev))
		if (amdgpu_virt_release_full_gpu(adev, false))
			DRM_ERROR("failed to release exclusive mode on fini\n");

	return 0;
}

/**
 * amdgpu_device_delayed_init_work_handler - work handler for IB tests
 *
 * @work: work_struct.
 */
static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, delayed_init_work.work);
	int r;

	r = amdgpu_ib_ring_tests(adev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);
}

static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, gfx.gfx_off_delay_work.work);

	mutex_lock(&adev->gfx.gfx_off_mutex);
	if (!adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
		if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, true))
			adev->gfx.gfx_off_state = true;
	}
	mutex_unlock(&adev->gfx.gfx_off_mutex);
}
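
/*
 * Usage sketch (assumed counterpart, see amdgpu_gfx_off_ctrl()): callers
 * take and drop GFXOFF requests, and the delayed work above only powers the
 * GFX core down once gfx_off_req_count has returned to zero:
 *
 *	amdgpu_gfx_off_ctrl(adev, false);	// block GFXOFF while in use
 *	...
 *	amdgpu_gfx_off_ctrl(adev, true);	// allow GFXOFF again
 */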

/**
 * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run.  suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
{
	int i, r;

	if (!adev->in_s0ix || amdgpu_in_reset(adev)) {
		amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
		amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
	}

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;

		/* displays are handled separately */
		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_DCE)
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}

		adev->ip_blocks[i].status.hw = false;
	}

	return 0;
}

/**
 * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run.  suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
	int i, r;

	if (adev->in_s0ix)
		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry);

	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		/* displays are handled in phase1 */
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
			continue;
		/* PSP lost connection when err_event_athub occurs */
		if (amdgpu_ras_intr_triggered() &&
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* skip unnecessary suspend if we do not initialize them yet */
		if (adev->gmc.xgmi.pending_reset &&
		    !(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH)) {
			adev->ip_blocks[i].status.hw = false;
			continue;
		}

		/* XXX fix these remaining cases */
		if (adev->in_s0ix &&
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP || /* breaks resume */
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX))  /* breaks suspend */
			continue;

		/* XXX handle errors */
		r = adev->ip_blocks[i].version->funcs->suspend(adev);
		/* XXX handle errors */
		if (r) {
			DRM_ERROR("suspend of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
		}
		adev->ip_blocks[i].status.hw = false;
		/* handle putting the SMC in the appropriate state */
		if (!amdgpu_sriov_vf(adev)) {
			if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
				r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
				if (r) {
					DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
							adev->mp1_state, r);
					return r;
				}
			}
		}
	}

	return 0;
}

2787 2788 2789 2790 2791 2792 2793 2794 2795 2796 2797 2798 2799 2800 2801
/**
 * amdgpu_device_ip_suspend - run suspend for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main suspend function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked, clockgating is disabled and the
 * suspend callbacks are run.  suspend puts the hardware and software state
 * in each IP into a state suitable for suspend.
 * Returns 0 on success, negative error code on failure.
 */
int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
{
	int r;

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_fini_data_exchange(adev);
		amdgpu_virt_request_full_gpu(adev, false);
	}

	r = amdgpu_device_ip_suspend_phase1(adev);
	if (r)
		return r;
	r = amdgpu_device_ip_suspend_phase2(adev);

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_release_full_gpu(adev, false);

	return r;
}

static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_GMC,
		AMD_IP_BLOCK_TYPE_COMMON,
		AMD_IP_BLOCK_TYPE_PSP,
		AMD_IP_BLOCK_TYPE_IH,
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		block = &adev->ip_blocks[i];
		block->status.hw = false;

		for (j = 0; j < ARRAY_SIZE(ip_order); j++) {

			if (block->version->type != ip_order[j] ||
				!block->status.valid)
				continue;

			r = block->version->funcs->hw_init(adev);
			DRM_INFO("RE-INIT-early: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
{
	int i, r;

	static enum amd_ip_block_type ip_order[] = {
		AMD_IP_BLOCK_TYPE_SMC,
		AMD_IP_BLOCK_TYPE_DCE,
		AMD_IP_BLOCK_TYPE_GFX,
		AMD_IP_BLOCK_TYPE_SDMA,
		AMD_IP_BLOCK_TYPE_UVD,
		AMD_IP_BLOCK_TYPE_VCE,
		AMD_IP_BLOCK_TYPE_VCN
	};

	for (i = 0; i < ARRAY_SIZE(ip_order); i++) {
		int j;
		struct amdgpu_ip_block *block;

		for (j = 0; j < adev->num_ip_blocks; j++) {
			block = &adev->ip_blocks[j];

			if (block->version->type != ip_order[i] ||
				!block->status.valid ||
				block->status.hw)
				continue;

			if (block->version->type == AMD_IP_BLOCK_TYPE_SMC)
				r = block->version->funcs->resume(adev);
			else
				r = block->version->funcs->hw_init(adev);

			DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r ? "failed" : "succeeded");
			if (r)
				return r;
			block->status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase1 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * First resume function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * COMMON, GMC, and IH.  resume puts the hardware into a functional state
 * after a suspend and updates the software state as necessary.  This
 * function is also used for restoring the GPU after a GPU reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase1(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {

			r = adev->ip_blocks[i].version->funcs->resume(adev);
			if (r) {
				DRM_ERROR("resume of IP block <%s> failed %d\n",
					  adev->ip_blocks[i].version->funcs->name, r);
				return r;
			}
			adev->ip_blocks[i].status.hw = true;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_resume_phase2 - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Second resume function for hardware IPs.  The list of all the hardware
 * IPs that make up the asic is walked and the resume callbacks are run for
 * all blocks except COMMON, GMC, and IH.  resume puts the hardware into a
 * functional state after a suspend and updates the software state as
 * necessary.  This function is also used for restoring the GPU after a GPU
 * reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
{
	int i, r;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid || adev->ip_blocks[i].status.hw)
			continue;
		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP)
			continue;
		r = adev->ip_blocks[i].version->funcs->resume(adev);
		if (r) {
			DRM_ERROR("resume of IP block <%s> failed %d\n",
				  adev->ip_blocks[i].version->funcs->name, r);
			return r;
		}
		adev->ip_blocks[i].status.hw = true;
	}

	return 0;
}
/**
 * amdgpu_device_ip_resume - run resume for hardware IPs
 *
 * @adev: amdgpu_device pointer
 *
 * Main resume function for hardware IPs.  The hardware IPs
 * are split into two resume functions because they are
 * also used in recovering from a GPU reset and some additional
 * steps need to be taken between them.  In this case (S3/S4) they are
 * run sequentially.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_device_ip_resume_phase1(adev);
	if (r)
		return r;

	r = amdgpu_device_fw_loading(adev);
	if (r)
		return r;

	r = amdgpu_device_ip_resume_phase2(adev);

	return r;
}

/**
 * amdgpu_device_detect_sriov_bios - determine if the board supports SR-IOV
 *
 * @adev: amdgpu_device pointer
 *
 * Query the VBIOS data tables to determine if the board supports SR-IOV.
 */
static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev)) {
		if (adev->is_atom_fw) {
			if (amdgpu_atomfirmware_gpu_supports_virtualization(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		} else {
			if (amdgpu_atombios_has_gpu_virtualization_table(adev))
				adev->virt.caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS;
		}

		if (!(adev->virt.caps & AMDGPU_SRIOV_CAPS_SRIOV_VBIOS))
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_NO_VBIOS, 0, 0);
	}
}

/**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructure) support for an asic.
 * returns true if DC has support, false if not.
 */
bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
{
	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
#if defined(CONFIG_DRM_AMD_DC_SI)
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
#endif
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		/*
		 * We have systems in the wild with these ASICs that require
		 * LVDS and VGA support which is not supported with DC.
		 *
		 * Fallback to the non-DC driver here by default so as not to
		 * cause regressions.
		 */
		return amdgpu_dc > 0;
	case CHIP_HAWAII:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN)
	case CHIP_RAVEN:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
	case CHIP_RENOIR:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
	case CHIP_DIMGREY_CAVEFISH:
	case CHIP_VANGOGH:
#endif
		return amdgpu_dc != 0;
#endif
	default:
		if (amdgpu_dc > 0)
			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
					 "but isn't supported by ASIC, ignoring\n");
		return false;
	}
}
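
/*
 * Parameter sketch (illustrative): amdgpu_dc is backed by the amdgpu.dc
 * module parameter, assumed here to default to -1 (auto, i.e. the per-asic
 * choice above); an explicit value forces the decision:
 *
 *	amdgpu.dc=1	// request DC even on the LVDS/VGA-constrained asics
 *	amdgpu.dc=0	// stay on the non-DC display path
 */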

/**
 * amdgpu_device_has_dc_support - check if dc is supported
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true for supported, false for not supported
 */
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev) || adev->enable_virtual_display)
		return false;

	return amdgpu_device_asic_has_dc_support(adev->asic_type);
}

static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
{
	struct amdgpu_device *adev =
		container_of(__work, struct amdgpu_device, xgmi_reset_work);
	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);

	/* It's a bug to not have a hive within this function */
	if (WARN_ON(!hive))
		return;

	/*
	 * Use task barrier to synchronize all xgmi reset works across the
	 * hive. task_barrier_enter and task_barrier_exit will block
	 * until all the threads running the xgmi reset works reach
	 * those points. task_barrier_full will do both blocks.
	 */
	if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {

		task_barrier_enter(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		task_barrier_exit(&hive->tb);
		adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));

		if (adev->asic_reset_res)
			goto fail;

		if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
			adev->mmhub.funcs->reset_ras_error_count(adev);
	} else {

		task_barrier_full(&hive->tb);
		adev->asic_reset_res = amdgpu_asic_reset(adev);
	}

fail:
	if (adev->asic_reset_res)
		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
			 adev->asic_reset_res, adev_to_drm(adev)->unique);
	amdgpu_put_xgmi_hive(hive);
}

static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
{
	char *input = amdgpu_lockup_timeout;
	char *timeout_setting = NULL;
	int index = 0;
	long timeout;
	int ret = 0;

	/*
	 * By default timeout for non compute jobs is 10000.
	 * And there is no timeout enforced on compute jobs.
	 * In SR-IOV or passthrough mode, timeout for compute
	 * jobs is 60000 by default.
	 */
	adev->gfx_timeout = msecs_to_jiffies(10000);
	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
	if (amdgpu_sriov_vf(adev))
		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
	else if (amdgpu_passthrough(adev))
		adev->compute_timeout = msecs_to_jiffies(60000);
	else
		adev->compute_timeout = MAX_SCHEDULE_TIMEOUT;

	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
		while ((timeout_setting = strsep(&input, ",")) &&
				strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
			ret = kstrtol(timeout_setting, 0, &timeout);
			if (ret)
				return ret;

			if (timeout == 0) {
				index++;
				continue;
			} else if (timeout < 0) {
				timeout = MAX_SCHEDULE_TIMEOUT;
			} else {
				timeout = msecs_to_jiffies(timeout);
			}

			switch (index++) {
			case 0:
				adev->gfx_timeout = timeout;
				break;
			case 1:
				adev->compute_timeout = timeout;
				break;
			case 2:
				adev->sdma_timeout = timeout;
				break;
			case 3:
				adev->video_timeout = timeout;
				break;
			default:
				break;
			}
		}
		/*
		 * There is only one value specified and
		 * it should apply to all non-compute jobs.
		 */
		if (index == 1) {
			adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
			if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
				adev->compute_timeout = adev->gfx_timeout;
		}
	}

	return ret;
}
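
/*
 * Parameter sketch (derived from the parsing above): amdgpu.lockup_timeout
 * takes up to four comma-separated values in ms, applied in the order gfx,
 * compute, sdma, video; 0 keeps the default and a negative value disables
 * the timeout.  A single value covers all non-compute jobs:
 *
 *	amdgpu.lockup_timeout=10000,60000,10000,10000
 *	amdgpu.lockup_timeout=5000
 */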

static const struct attribute *amdgpu_dev_attributes[] = {
	&dev_attr_product_name.attr,
	&dev_attr_product_number.attr,
	&dev_attr_serial_number.attr,
	&dev_attr_pcie_replay_count.attr,
	NULL
};

/**
 * amdgpu_device_init - initialize the driver
 *
 * @adev: amdgpu_device pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int amdgpu_device_init(struct amdgpu_device *adev,
		       uint32_t flags)
{
	struct drm_device *ddev = adev_to_drm(adev);
	struct pci_dev *pdev = adev->pdev;
	int r, i;
	bool px = false;
	u32 max_MBps;

	adev->shutdown = false;
	adev->flags = flags;

	if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST)
		adev->asic_type = amdgpu_force_asic_type;
	else
		adev->asic_type = flags & AMD_ASIC_MASK;

	adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
	if (amdgpu_emu_mode == 1)
		adev->usec_timeout *= 10;
	adev->gmc.gart_size = 512 * 1024 * 1024;
	adev->accel_working = false;
	adev->num_rings = 0;
	adev->mman.buffer_funcs = NULL;
	adev->mman.buffer_funcs_ring = NULL;
	adev->vm_manager.vm_pte_funcs = NULL;
	adev->vm_manager.vm_pte_num_scheds = 0;
	adev->gmc.gmc_funcs = NULL;
	adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS);
	bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	adev->smc_rreg = &amdgpu_invalid_rreg;
	adev->smc_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg = &amdgpu_invalid_rreg;
	adev->pcie_wreg = &amdgpu_invalid_wreg;
	adev->pciep_rreg = &amdgpu_invalid_rreg;
	adev->pciep_wreg = &amdgpu_invalid_wreg;
	adev->pcie_rreg64 = &amdgpu_invalid_rreg64;
	adev->pcie_wreg64 = &amdgpu_invalid_wreg64;
	adev->uvd_ctx_rreg = &amdgpu_invalid_rreg;
	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
	adev->didt_rreg = &amdgpu_invalid_rreg;
	adev->didt_wreg = &amdgpu_invalid_wreg;
	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization is done here so we
	 * can recall functions without having locking issues */
	mutex_init(&adev->firmware.mutex);
	mutex_init(&adev->pm.mutex);
	mutex_init(&adev->gfx.gpu_clock_mutex);
	mutex_init(&adev->srbm_mutex);
	mutex_init(&adev->gfx.pipe_reserve_mutex);
	mutex_init(&adev->gfx.gfx_off_mutex);
	mutex_init(&adev->grbm_idx_mutex);
	mutex_init(&adev->mn_lock);
	mutex_init(&adev->virt.vf_errors.lock);
	hash_init(adev->mn_hash);
	atomic_set(&adev->in_gpu_reset, 0);
	init_rwsem(&adev->reset_sem);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);

	r = amdgpu_device_check_arguments(adev);
	if (r)
		return r;

	spin_lock_init(&adev->mmio_idx_lock);
	spin_lock_init(&adev->smc_idx_lock);
	spin_lock_init(&adev->pcie_idx_lock);
	spin_lock_init(&adev->uvd_ctx_idx_lock);
	spin_lock_init(&adev->didt_idx_lock);
	spin_lock_init(&adev->gc_cac_idx_lock);
	spin_lock_init(&adev->se_cac_idx_lock);
	spin_lock_init(&adev->audio_endpt_idx_lock);
	spin_lock_init(&adev->mm_stats.lock);

	INIT_LIST_HEAD(&adev->shadow_list);
	mutex_init(&adev->shadow_list_lock);

	INIT_LIST_HEAD(&adev->reset_list);

	INIT_DELAYED_WORK(&adev->delayed_init_work,
			  amdgpu_device_delayed_init_work_handler);
	INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work,
			  amdgpu_device_delay_enable_gfx_off);

	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);

	adev->gfx.gfx_off_req_count = 1;
	adev->pm.ac_power = power_supply_is_system_supplied() > 0;

	atomic_set(&adev->throttling_logging_enabled, 1);
	/*
	 * If throttling continues, logging will be performed every minute
	 * to avoid log flooding. "-1" is subtracted since the thermal
	 * throttling interrupt comes every second. Thus, the total logging
	 * interval is 59 seconds (ratelimited printk interval) + 1 (waiting
	 * for throttling interrupt) = 60 seconds.
	 */
	ratelimit_state_init(&adev->throttling_logging_rs, (60 - 1) * HZ, 1);
	ratelimit_set_flags(&adev->throttling_logging_rs, RATELIMIT_MSG_ON_RELEASE);

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	if (adev->asic_type >= CHIP_BONAIRE) {
		adev->rmmio_base = pci_resource_start(adev->pdev, 5);
		adev->rmmio_size = pci_resource_len(adev->pdev, 5);
	} else {
		adev->rmmio_base = pci_resource_start(adev->pdev, 2);
		adev->rmmio_size = pci_resource_len(adev->pdev, 2);
	}

	adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
	if (adev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);

	/* enable PCIE atomic ops */
	r = pci_enable_atomic_ops_to_root(adev->pdev,
					  PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
					  PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (r) {
		adev->have_atomics_support = false;
		DRM_INFO("PCIE atomic ops is not supported\n");
	} else {
		adev->have_atomics_support = true;
	}

	amdgpu_device_get_pcie_info(adev);

	if (amdgpu_mcbp)
		DRM_INFO("MCBP is enabled\n");

	if (amdgpu_mes && adev->asic_type >= CHIP_NAVI10)
		adev->enable_mes = true;

	/* detect hw virtualization here */
	amdgpu_detect_virtualization(adev);

	r = amdgpu_device_get_job_timeout_settings(adev);
	if (r) {
		dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n");
		goto failed_unmap;
	}

	/* early init functions */
	r = amdgpu_device_ip_early_init(adev);
	if (r)
		goto failed_unmap;

	/* doorbell bar mapping and doorbell index init */
	amdgpu_device_doorbell_init(adev);

	/* if we have > 1 VGA cards, then disable the amdgpu VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);

	if (amdgpu_device_supports_px(ddev)) {
		px = true;
		vga_switcheroo_register_client(adev->pdev,
					       &amdgpu_switcheroo_ops, px);
		vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
	}
A
Alex Deucher 已提交
3407

3408 3409 3410
	if (amdgpu_emu_mode == 1) {
		/* post the asic on emulation mode */
		emu_soc_asic_init(adev);
3411
		goto fence_driver_init;
3412
	}
3413

3414 3415
	/* detect if we are with an SRIOV vbios */
	amdgpu_device_detect_sriov_bios(adev);
	/* check if we need to reset the asic
	 *  E.g., driver was not cleanly unloaded previously, etc.
	 */
	if (!amdgpu_sriov_vf(adev) && amdgpu_asic_need_reset_on_init(adev)) {
		if (adev->gmc.xgmi.num_physical_nodes) {
			dev_info(adev->dev, "Pending hive reset.\n");
			adev->gmc.xgmi.pending_reset = true;
			/* Only need to init necessary block for SMU to handle the reset */
			for (i = 0; i < adev->num_ip_blocks; i++) {
				if (!adev->ip_blocks[i].status.valid)
					continue;
				if (!(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_COMMON ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH ||
				      adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC)) {
					DRM_DEBUG("IP %s disabled for hw_init.\n",
						adev->ip_blocks[i].version->funcs->name);
					adev->ip_blocks[i].status.hw = true;
				}
			}
		} else {
			r = amdgpu_asic_reset(adev);
			if (r) {
				dev_err(adev->dev, "asic reset on init failed\n");
				goto failed;
			}
		}
	}

	pci_enable_pcie_error_reporting(adev->pdev);

	/* Post card if necessary */
	if (amdgpu_device_need_post(adev)) {
		if (!adev->bios) {
			dev_err(adev->dev, "no vBIOS found\n");
			r = -EINVAL;
			goto failed;
		}
		DRM_INFO("GPU posting now...\n");
		r = amdgpu_device_asic_init(adev);
		if (r) {
			dev_err(adev->dev, "gpu post error!\n");
			goto failed;
		}
	}

	if (adev->is_atom_fw) {
		/* Initialize clocks */
		r = amdgpu_atomfirmware_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atomfirmware_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
	} else {
		/* Initialize clocks */
		r = amdgpu_atombios_get_clock_info(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_ATOMBIOS_GET_CLOCK_FAIL, 0, 0);
			goto failed;
		}
		/* init i2c buses */
		if (!amdgpu_device_has_dc_support(adev))
			amdgpu_atombios_i2c_init(adev);
	}
fence_driver_init:
	/* Fence driver */
	r = amdgpu_fence_driver_init(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
		goto failed;
	}

	/* init the mode config */
	drm_mode_config_init(adev_to_drm(adev));
	r = amdgpu_device_ip_init(adev);
	if (r) {
		/* failed in exclusive mode due to timeout */
		if (amdgpu_sriov_vf(adev) &&
		    !amdgpu_sriov_runtime(adev) &&
		    amdgpu_virt_mmio_blocked(adev) &&
		    !amdgpu_virt_wait_reset(adev)) {
			dev_err(adev->dev, "VF exclusive mode timeout\n");
			/* Don't send request since VF is inactive. */
			adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
			adev->virt.ops = NULL;
			r = -EAGAIN;
			goto release_ras_con;
		}
		dev_err(adev->dev, "amdgpu_device_ip_init failed\n");
		amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0);
		goto release_ras_con;
	}

	dev_info(adev->dev,
		"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
			adev->gfx.config.max_shader_engines,
			adev->gfx.config.max_sh_per_se,
			adev->gfx.config.max_cu_per_sh,
			adev->gfx.cu_info.number);

	adev->accel_working = true;

	amdgpu_vm_check_compute_bug(adev);

	/* Initialize the buffer migration limit. */
	if (amdgpu_moverate >= 0)
		max_MBps = amdgpu_moverate;
	else
		max_MBps = 8; /* Allow 8 MB/s. */
	/* Get a log2 for easy divisions. */
	adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
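	/* the buffer-move throttling code in amdgpu_cs.c can then convert
	 * between time and byte budgets with shifts instead of divisions
	 */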

	amdgpu_fbdev_init(adev);

	r = amdgpu_pm_sysfs_init(adev);
	if (r) {
		adev->pm_sysfs_en = false;
		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
	} else
		adev->pm_sysfs_en = true;

	r = amdgpu_ucode_sysfs_init(adev);
	if (r) {
		adev->ucode_sysfs_en = false;
		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
	} else
		adev->ucode_sysfs_en = true;
	if ((amdgpu_testing & 1)) {
		if (adev->accel_working)
			amdgpu_test_moves(adev);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping move tests\n");
	}
	if (amdgpu_benchmarking) {
		if (adev->accel_working)
			amdgpu_benchmark(adev, amdgpu_benchmarking);
		else
			DRM_INFO("amdgpu: acceleration disabled, skipping benchmarks\n");
	}

	/*
	 * Register the gpu instance before amdgpu_device_enable_mgpu_fan_boost.
	 * Otherwise the mgpu fan boost feature will be skipped because the
	 * gpu instance count would be too low.
	 */
	amdgpu_register_gpu_instance(adev);

	/* enable clockgating, etc. after ib tests, etc. since some blocks require
	 * explicit gating rather than handling it automatically.
	 */
	if (!adev->gmc.xgmi.pending_reset) {
		r = amdgpu_device_ip_late_init(adev);
		if (r) {
			dev_err(adev->dev, "amdgpu_device_ip_late_init failed\n");
			amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_LATE_INIT_FAIL, 0, r);
			goto release_ras_con;
		}
		/* must succeed. */
		amdgpu_ras_resume(adev);
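		/* kick off the delayed work that runs the deferred IB ring
		 * tests (amdgpu_device_delayed_init_work_handler())
		 */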
		queue_delayed_work(system_wq, &adev->delayed_init_work,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));
	}
	if (amdgpu_sriov_vf(adev))
		flush_delayed_work(&adev->delayed_init_work);

	r = sysfs_create_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (r)
		dev_err(adev->dev, "Could not create amdgpu device attr\n");

	if (IS_ENABLED(CONFIG_PERF_EVENTS)) {
		r = amdgpu_pmu_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu_pmu_init failed\n");
	}

	/* Have stored pci confspace at hand for restore in sudden PCI error */
	if (amdgpu_device_cache_pci_state(adev->pdev))
		pci_restore_state(pdev);

	if (adev->gmc.xgmi.pending_reset)
		queue_delayed_work(system_wq, &mgpu_info.delayed_reset_work,
				   msecs_to_jiffies(AMDGPU_RESUME_MS));

	return 0;

release_ras_con:
	amdgpu_release_ras_context(adev);

failed:
	amdgpu_vf_error_trans_all(adev);
	if (px)
		vga_switcheroo_fini_domain_pm_ops(adev->dev);

failed_unmap:
	iounmap(adev->rmmio);
	adev->rmmio = NULL;

	return r;
}

/**
 * amdgpu_device_fini - tear down the driver
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void amdgpu_device_fini(struct amdgpu_device *adev)
{
	dev_info(adev->dev, "amdgpu: finishing device.\n");
	flush_delayed_work(&adev->delayed_init_work);
	ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
	adev->shutdown = true;

	kfree(adev->pci_state);

	/* make sure IB tests have finished before entering exclusive mode
	 * to avoid preemption on IB tests
	 */
	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_request_full_gpu(adev, false);
		amdgpu_virt_fini_data_exchange(adev);
	}
	/* disable all interrupts */
	amdgpu_irq_disable_all(adev);
	if (adev->mode_info.mode_config_initialized) {
		if (!amdgpu_device_has_dc_support(adev))
			drm_helper_force_disable_all(adev_to_drm(adev));
		else
			drm_atomic_helper_shutdown(adev_to_drm(adev));
	}
	amdgpu_fence_driver_fini(adev);
	if (adev->pm_sysfs_en)
		amdgpu_pm_sysfs_fini(adev);
	amdgpu_fbdev_fini(adev);
	amdgpu_device_ip_fini(adev);
	release_firmware(adev->firmware.gpu_info_fw);
	adev->firmware.gpu_info_fw = NULL;
	adev->accel_working = false;
	/* free i2c buses */
	if (!amdgpu_device_has_dc_support(adev))
		amdgpu_i2c_fini(adev);

	if (amdgpu_emu_mode != 1)
		amdgpu_atombios_fini(adev);

	kfree(adev->bios);
	adev->bios = NULL;
	if (amdgpu_device_supports_px(adev_to_drm(adev))) {
		vga_switcheroo_unregister_client(adev->pdev);
		vga_switcheroo_fini_domain_pm_ops(adev->dev);
	}
	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
		vga_client_register(adev->pdev, NULL, NULL, NULL);
	iounmap(adev->rmmio);
	adev->rmmio = NULL;
	amdgpu_device_doorbell_fini(adev);

	if (adev->ucode_sysfs_en)
		amdgpu_ucode_sysfs_fini(adev);

	sysfs_remove_files(&adev->dev->kobj, amdgpu_dev_attributes);
	if (IS_ENABLED(CONFIG_PERF_EVENTS))
		amdgpu_pmu_fini(adev);
	if (adev->mman.discovery_bin)
		amdgpu_discovery_fini(adev);
}


/*
 * Suspend & resume.
 */
/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of suspend
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	adev->in_suspend = true;
	drm_kms_helper_poll_disable(dev);

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 1);

	cancel_delayed_work_sync(&adev->delayed_init_work);

	amdgpu_ras_suspend(adev);

	r = amdgpu_device_ip_suspend_phase1(adev);

	amdgpu_amdkfd_suspend(adev, adev->in_runpm);

	/* evict vram memory */
	amdgpu_bo_evict_vram(adev);

3732
	amdgpu_fence_driver_suspend(adev);
A
Alex Deucher 已提交
3733

3734
	r = amdgpu_device_ip_suspend_phase2(adev);
3735 3736 3737 3738
	/* evict remaining vram memory
	 * This second call to evict vram is to evict the gart page table
	 * using the CPU.
	 */
	amdgpu_bo_evict_vram(adev);

	return 0;
}
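
/*
 * A typical system-PM callback pairs this with amdgpu_device_resume(),
 * e.g. (sketch, not the actual amdgpu_drv.c code):
 *
 *	static int amdgpu_pmops_suspend(struct device *dev)
 *	{
 *		struct drm_device *drm_dev = dev_get_drvdata(dev);
 *
 *		return amdgpu_device_suspend(drm_dev, true);
 *	}
 */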

/**
 * amdgpu_device_resume - initiate device resume
 *
 * @dev: drm dev pointer
 * @fbcon: notify the fbdev of resume
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r = 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (adev->in_s0ix)
		amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry);

	/* post card */
	if (amdgpu_device_need_post(adev)) {
		r = amdgpu_device_asic_init(adev);
		if (r)
			dev_err(adev->dev, "amdgpu asic init failed\n");
	}

	r = amdgpu_device_ip_resume(adev);
	if (r) {
		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
		return r;
	}
	amdgpu_fence_driver_resume(adev);

	r = amdgpu_device_ip_late_init(adev);
	if (r)
		return r;

	queue_delayed_work(system_wq, &adev->delayed_init_work,
			   msecs_to_jiffies(AMDGPU_RESUME_MS));

	r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
	if (r)
		return r;

	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	if (fbcon)
		amdgpu_fbdev_set_suspend(adev, 0);

	drm_kms_helper_poll_enable(dev);
	amdgpu_ras_resume(adev);

	/*
	 * Most of the connector probing functions try to acquire runtime pm
	 * refs to ensure that the GPU is powered on when connector polling is
	 * performed. Since we're calling this from a runtime PM callback,
	 * trying to acquire rpm refs will cause us to deadlock.
	 *
	 * Since we're guaranteed to be holding the rpm lock, it's safe to
	 * temporarily disable the rpm helpers so this doesn't deadlock us.
	 */
#ifdef CONFIG_PM
	dev->dev->power.disable_depth++;
#endif
	if (!amdgpu_device_has_dc_support(adev))
		drm_helper_hpd_irq_event(dev);
	else
		drm_kms_helper_hotplug_event(dev);
#ifdef CONFIG_PM
	dev->dev->power.disable_depth--;
#endif
	adev->in_suspend = false;

	return 0;
}

/**
 * amdgpu_device_ip_check_soft_reset - did soft reset succeed
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and
 * the check_soft_reset callbacks are run.  check_soft_reset determines
 * if the asic is still hung or not.
 * Returns true if any of the IPs are still in a hung state, false if not.
 */
static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
{
	int i;
	bool asic_hang = false;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].version->funcs->check_soft_reset)
			adev->ip_blocks[i].status.hang =
				adev->ip_blocks[i].version->funcs->check_soft_reset(adev);
		if (adev->ip_blocks[i].status.hang) {
			dev_info(adev->dev, "IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name);
			asic_hang = true;
		}
	}
	return asic_hang;
}

/**
 * amdgpu_device_ip_pre_soft_reset - prepare for soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * pre_soft_reset callbacks are run if the block is hung.  pre_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary for a soft reset to succeed.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_pre_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->pre_soft_reset) {
			r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_need_full_reset - check if a full asic reset is needed
 *
 * @adev: amdgpu_device pointer
 *
 * Some hardware IPs cannot be soft reset.  If they are hung, a full gpu
 * reset is necessary to recover.
 * Returns true if a full asic reset is required, false if not.
 */
static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
{
	int i;

	if (amdgpu_asic_need_full_reset(adev))
		return true;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
3911 3912
		    (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
		     adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
			if (adev->ip_blocks[i].status.hang) {
				dev_info(adev->dev, "Some blocks need full reset!\n");
				return true;
			}
		}
	}
	return false;
}

/**
 * amdgpu_device_ip_soft_reset - do a soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * soft_reset callbacks are run if the block is hung.  soft_reset handles any
 * IP specific hardware or software state changes that are necessary to soft
 * reset the IP.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->soft_reset) {
			r = adev->ip_blocks[i].version->funcs->soft_reset(adev);
			if (r)
				return r;
		}
	}

	return 0;
}

/**
 * amdgpu_device_ip_post_soft_reset - clean up from soft reset
 *
 * @adev: amdgpu_device pointer
 *
 * The list of all the hardware IPs that make up the asic is walked and the
 * post_soft_reset callbacks are run if the asic was hung.  post_soft_reset
 * handles any IP specific hardware or software state changes that are
 * necessary after the IP has been soft reset.
 * Returns 0 on success, negative error code on failure.
 */
static int amdgpu_device_ip_post_soft_reset(struct amdgpu_device *adev)
{
	int i, r = 0;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_blocks[i].status.valid)
			continue;
		if (adev->ip_blocks[i].status.hang &&
		    adev->ip_blocks[i].version->funcs->post_soft_reset)
			r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev);
		if (r)
			return r;
	}

	return 0;
}

/**
 * amdgpu_device_recover_vram - Recover some VRAM contents
 *
 * @adev: amdgpu_device pointer
 *
 * Restores the contents of VRAM buffers from the shadows in GTT.  Used to
 * restore things like GPUVM page tables after a GPU reset where
 * the contents of VRAM might be lost.
 *
 * Returns:
 * 0 on success, negative error code on failure.
 */
static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
{
	struct dma_fence *fence = NULL, *next = NULL;
	struct amdgpu_bo *shadow;
	long r = 1, tmo;

	if (amdgpu_sriov_runtime(adev))
		tmo = msecs_to_jiffies(8000);
	else
		tmo = msecs_to_jiffies(100);

	dev_info(adev->dev, "recover vram bo from shadow start\n");
	mutex_lock(&adev->shadow_list_lock);
	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {

		/* No need to recover an evicted BO */
		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
			continue;

		r = amdgpu_bo_restore_shadow(shadow, &next);
		if (r)
			break;

		if (fence) {
			tmo = dma_fence_wait_timeout(fence, false, tmo);
			dma_fence_put(fence);
			fence = next;
			if (tmo == 0) {
				r = -ETIMEDOUT;
				break;
			} else if (tmo < 0) {
				r = tmo;
				break;
			}
		} else {
			fence = next;
		}
	}
	mutex_unlock(&adev->shadow_list_lock);

	if (fence)
		tmo = dma_fence_wait_timeout(fence, false, tmo);
	dma_fence_put(fence);

	if (r < 0 || tmo <= 0) {
		dev_err(adev->dev, "recover vram bo from shadow failed, r is %ld, tmo is %ld\n", r, tmo);
		return -EIO;
	}

	dev_info(adev->dev, "recover vram bo from shadow done\n");
	return 0;
}


/**
 * amdgpu_device_reset_sriov - reset ASIC for SR-IOV vf
 *
 * @adev: amdgpu_device pointer
 * @from_hypervisor: request from hypervisor
 *
 * do VF FLR and reinitialize Asic
 * Returns 0 on success or a negative error code on failure.
 */
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
				     bool from_hypervisor)
{
	int r;

	if (from_hypervisor)
		r = amdgpu_virt_request_full_gpu(adev, true);
	else
		r = amdgpu_virt_reset_gpu(adev);
	if (r)
		return r;
	amdgpu_amdkfd_pre_reset(adev);

	/* Resume IP prior to SMC */
	r = amdgpu_device_ip_reinit_early_sriov(adev);
	if (r)
		goto error;

	amdgpu_virt_init_data_exchange(adev);
	/* we need to recover the GART prior to running SMC/CP/SDMA resume */
	amdgpu_gtt_mgr_recover(ttm_manager_type(&adev->mman.bdev, TTM_PL_TT));

	r = amdgpu_device_fw_loading(adev);
	if (r)
		goto error;

	/* now we are okay to resume SMC/CP/SDMA */
	r = amdgpu_device_ip_reinit_late_sriov(adev);
	if (r)
		goto error;

	amdgpu_irq_gpu_reset_resume_helper(adev);
	r = amdgpu_ib_ring_tests(adev);
	amdgpu_amdkfd_post_reset(adev);

error:
	amdgpu_virt_release_full_gpu(adev, true);
	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
		amdgpu_inc_vram_lost(adev);
		r = amdgpu_device_recover_vram(adev);
	}

	return r;
}

/**
 * amdgpu_device_has_job_running - check if there is any job in mirror list
 *
 * @adev: amdgpu_device pointer
 *
 * check if there is any job in mirror list
 */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
{
	int i;
	struct drm_sched_job *job;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		spin_lock(&ring->sched.job_list_lock);
		job = list_first_entry_or_null(&ring->sched.pending_list,
					       struct drm_sched_job, list);
		spin_unlock(&ring->sched.job_list_lock);
		if (job)
			return true;
	}
	return false;
}

/**
 * amdgpu_device_should_recover_gpu - check if we should try GPU recovery
 *
 * @adev: amdgpu_device pointer
 *
 * Check amdgpu_gpu_recovery and SRIOV status to see if we should try to recover
 * a hung GPU.
 */
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
{
	if (!amdgpu_device_ip_check_soft_reset(adev)) {
		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
		return false;
	}

	if (amdgpu_gpu_recovery == 0)
		goto disabled;

	if (amdgpu_sriov_vf(adev))
		return true;

	if (amdgpu_gpu_recovery == -1) {
		switch (adev->asic_type) {
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_TOPAZ:
		case CHIP_TONGA:
		case CHIP_FIJI:
		case CHIP_POLARIS10:
		case CHIP_POLARIS11:
		case CHIP_POLARIS12:
		case CHIP_VEGAM:
		case CHIP_VEGA20:
		case CHIP_VEGA10:
		case CHIP_VEGA12:
		case CHIP_RAVEN:
		case CHIP_ARCTURUS:
		case CHIP_RENOIR:
		case CHIP_NAVI10:
		case CHIP_NAVI14:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
		case CHIP_NAVY_FLOUNDER:
		case CHIP_DIMGREY_CAVEFISH:
		case CHIP_VANGOGH:
			break;
		default:
			goto disabled;
		}
	}

	return true;

disabled:
	dev_info(adev->dev, "GPU recovery disabled.\n");
	return false;
}

int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	amdgpu_device_cache_pci_state(adev->pdev);

	if (amdgpu_dpm_is_mode1_reset_supported(adev)) {
		dev_info(adev->dev, "GPU smu mode1 reset\n");
		ret = amdgpu_dpm_mode1_reset(adev);
	} else {
		dev_info(adev->dev, "GPU psp mode1 reset\n");
		ret = psp_gpu_reset(adev);
	}

	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	amdgpu_device_load_pci_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
	return ret;
}

int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
				  struct amdgpu_job *job,
				  bool *need_full_reset_arg)
{
	int i, r = 0;
	bool need_full_reset = *need_full_reset_arg;

	/* no need to dump if device is not in good state during probe period */
	if (!adev->gmc.xgmi.pending_reset)
		amdgpu_debugfs_wait_dump(adev);
	if (amdgpu_sriov_vf(adev)) {
		/* stop the data exchange thread */
		amdgpu_virt_fini_data_exchange(adev);
	}

	/* block all schedulers and reset given job's ring */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
		amdgpu_fence_driver_force_completion(ring);
	}

	if (job)
		drm_sched_increase_karma(&job->base);

	/* Don't suspend on bare metal if we are not going to HW reset the ASIC */
	if (!amdgpu_sriov_vf(adev)) {

		if (!need_full_reset)
			need_full_reset = amdgpu_device_ip_need_full_reset(adev);

		if (!need_full_reset) {
			amdgpu_device_ip_pre_soft_reset(adev);
			r = amdgpu_device_ip_soft_reset(adev);
			amdgpu_device_ip_post_soft_reset(adev);
			if (r || amdgpu_device_ip_check_soft_reset(adev)) {
				dev_info(adev->dev, "soft reset failed, will fallback to full reset!\n");
				need_full_reset = true;
			}
		}

		if (need_full_reset)
			r = amdgpu_device_ip_suspend(adev);

		*need_full_reset_arg = need_full_reset;
	}

	return r;
}

int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
			  struct list_head *device_list_handle,
			  bool *need_full_reset_arg,
			  bool skip_hw_reset)
{
	struct amdgpu_device *tmp_adev = NULL;
	bool need_full_reset = *need_full_reset_arg, vram_lost = false;
	int r = 0;

	/*
	 * ASIC reset has to be done on all XGMI hive nodes ASAP
	 * to allow proper links negotiation in FW (within 1 sec)
	 */
	if (!skip_hw_reset && need_full_reset) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			/* For XGMI run all resets in parallel to speed up the process */
			if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
				tmp_adev->gmc.xgmi.pending_reset = false;
				if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work))
					r = -EALREADY;
			} else
				r = amdgpu_asic_reset(tmp_adev);

			if (r) {
				dev_err(tmp_adev->dev, "ASIC reset failed with error, %d for drm dev, %s",
					 r, adev_to_drm(tmp_adev)->unique);
				break;
			}
		}

		/* For XGMI wait for all resets to complete before proceed */
		if (!r) {
			list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
				if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
					flush_work(&tmp_adev->xgmi_reset_work);
					r = tmp_adev->asic_reset_res;
					if (r)
						break;
				}
			}
		}
	}
	if (!r && amdgpu_ras_intr_triggered()) {
		list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
			if (tmp_adev->mmhub.funcs &&
			    tmp_adev->mmhub.funcs->reset_ras_error_count)
				tmp_adev->mmhub.funcs->reset_ras_error_count(tmp_adev);
		}

		amdgpu_ras_intr_cleared();
	}

	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		if (need_full_reset) {
			/* post card */
			r = amdgpu_device_asic_init(tmp_adev);
			if (r) {
				dev_warn(tmp_adev->dev, "asic atom init failed!");
			} else {
				dev_info(tmp_adev->dev, "GPU reset succeeded, trying to resume\n");
				r = amdgpu_device_ip_resume_phase1(tmp_adev);
				if (r)
					goto out;

				vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
				if (vram_lost) {
					DRM_INFO("VRAM is lost due to GPU reset!\n");
					amdgpu_inc_vram_lost(tmp_adev);
				}

				r = amdgpu_gtt_mgr_recover(ttm_manager_type(&tmp_adev->mman.bdev, TTM_PL_TT));
				if (r)
					goto out;

				r = amdgpu_device_fw_loading(tmp_adev);
				if (r)
					return r;

				r = amdgpu_device_ip_resume_phase2(tmp_adev);
				if (r)
					goto out;

				if (vram_lost)
					amdgpu_device_fill_reset_magic(tmp_adev);

				/*
				 * Add this ASIC back to the tracked list since
				 * the reset completed successfully.
				 */
				amdgpu_register_gpu_instance(tmp_adev);

				if (!hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					amdgpu_xgmi_add_device(tmp_adev);

				r = amdgpu_device_ip_late_init(tmp_adev);
				if (r)
					goto out;

				amdgpu_fbdev_set_suspend(tmp_adev, 0);

				/*
				 * The GPU enters a bad state once the number
				 * of faulty pages detected by ECC reaches the
				 * threshold, and RAS recovery is scheduled
				 * next. So add one check here to break the
				 * recovery if the bad page threshold has been
				 * exceeded, and remind the user to retire this
				 * GPU or set a bigger bad_page_threshold value
				 * when probing the driver again.
				 */
				if (!amdgpu_ras_eeprom_check_err_threshold(tmp_adev)) {
					/* must succeed. */
					amdgpu_ras_resume(tmp_adev);
				} else {
					r = -EINVAL;
					goto out;
				}
				/* Update PSP FW topology after reset */
				if (hive && tmp_adev->gmc.xgmi.num_physical_nodes > 1)
					r = amdgpu_xgmi_update_topology(hive, tmp_adev);
			}
		}

out:
		if (!r) {
			amdgpu_irq_gpu_reset_resume_helper(tmp_adev);
			r = amdgpu_ib_ring_tests(tmp_adev);
			if (r) {
				dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
				r = amdgpu_device_ip_suspend(tmp_adev);
				need_full_reset = true;
				r = -EAGAIN;
				goto end;
			}
		}

		if (!r)
			r = amdgpu_device_recover_vram(tmp_adev);
		else
			tmp_adev->asic_reset_res = r;
	}

end:
	*need_full_reset_arg = need_full_reset;
	return r;
}

static bool amdgpu_device_lock_adev(struct amdgpu_device *adev,
				struct amdgpu_hive_info *hive)
{
	if (atomic_cmpxchg(&adev->in_gpu_reset, 0, 1) != 0)
		return false;

	if (hive) {
		down_write_nest_lock(&adev->reset_sem, &hive->hive_lock);
	} else {
		down_write(&adev->reset_sem);
	}
	switch (amdgpu_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_MODE1:
		adev->mp1_state = PP_MP1_STATE_SHUTDOWN;
		break;
	case AMD_RESET_METHOD_MODE2:
		adev->mp1_state = PP_MP1_STATE_RESET;
		break;
	default:
		adev->mp1_state = PP_MP1_STATE_NONE;
		break;
	}
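	/* amdgpu_device_unlock_adev() undoes all of the above */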

	return true;
}

static void amdgpu_device_unlock_adev(struct amdgpu_device *adev)
{
	amdgpu_vf_error_trans_all(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
	atomic_set(&adev->in_gpu_reset, 0);
	up_write(&adev->reset_sem);
}

/*
 * Safely lock a list of amdgpu devices in a hive. If this is not a hive
 * with multiple nodes, it behaves like amdgpu_device_lock_adev().
 *
 * Unlock won't require a roll back.
 */
static int amdgpu_device_lock_hive_adev(struct amdgpu_device *adev, struct amdgpu_hive_info *hive)
{
	struct amdgpu_device *tmp_adev = NULL;

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		if (!hive) {
			dev_err(adev->dev, "Hive is NULL while device has multiple xgmi nodes");
			return -ENODEV;
		}
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			if (!amdgpu_device_lock_adev(tmp_adev, hive))
				goto roll_back;
		}
	} else if (!amdgpu_device_lock_adev(adev, hive))
		return -EAGAIN;

	return 0;
roll_back:
	if (!list_is_first(&tmp_adev->gmc.xgmi.head, &hive->device_list)) {
		/*
		 * If the locking iteration broke in the middle of a hive,
		 * it may indicate a race condition or that a hive device
		 * locked up independently. We may or may not be in trouble,
		 * so try to roll back the locks taken so far and emit a
		 * warning.
		 */
		dev_warn(tmp_adev->dev, "Hive lock iteration broke in the middle. Rolling back to unlock");
		list_for_each_entry_continue_reverse(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			amdgpu_device_unlock_adev(tmp_adev);
		}
	}
	return -EAGAIN;
}

static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
{
	struct pci_dev *p = NULL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (p) {
		pm_runtime_enable(&(p->dev));
		pm_runtime_resume(&(p->dev));
	}
}

static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
{
	enum amd_reset_method reset_method;
	struct pci_dev *p = NULL;
	u64 expires;

	/*
	 * For now, only BACO and mode1 reset are confirmed
	 * to suffer the audio issue without a proper suspend.
	 */
	reset_method = amdgpu_asic_reset_method(adev);
	if ((reset_method != AMD_RESET_METHOD_BACO) &&
	     (reset_method != AMD_RESET_METHOD_MODE1))
		return -EINVAL;

	p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
			adev->pdev->bus->number, 1);
	if (!p)
		return -ENODEV;

	expires = pm_runtime_autosuspend_expiration(&(p->dev));
	if (!expires)
		/*
		 * If we cannot get the audio device autosuspend delay,
		 * a fixed 4s interval will be used. Since 3s is the audio
		 * controller's default autosuspend delay setting, the 4s
		 * used here is guaranteed to cover it.
		 */
		expires = ktime_get_mono_fast_ns() + NSEC_PER_SEC * 4ULL;

	while (!pm_runtime_status_suspended(&(p->dev))) {
		if (!pm_runtime_suspend(&(p->dev)))
			break;

		if (expires < ktime_get_mono_fast_ns()) {
			dev_warn(adev->dev, "failed to suspend display audio\n");
			/* TODO: abort the succeeding gpu reset? */
			return -ETIMEDOUT;
		}
	}

	pm_runtime_disable(&(p->dev));

	return 0;
}

/**
 * amdgpu_device_gpu_recover - reset the asic and recover scheduler
 *
 * @adev: amdgpu_device pointer
 * @job: which job trigger hang
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Attempt to do soft-reset or full-reset and reinitialize Asic
 * Returns 0 for success or an error on failure.
 */

int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job)
{
	struct list_head device_list, *device_list_handle = NULL;
	bool need_full_reset = false;
	bool job_signaled = false;
	struct amdgpu_hive_info *hive = NULL;
	struct amdgpu_device *tmp_adev = NULL;
	int i, r = 0;
	bool need_emergency_restart = false;
	bool audio_suspended = false;

	/*
	 * Special case: RAS triggered and full reset isn't supported
	 */
	need_emergency_restart = amdgpu_ras_need_emergency_restart(adev);

	/*
	 * Flush RAM to disk so that after reboot
	 * the user can read log and see why the system rebooted.
	 */
	if (need_emergency_restart && amdgpu_ras_get_context(adev)->reboot) {
		DRM_WARN("Emergency reboot.");

		ksys_sync_helper();
		emergency_restart();
	}

	dev_info(adev->dev, "GPU %s begin!\n",
		need_emergency_restart ? "jobs stop" : "reset");

	/*
	 * Here we trylock to avoid a chain of resets executing from
	 * either jobs triggered on different adevs in an XGMI hive or jobs
	 * on different schedulers for the same device while this TO handler
	 * is running. We always reset all schedulers for a device and all
	 * devices in an XGMI hive, so that should take care of them too.
	 */
	hive = amdgpu_get_xgmi_hive(adev);
	if (hive) {
		if (atomic_cmpxchg(&hive->in_reset, 0, 1) != 0) {
			DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
				job ? job->base.id : -1, hive->hive_id);
			amdgpu_put_xgmi_hive(hive);
			if (job)
				drm_sched_increase_karma(&job->base);
			return 0;
		}
		mutex_lock(&hive->hive_lock);
	}

	/*
	 * Lock the device before we try to operate on the linked list.
	 * If we didn't get the device lock, don't touch the linked list
	 * since others may be iterating over it.
	 */
	r = amdgpu_device_lock_hive_adev(adev, hive);
	if (r) {
		dev_info(adev->dev, "Bailing on TDR for s_job:%llx, as another already in progress",
					job ? job->base.id : -1);

		/* even we skipped this reset, still need to set the job to guilty */
		if (job)
			drm_sched_increase_karma(&job->base);
		goto skip_recovery;
	}

	/*
	 * Build list of devices to reset.
	 * In case we are in XGMI hive mode, resort the device list
	 * to put adev in the 1st position.
	 */
	INIT_LIST_HEAD(&device_list);
	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
			list_add_tail(&tmp_adev->reset_list, &device_list);
		if (!list_is_first(&adev->reset_list, &device_list))
			list_rotate_to_front(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	} else {
		list_add_tail(&adev->reset_list, &device_list);
		device_list_handle = &device_list;
	}

	/* block all schedulers and reset given job's ring */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/*
		 * Try to put the audio codec into suspend state
		 * before gpu reset starts.
		 *
		 * Because the power domain of the graphics device
		 * is shared with the AZ power domain, without this
		 * we may change the audio hardware from behind
		 * the audio driver's back and trigger audio codec
		 * errors.
		 */
		if (!amdgpu_device_suspend_display_audio(tmp_adev))
			audio_suspended = true;

		amdgpu_ras_set_error_query_ready(tmp_adev, false);

		cancel_delayed_work_sync(&tmp_adev->delayed_init_work);

		if (!amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_pre_reset(tmp_adev);

		/*
		 * Mark the ASICs to be reset as untracked first,
		 * and add them back after the reset completes.
		 */
		amdgpu_unregister_gpu_instance(tmp_adev);

		amdgpu_fbdev_set_suspend(tmp_adev, 1);

		/* disable ras on ALL IPs */
		if (!need_emergency_restart &&
		      amdgpu_device_ip_need_full_reset(tmp_adev))
			amdgpu_ras_suspend(tmp_adev);

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, job ? &job->base : NULL);

			if (need_emergency_restart)
				amdgpu_job_stop_all_jobs_on_sched(&ring->sched);
		}
		atomic_inc(&tmp_adev->gpu_reset_counter);
	}

	if (need_emergency_restart)
		goto skip_sched_resume;

	/*
	 * Must check guilty signal here since after this point all old
	 * HW fences are force signaled.
	 *
	 * job->base holds a reference to parent fence
	 */
	if (job && job->base.s_fence->parent &&
4719
	    dma_fence_is_signaled(job->base.s_fence->parent)) {
4720 4721 4722 4723 4724
		job_signaled = true;
		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
		goto skip_hw_reset;
	}

retry:	/* Rest of adevs pre asic reset from XGMI hive. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		r = amdgpu_device_pre_asic_reset(tmp_adev,
						 (tmp_adev == adev) ? job : NULL,
						 &need_full_reset);
		/* TODO: Should we stop? */
		if (r) {
			dev_err(tmp_adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				  r, adev_to_drm(tmp_adev)->unique);
			tmp_adev->asic_reset_res = r;
		}
	}

	/* Actual ASIC resets if needed.*/
	/* TODO Implement XGMI hive reset logic for SRIOV */
	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_device_reset_sriov(adev, job ? false : true);
		if (r)
			adev->asic_reset_res = r;
	} else {
4745
		r  = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset, false);
4746 4747 4748 4749
		if (r && r == -EAGAIN)
			goto retry;
	}

skip_hw_reset:

	/* Post ASIC reset for all devs. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = tmp_adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			/* No point in resubmitting jobs if we didn't HW reset */
			if (!tmp_adev->asic_reset_res && !job_signaled)
				drm_sched_resubmit_jobs(&ring->sched);

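			/* restart the scheduler; full recovery only if the
			 * ASIC reset actually succeeded
			 */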
			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
			drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
		}

		tmp_adev->asic_reset_res = 0;

		if (r) {
			/* bad news, how to tell it to userspace ? */
			dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter));
			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
		} else {
			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
		}
	}

skip_sched_resume:
	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
		/* unlock kfd: SRIOV would do it separately */
		if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
			amdgpu_amdkfd_post_reset(tmp_adev);

		/* kfd_post_reset will do nothing if the kfd device is not
		 * initialized, so bring kfd up here if it was not initialized
		 * before
		 */
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);

		if (audio_suspended)
			amdgpu_device_resume_display_audio(tmp_adev);
		amdgpu_device_unlock_adev(tmp_adev);
	}

skip_recovery:
	if (hive) {
		atomic_set(&hive->in_reset, 0);
		mutex_unlock(&hive->hive_lock);
		amdgpu_put_xgmi_hive(hive);
	}

	if (r && r != -EAGAIN)
		dev_info(adev->dev, "GPU reset end with ret = %d\n", r);
	return r;
}

/**
 * amdgpu_device_get_pcie_info - fetch pcie info about the PCIE slot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches and stores in the driver the PCIE capabilities (gen speed
 * and lanes) of the slot the device is in. Handles APUs and
 * virtualized environments where PCIE config space may not be available.
 */
static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev)
{
	struct pci_dev *pdev;
	enum pci_bus_speed speed_cap, platform_speed_cap;
	enum pcie_link_width platform_link_width;
	if (amdgpu_pcie_gen_cap)
		adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
	if (amdgpu_pcie_lane_cap)
		adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
	/* covers APUs as well */
	if (pci_is_root_bus(adev->pdev->bus)) {
		if (adev->pm.pcie_gen_mask == 0)
			adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
		if (adev->pm.pcie_mlw_mask == 0)
			adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
		return;
	}

	if (adev->pm.pcie_gen_mask && adev->pm.pcie_mlw_mask)
		return;

	pcie_bandwidth_available(adev->pdev, NULL,
				 &platform_speed_cap, &platform_link_width);
	if (adev->pm.pcie_gen_mask == 0) {
		/* asic caps */
		pdev = adev->pdev;
		speed_cap = pcie_get_speed_cap(pdev);
		if (speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
						  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
		} else {
			if (speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1;
		}
		/* platform caps */
		if (platform_speed_cap == PCI_SPEED_UNKNOWN) {
			adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
						   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
		} else {
			if (platform_speed_cap == PCIE_SPEED_32_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5);
			else if (platform_speed_cap == PCIE_SPEED_16_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4);
			else if (platform_speed_cap == PCIE_SPEED_8_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3);
			else if (platform_speed_cap == PCIE_SPEED_5_0GT)
				adev->pm.pcie_gen_mask |= (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 |
							   CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2);
			else
				adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;

		}
	}
	if (adev->pm.pcie_mlw_mask == 0) {
		if (platform_link_width == PCIE_LNK_WIDTH_UNKNOWN) {
			adev->pm.pcie_mlw_mask |= AMDGPU_DEFAULT_PCIE_MLW_MASK;
		} else {
			switch (platform_link_width) {
			case PCIE_LNK_X32:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X16:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X12:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X8:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X4:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X2:
				adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
							  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
				break;
			case PCIE_LNK_X1:
				adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
				break;
			default:
				break;
			}
		}
	}
}
int amdgpu_device_baco_enter(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	return amdgpu_dpm_baco_enter(adev);
}

int amdgpu_device_baco_exit(struct drm_device *dev)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	if (!amdgpu_device_supports_baco(adev_to_drm(adev)))
		return -ENOTSUPP;

	ret = amdgpu_dpm_baco_exit(adev);
	if (ret)
		return ret;
	if (ras && ras->supported && adev->nbio.funcs->enable_doorbell_interrupt)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
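
/*
 * These are intended to be used in enter/exit pairs, e.g. (sketch, not
 * the actual runtime-PM code):
 *
 *	r = amdgpu_device_baco_enter(dev);
 *	if (!r) {
 *		... device sits in BACO here ...
 *		r = amdgpu_device_baco_exit(dev);
 *	}
 */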
static void amdgpu_cancel_all_tdr(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		cancel_delayed_work_sync(&ring->sched.work_tdr);
	}
}

/**
 * amdgpu_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT.
 */
pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: detected callback, state(%d)!!\n", state);

	if (adev->gmc.xgmi.num_physical_nodes > 1) {
		DRM_WARN("No support for XGMI hive yet...");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	/* Fatal error, prepare for slot reset */
	case pci_channel_io_frozen:
		/*
5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055
		 * Cancel and wait for all TDRs in progress if failing to
		 * set  adev->in_gpu_reset in amdgpu_device_lock_adev
		 *
		 * Locking adev->reset_sem will prevent any external access
		 * to GPU during PCI error recovery
		 */
		while (!amdgpu_device_lock_adev(adev, NULL))
			amdgpu_cancel_all_tdr(adev);

		/*
		 * Block any work scheduling as we do for regular GPU reset
		 * for the duration of the recovery
		 */
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_ring *ring = adev->rings[i];

			if (!ring || !ring->sched.thread)
				continue;

			drm_sched_stop(&ring->sched, NULL);
		}
		atomic_inc(&adev->gpu_reset_counter);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent error, prepare for device removal */
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * amdgpu_pci_mmio_enabled - Enable MMIO and dump debug registers
 * @pdev: pointer to PCI device
 */
pci_ers_result_t amdgpu_pci_mmio_enabled(struct pci_dev *pdev)
{

	DRM_INFO("PCI error: mmio enabled callback!!\n");

	/* TODO - dump whatever for debugging purposes */

	/* This called only if amdgpu_pci_error_detected returns
	 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
	 * works, no need to reset slot.
	 */

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_slot_reset - Called when PCI slot has been reset.
 * @pdev: PCI device struct
 *
 * Description: This routine is called by the pci error recovery
 * code after the PCI slot has been reset, just before we
 * should resume normal operations.
 */
pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r, i;
	bool need_full_reset = true;
	u32 memsize;
	struct list_head device_list;

	DRM_INFO("PCI error: slot reset callback!!\n");

	INIT_LIST_HEAD(&device_list);
	list_add_tail(&adev->reset_list, &device_list);

	/* wait for asic to come out of reset */
	msleep(500);

	/* Restore PCI confspace */
	amdgpu_device_load_pci_state(pdev);

	/* confirm ASIC came out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		memsize = amdgpu_asic_get_config_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}
	if (memsize == 0xffffffff) {
		r = -ETIME;
		goto out;
	}

	adev->in_pci_err_recovery = true;
	r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset);
	adev->in_pci_err_recovery = false;
	if (r)
		goto out;

	r = amdgpu_do_asic_reset(NULL, &device_list, &need_full_reset, true);

out:
	if (!r) {
		if (amdgpu_device_cache_pci_state(adev->pdev))
			pci_restore_state(adev->pdev);

		DRM_INFO("PCIe error recovery succeeded\n");
	} else {
		DRM_ERROR("PCIe error recovery failed, err:%d", r);
		amdgpu_device_unlock_adev(adev);
	}

	return r ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

/**
 * amdgpu_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that its
 * OK to resume normal operation.
 */
void amdgpu_pci_resume(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int i;

	DRM_INFO("PCI error: resume callback!!\n");

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_ring *ring = adev->rings[i];

		if (!ring || !ring->sched.thread)
			continue;

		drm_sched_resubmit_jobs(&ring->sched);
		drm_sched_start(&ring->sched, true);
	}

	amdgpu_device_unlock_adev(adev);
}

bool amdgpu_device_cache_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	r = pci_save_state(pdev);
	if (!r) {
		kfree(adev->pci_state);

		adev->pci_state = pci_store_saved_state(pdev);

		if (!adev->pci_state) {
			DRM_ERROR("Failed to store PCI saved state");
			return false;
		}
	} else {
		DRM_WARN("Failed to save PCI state, err:%d\n", r);
		return false;
	}

	return true;
}

bool amdgpu_device_load_pci_state(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);
	int r;

	if (!adev->pci_state)
		return false;

	r = pci_load_saved_state(pdev, adev->pci_state);

	if (!r) {
		pci_restore_state(pdev);
	} else {
		DRM_WARN("Failed to load PCI state, err:%d\n", r);
		return false;
	}

	return true;
}