/*
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/amdgpu_drm.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
#include <drm/drm_managed.h>
#include "amdgpu_drv.h"

#include <drm/drm_pciids.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/vga_switcheroo.h>
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>

#include "amdgpu.h"
#include "amdgpu_irq.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_sched.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_amdkfd.h"

#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_reset.h"

/*
 * KMS wrapper.
 * - 3.0.0 - initial driver
 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
 *           at the end of IBs.
 * - 3.3.0 - Add VM support for UVD on supported hardware.
 * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
 * - 3.5.0 - Add support for new UVD_NO_OP register.
 * - 3.6.0 - KMD uses CONTEXT_CONTROL in the ring buffer.
 * - 3.7.0 - Add support for VCE clock list packet
 * - 3.8.0 - Add support for raster config init in the kernel
 * - 3.9.0 - Add support for memory query info about VRAM and GTT.
 * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
 * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
 * - 3.12.0 - Add query for double offchip LDS buffers
 * - 3.13.0 - Add PRT support
 * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
 * - 3.15.0 - Export more gpu info for gfx9
 * - 3.16.0 - Add reserved vmid support
 * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
 * - 3.18.0 - Export gpu always on cu bitmap
 * - 3.19.0 - Add support for UVD MJPEG decode
 * - 3.20.0 - Add support for local BOs
 * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
 * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
 * - 3.23.0 - Add query for VRAM lost counter
 * - 3.24.0 - Add high priority compute support for gfx9
 * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
 * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
 * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
 * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
 * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
 * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
 * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
 * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
 * - 3.36.0 - Allow reading more status registers on si/cik
 * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
 * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
 * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
 * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
 * - 3.41.0 - Add video codec query
 * - 3.42.0 - Add 16bpc fixed point display support
 */
#define KMS_DRIVER_MAJOR	3
#define KMS_DRIVER_MINOR	42
#define KMS_DRIVER_PATCHLEVEL	0

int amdgpu_vram_limit;
int amdgpu_vis_vram_limit;
int amdgpu_gart_size = -1; /* auto */
int amdgpu_gtt_size = -1; /* auto */
int amdgpu_moverate = -1; /* auto */
int amdgpu_benchmarking;
int amdgpu_testing;
int amdgpu_audio = -1;
int amdgpu_disp_priority;
int amdgpu_hw_i2c;
int amdgpu_pcie_gen2 = -1;
int amdgpu_msi = -1;
char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
int amdgpu_dpm = -1;
int amdgpu_fw_load_type = -1;
int amdgpu_aspm = -1;
int amdgpu_runtime_pm = -1;
uint amdgpu_ip_block_mask = 0xffffffff;
int amdgpu_bapm = -1;
int amdgpu_deep_color;
int amdgpu_vm_size = -1;
int amdgpu_vm_fragment_size = -1;
int amdgpu_vm_block_size = -1;
int amdgpu_vm_fault_stop;
int amdgpu_vm_debug;
int amdgpu_vm_update_mode = -1;
int amdgpu_exp_hw_support;
int amdgpu_dc = -1;
int amdgpu_sched_jobs = 32;
int amdgpu_sched_hw_submission = 2;
uint amdgpu_pcie_gen_cap;
uint amdgpu_pcie_lane_cap;
uint amdgpu_cg_mask = 0xffffffff;
uint amdgpu_pg_mask = 0xffffffff;
uint amdgpu_sdma_phase_quantum = 32;
char *amdgpu_disable_cu = NULL;
char *amdgpu_virtual_display = NULL;

/*
 * OverDrive(bit 14) disabled by default
 * GFX DCS(bit 19) disabled by default
 */
uint amdgpu_pp_feature_mask = 0xfff7bfff;
uint amdgpu_force_long_training;
int amdgpu_job_hang_limit;
int amdgpu_lbpw = -1;
int amdgpu_compute_multipipe = -1;
int amdgpu_gpu_recovery = -1; /* auto */
int amdgpu_emu_mode;
uint amdgpu_smu_memory_pool_size;
int amdgpu_smu_pptable_id = -1;
/*
 * FBC (bit 0) disabled by default
 * MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
 *   - With this, for multiple monitors in sync (e.g. with the same model),
 *     mclk switching will be allowed, and the mclk will not be forced to the
 *     highest. That helps save some idle power.
 * DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
 * PSR (bit 3) disabled by default
 * EDP NO POWER SEQUENCING (bit 4) disabled by default
 */
uint amdgpu_dc_feature_mask = 2;
uint amdgpu_dc_debug_mask;
int amdgpu_async_gfx_ring = 1;
int amdgpu_mcbp;
int amdgpu_discovery = -1;
int amdgpu_mes;
int amdgpu_noretry = -1;
int amdgpu_force_asic_type = -1;
int amdgpu_tmz = -1; /* auto */
uint amdgpu_freesync_vid_mode;
int amdgpu_reset_method = -1; /* auto */
int amdgpu_num_kcq = -1;
int amdgpu_smartshift_bias;

static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);

struct amdgpu_mgpu_info mgpu_info = {
	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
	.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
			mgpu_info.delayed_reset_work,
			amdgpu_drv_delayed_reset_work_handler, 0),
};
int amdgpu_ras_enable = -1;
uint amdgpu_ras_mask = 0xffffffff;
int amdgpu_bad_page_threshold = -1;
struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
	.timeout_fatal_disable = false,
	.period = 0x0, /* default to 0x0 (timeout disable) */
};

/**
 * DOC: vramlimit (int)
 * Restrict the total amount of VRAM in MiB for testing.  The default is 0 (Use full VRAM).
 */
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);

/**
 * DOC: vis_vramlimit (int)
 * Restrict the amount of CPU visible VRAM in MiB for testing.  The default is 0 (Use full CPU visible VRAM).
 */
MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);

/**
 * DOC: gartsize (uint)
 * Restrict the size of GART in MiB (32, 64, etc.) for testing. The default is -1 (The size depends on asic).
 */
MODULE_PARM_DESC(gartsize, "Size of GART to setup in megabytes (32, 64, etc., -1=auto)");
module_param_named(gartsize, amdgpu_gart_size, uint, 0600);

/**
 * DOC: gttsize (int)
 * Restrict the size of GTT domain in MiB for testing. The default is -1 (It's VRAM size if 3GB < VRAM < 3/4 RAM,
 * otherwise 3/4 RAM size).
 */
MODULE_PARM_DESC(gttsize, "Size of the GTT domain in megabytes (-1 = auto)");
module_param_named(gttsize, amdgpu_gtt_size, int, 0600);

/**
 * DOC: moverate (int)
 * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
 */
MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
module_param_named(moverate, amdgpu_moverate, int, 0600);

/**
 * DOC: benchmark (int)
 * Run benchmarks. The default is 0 (Skip benchmarks).
 */
MODULE_PARM_DESC(benchmark, "Run benchmark");
module_param_named(benchmark, amdgpu_benchmarking, int, 0444);

/**
 * DOC: test (int)
 * Test BO GTT->VRAM and VRAM->GTT GPU copies. The default is 0 (Skip test, only set 1 to run test).
 */
MODULE_PARM_DESC(test, "Run tests");
module_param_named(test, amdgpu_testing, int, 0444);

/**
 * DOC: audio (int)
 * Set HDMI/DP audio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disable it.
 */
MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(audio, amdgpu_audio, int, 0444);

252 253 254 255
/**
 * DOC: disp_priority (int)
 * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
 */
MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);

259 260 261 262
/**
 * DOC: hw_i2c (int)
 * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
 */
MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);

266 267 268 269
/**
 * DOC: pcie_gen2 (int)
 * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);

273 274 275 276
/**
 * DOC: msi (int)
 * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(msi, amdgpu_msi, int, 0444);

/**
 * DOC: lockup_timeout (string)
 * Set GPU scheduler timeout value in ms.
 *
 * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is, there can be one or
 * multiple values specified. 0 and negative values are invalid. They will be adjusted
 * to the default timeout.
 *
 * - With one value specified, the setting will apply to all non-compute jobs.
 * - With multiple values specified, the first one will be for GFX.
 *   The second one is for Compute. The third and fourth ones are
 *   for SDMA and Video.
 *
 * By default (with no lockup_timeout settings), the timeout for all non-compute (GFX, SDMA and Video)
 * jobs is 10000. The timeout for compute is 60000.
 */
MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
		"for passthrough or sriov, 10000 for all jobs."
		" 0: keep default value. negative: infinity timeout), "
		"format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
		"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
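/*
 * Example usage (illustrative values only): booting with
 *   amdgpu.lockup_timeout=10000
 * applies a 10 second timeout to all non-compute jobs, while
 *   amdgpu.lockup_timeout=10000,60000,10000,10000
 * sets GFX/SDMA/Video to 10 seconds and Compute to 60 seconds on bare metal,
 * following the [GFX,Compute,SDMA,Video] ordering described above.
 */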

/**
 * DOC: dpm (int)
 * Override for dynamic power management setting
 * (0 = disable, 1 = enable)
 * The default is -1 (auto).
 */
MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(dpm, amdgpu_dpm, int, 0444);

/**
 * DOC: fw_load_type (int)
 * Set different firmware loading type for debugging (0 = direct, 1 = SMU, 2 = PSP). The default is -1 (auto).
 */
MODULE_PARM_DESC(fw_load_type, "firmware loading type (0 = direct, 1 = SMU, 2 = PSP, -1 = auto)");
module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);

/**
 * DOC: aspm (int)
 * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(aspm, amdgpu_aspm, int, 0444);

/**
 * DOC: runpm (int)
 * Override for runtime power management control for dGPUs in PX/HG laptops. The amdgpu driver can dynamically power down
 * the dGPU on PX/HG laptops when it is idle. The default is -1 (auto enable). Setting the value to 0 disables this functionality.
 */
MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = PX only default)");
module_param_named(runpm, amdgpu_runtime_pm, int, 0444);

/**
 * DOC: ip_block_mask (uint)
 * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
 * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
 * some IPs or may include multiple instances of an IP so the ordering varies from asic to asic. See the driver output in
 * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
 */
MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
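/*
 * Example (illustrative only, block indices are asic-specific): if the kernel
 * log lists the IP block to be skipped at index 5, booting with
 *   amdgpu.ip_block_mask=0xffffffdf
 * clears bit 5 and leaves every other block enabled.
 */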

/**
 * DOC: bapm (int)
 * Bidirectional Application Power Management (BAPM) is used to dynamically share TDP between the CPU and GPU. Set value 0 to disable it.
 * The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(bapm, amdgpu_bapm, int, 0444);

/**
 * DOC: deep_color (int)
 * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
 */
MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
module_param_named(deep_color, amdgpu_deep_color, int, 0444);

/**
 * DOC: vm_size (int)
 * Override the size of the GPU's per client virtual address space in GiB.  The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
module_param_named(vm_size, amdgpu_vm_size, int, 0444);

/**
 * DOC: vm_fragment_size (int)
 * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
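/*
 * Worked example (derived from the 4 = 64K and 9 = 2M endpoints documented
 * above, so each increment doubles the fragment size): 4 = 64K, 5 = 128K,
 * 6 = 256K, 7 = 512K, 8 = 1M, 9 = 2M.
 */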

/**
 * DOC: vm_block_size (int)
 * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);

/**
 * DOC: vm_fault_stop (int)
 * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
 */
MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);

/**
 * DOC: vm_debug (int)
 * Debug VM handling (0 = disabled, 1 = enabled). The default is 0 (Disabled).
 */
MODULE_PARM_DESC(vm_debug, "Debug VM handling (0 = disabled (default), 1 = enabled)");
module_param_named(vm_debug, amdgpu_vm_debug, int, 0644);

/**
 * DOC: vm_update_mode (int)
 * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
 * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
 */
MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
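/*
 * Example (illustrative): amdgpu.vm_update_mode=3 forces both graphics and
 * compute VM table updates onto the CPU, while the default -1 only does so
 * for compute VMs on large-BAR systems, as described above.
 */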

/**
 * DOC: exp_hw_support (int)
 * Enable experimental hw support (1 = enable). The default is 0 (disabled).
 */
MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);

/**
 * DOC: dc (int)
 * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
 */
MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
module_param_named(dc, amdgpu_dc, int, 0444);

/**
 * DOC: sched_jobs (int)
 * Override the max number of jobs supported in the sw queue. The default is 32.
 */
MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);

/**
 * DOC: sched_hw_submission (int)
 * Override the max number of HW submissions. The default is 2.
 */
MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);

/**
 * DOC: ppfeaturemask (hexint)
 * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable power features.
 */
MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
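/*
 * Worked example: the built-in default 0xfff7bfff is 0xffffffff with bit 14
 * (OverDrive) and bit 19 (GFX DCS) cleared, matching the comment next to
 * amdgpu_pp_feature_mask above. To enable every feature, including those two,
 * one could boot with amdgpu.ppfeaturemask=0xffffffff (illustrative usage;
 * the exact bit meanings live in enum PP_FEATURE_MASK).
 */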

/**
 * DOC: forcelongtraining (uint)
 * Force long memory training in resume.
 * The default is zero, indicates short training in resume.
 */
MODULE_PARM_DESC(forcelongtraining, "force memory long training");
module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);

/**
 * DOC: pcie_gen_cap (uint)
 * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);

/**
 * DOC: pcie_lane_cap (uint)
 * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
 * The default is 0 (automatic for each asic).
 */
MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);

/**
 * DOC: cg_mask (uint)
 * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);

/**
 * DOC: pg_mask (uint)
 * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
 * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
 */
MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);

/**
 * DOC: sdma_phase_quantum (uint)
 * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
 */
MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);

/**
 * DOC: disable_cu (charp)
 * Set to disable CUs (It's set like se.sh.cu,...). The default is NULL.
 */
MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
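/*
 * Example (hypothetical CU coordinates, illustrative only):
 *   amdgpu.disable_cu=1.0.3,1.0.4
 * would disable CU 3 and CU 4 in shader engine 1, shader array 0, following
 * the se.sh.cu triplet format described above.
 */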

/**
 * DOC: virtual_display (charp)
 * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
 * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
 * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
 * device at 26:00.0. The default is NULL.
 */
MODULE_PARM_DESC(virtual_display,
		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);

/**
 * DOC: job_hang_limit (int)
 * Set how much time to allow a job to hang without dropping it. The default is 0.
 */
MODULE_PARM_DESC(job_hang_limit, "how much time allow a job hang and not drop it (default 0)");
module_param_named(job_hang_limit, amdgpu_job_hang_limit, int ,0444);

/**
 * DOC: lbpw (int)
 * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
 */
MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(lbpw, amdgpu_lbpw, int, 0444);

MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);

/**
 * DOC: gpu_recovery (int)
 * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
 */
MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (2 = advanced tdr mode, 1 = enable, 0 = disable, -1 = auto)");
module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);

/**
 * DOC: emu_mode (int)
 * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
 */
MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);

/**
 * DOC: ras_enable (int)
 * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
 */
MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);

/**
 * DOC: ras_mask (uint)
 * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
 * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
 */
MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);

/**
 * DOC: timeout_fatal_disable (bool)
 * Disable Watchdog timeout fatal error event
 */
MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);

/**
 * DOC: timeout_period (uint)
 * Modify the watchdog timeout max_cycles as (1 << period)
 */
MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period)");
module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
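/*
 * Worked example (illustrative value): amdgpu.timeout_period=0x10 gives
 * max_cycles = 1 << 0x10 = 65536, while 0 leaves the watchdog timeout
 * disabled; valid settings are 0 through 0x23 as described above.
 */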

/**
 * DOC: si_support (int)
 * Set SI support driver. This parameter works only when CONFIG_DRM_AMDGPU_SI is set. For SI asics, when the radeon driver is enabled,
 * set value 0 to use the radeon driver, or set value 1 to use the amdgpu driver. The default is to use the radeon driver when it is available,
 * otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_SI

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_si_support = 0;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_si_support = 1;
MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(si_support, amdgpu_si_support, int, 0444);
#endif

/**
 * DOC: cik_support (int)
 * Set CIK support driver. This parameter works only when CONFIG_DRM_AMDGPU_CIK is set. For CIK asics, when the radeon driver is enabled,
 * set value 0 to use the radeon driver, or set value 1 to use the amdgpu driver. The default is to use the radeon driver when it is available,
 * otherwise the amdgpu driver.
 */
#ifdef CONFIG_DRM_AMDGPU_CIK

#if defined(CONFIG_DRM_RADEON) || defined(CONFIG_DRM_RADEON_MODULE)
int amdgpu_cik_support = 0;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
#else
int amdgpu_cik_support = 1;
MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
#endif

module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif

/**
 * DOC: smu_memory_pool_size (uint)
 * It is used to reserve gtt for smu debug usage, setting value 0 to disable it. The actual size is value * 256MiB.
 * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
 */
MODULE_PARM_DESC(smu_memory_pool_size,
	"reserve gtt for smu debug usage, 0 = disable,"
		"0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
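/*
 * Worked example: the reserved GTT size is value * 256 MiB, so
 * amdgpu.smu_memory_pool_size=0x4 reserves 4 * 256 MiB = 1 GiB and 0x8
 * reserves 2 GiB, matching the mapping in the description above
 * (illustrative usage only).
 */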

/**
 * DOC: async_gfx_ring (int)
 * It is used to enable gfx rings that could be configured with different priorities or equal priorities
 */
MODULE_PARM_DESC(async_gfx_ring,
616
	"Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
617 618
module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);

619 620 621 622
/**
 * DOC: mcbp (int)
 * It is used to enable mid command buffer preemption. (0 = disabled (default), 1 = enabled)
 */
623 624 625 626
MODULE_PARM_DESC(mcbp,
	"Enable Mid-command buffer preemption (0 = disabled (default), 1 = enabled)");
module_param_named(mcbp, amdgpu_mcbp, int, 0444);

627 628 629
/**
 * DOC: discovery (int)
 * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
630
 * (-1 = auto (default), 0 = disabled, 1 = enabled)
631
 */
632 633 634 635
MODULE_PARM_DESC(discovery,
	"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
module_param_named(discovery, amdgpu_discovery, int, 0444);

636 637 638 639 640
/**
 * DOC: mes (int)
 * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
 * (0 = disabled (default), 1 = enabled)
 */
641 642 643 644
MODULE_PARM_DESC(mes,
	"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
module_param_named(mes, amdgpu_mes, int, 0444);

645 646
/**
 * DOC: noretry (int)
647 648
 * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that
 * do not support per-process XNACK this also disables retry page faults.
649 650
 * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
 */
651
MODULE_PARM_DESC(noretry,
652
	"Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
653 654
module_param_named(noretry, amdgpu_noretry, int, 0644);

655 656 657 658 659 660 661 662 663 664
/**
 * DOC: force_asic_type (int)
 * A non negative value used to specify the asic type for all supported GPUs.
 */
MODULE_PARM_DESC(force_asic_type,
	"A non negative value used to specify the asic type for all supported GPUs");
module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);



#ifdef CONFIG_HSA_AMD
/**
 * DOC: sched_policy (int)
 * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription.
 * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
 * assigns queues to HQDs.
 */
672
int sched_policy = KFD_SCHED_POLICY_HWS;
673 674 675 676 677 678 679 680 681
module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
	"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");

/**
 * DOC: hws_max_conc_proc (int)
 * Maximum number of processes that HWS can schedule concurrently. The maximum is the
 * number of VMIDs assigned to the HWS, which is also the default.
 */
682
int hws_max_conc_proc = 8;
683 684 685 686 687 688 689 690 691 692
module_param(hws_max_conc_proc, int, 0444);
MODULE_PARM_DESC(hws_max_conc_proc,
	"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");

/**
 * DOC: cwsr_enable (int)
 * CWSR(compute wave store and resume) allows the GPU to preempt shader execution in
 * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
 * disables it.
 */
693
int cwsr_enable = 1;
694 695 696 697 698 699 700 701
module_param(cwsr_enable, int, 0444);
MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");

/**
 * DOC: max_num_of_queues_per_device (int)
 * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
 * is 4096.
 */
702
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
703 704 705 706 707 708 709 710 711
module_param(max_num_of_queues_per_device, int, 0444);
MODULE_PARM_DESC(max_num_of_queues_per_device,
	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");

/**
 * DOC: send_sigterm (int)
 * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
 * but just print errors on dmesg. Setting 1 enables sending sigterm.
 */
712
int send_sigterm;
713 714 715 716 717 718 719 720 721 722 723
module_param(send_sigterm, int, 0444);
MODULE_PARM_DESC(send_sigterm,
	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");

/**
 * DOC: debug_largebar (int)
 * Set debug_largebar as 1 to enable simulating large-bar capability on non-large bar
 * system. This limits the VRAM size reported to ROCm applications to the visible
 * size, usually 256MB.
 * Default value is 0, disabled.
 */
724
int debug_largebar;
725 726 727 728 729 730 731 732 733
module_param(debug_largebar, int, 0444);
MODULE_PARM_DESC(debug_largebar,
	"Debug large-bar flag used to simulate large-bar capability on non-large bar machine (0 = disable, 1 = enable)");

/**
 * DOC: ignore_crat (int)
 * Ignore CRAT table during KFD initialization. By default, KFD uses the ACPI CRAT
 * table to get information about AMD APUs. This option can serve as a workaround on
 * systems with a broken CRAT table.
734 735 736
 *
 * Default is auto (according to asic type, iommu_v2, and crat table, to decide
 * whehter use CRAT)
737
 */
738
int ignore_crat;
739 740
module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
741
	"Ignore CRAT table during KFD initialization (0 = auto (default), 1 = ignore CRAT)");
742 743 744 745 746 747

/**
 * DOC: halt_if_hws_hang (int)
 * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
 * Setting 1 enables halt on hang.
 */
748
int halt_if_hws_hang;
749 750
module_param(halt_if_hws_hang, int, 0644);
MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
751 752 753

/**
 * DOC: hws_gws_support(bool)
754 755
 * Assume that HWS supports GWS barriers regardless of what firmware version
 * check says. Default value: false (rely on MEC2 firmware version check).
756 757 758
 */
bool hws_gws_support;
module_param(hws_gws_support, bool, 0444);
759
MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
760 761 762 763 764

/**
  * DOC: queue_preemption_timeout_ms (int)
  * queue preemption timeout in ms (1 = Minimum, 9000 = default)
  */
765
int queue_preemption_timeout_ms = 9000;
766 767
module_param(queue_preemption_timeout_ms, int, 0644);
MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
768 769 770 771 772 773 774 775

/**
 * DOC: debug_evictions(bool)
 * Enable extra debug messages to help determine the cause of evictions
 */
bool debug_evictions;
module_param(debug_evictions, bool, 0644);
MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
776 777 778 779 780 781 782 783 784

/**
 * DOC: no_system_mem_limit(bool)
 * Disable system memory limit, to support multiple process shared memory
 */
bool no_system_mem_limit;
module_param(no_system_mem_limit, bool, 0644);
MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");

785 786 787 788 789 790 791
/**
 * DOC: no_queue_eviction_on_vm_fault (int)
 * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
 */
int amdgpu_no_queue_eviction_on_vm_fault = 0;
MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
792
#endif
793

794 795 796 797 798 799 800 801
/**
 * DOC: dcfeaturemask (uint)
 * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 * The default is the current set of stable display features.
 */
MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);

802 803 804 805 806 807 808
/**
 * DOC: dcdebugmask (uint)
 * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
 */
MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);

/**
 * DOC: abmlevel (uint)
 * Override the default ABM (Adaptive Backlight Management) level used for DC
 * enabled hardware. Requires DMCU to be supported and loaded.
 * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
 * default. Values 1-4 control the maximum allowable brightness reduction via
 * the ABM algorithm, with 1 being the least reduction and 4 being the most
 * reduction.
 *
 * Defaults to 0, or disabled. Userspace can still override this level later
 * after boot.
 */
821
uint amdgpu_dm_abm_level;
822 823 824
MODULE_PARM_DESC(abmlevel, "ABM level (0 = off (default), 1-4 = backlight reduction level) ");
module_param_named(abmlevel, amdgpu_dm_abm_level, uint, 0444);

825 826 827 828
int amdgpu_backlight = -1;
MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
module_param_named(backlight, amdgpu_backlight, bint, 0444);

829 830 831 832 833 834 835
/**
 * DOC: tmz (int)
 * Trusted Memory Zone (TMZ) is a method to protect data being written
 * to or read from memory.
 *
 * The default value: 0 (off).  TODO: change to auto till it is completed.
 */
836
MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
837 838
module_param_named(tmz, amdgpu_tmz, int, 0444);

839 840
/**
 * DOC: freesync_video (uint)
841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857
 * Enable the optimization to adjust front porch timing to achieve seamless
 * mode change experience when setting a freesync supported mode for which full
 * modeset is not needed.
 *
 * The Display Core will add a set of modes derived from the base FreeSync
 * video mode into the corresponding connector's mode list based on commonly
 * used refresh rates and VRR range of the connected display, when users enable
 * this feature. From the userspace perspective, they can see a seamless mode
 * change experience when switching between different refresh rates under the
 * same resolution. Additionally, userspace applications such as Video playback
 * can read this modeset list and change the refresh rate based on the video
 * frame rate. Finally, the userspace can also derive an appropriate mode for a
 * particular refresh rate based on the FreeSync Mode and add it to the
 * connector's mode list.
 *
 * Note: This is an experimental feature.
 *
858 859 860 861 862 863 864
 * The default value: 0 (off).
 */
MODULE_PARM_DESC(
	freesync_video,
	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);

/**
 * DOC: reset_method (int)
 * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci)
 */
MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco, 5 = pci)");
module_param_named(reset_method, amdgpu_reset_method, int, 0444);

/**
 * DOC: bad_page_threshold (int)
 * The bad page threshold specifies the threshold value of faulty pages
 * detected by RAS ECC. The GPU may enter a bad status if the total number of
 * faulty pages detected by ECC exceeds this threshold; it is then left for the
 * user's further check.
 */
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);

MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);

/**
 * DOC: smu_pptable_id (int)
 * Used to override pptable id. id = 0 use VBIOS pptable.
 * id > 0 use the soft pptable with the specified id.
 */
MODULE_PARM_DESC(smu_pptable_id,
	"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);

static const struct pci_device_id pciidlist[] = {
#ifdef  CONFIG_DRM_AMDGPU_SI
	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
	{0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	/* Kaveri */
	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
	/* Bonaire */
	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
	/* Hawaii */
	{0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
	/* Kabini */
	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
	/* mullins */
	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
#endif
	/* topaz */
	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
	/* tonga */
	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6929, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x692B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x692F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
	/* fiji */
	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
	/* carrizo */
	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
	/* stoney */
	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
	/* Polaris11 */
	{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
	/* Polaris10 */
	{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67D0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	{0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
	/* Polaris12 */
	{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
J
Junshan Fang 已提交
1111
	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
1112
	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
L
Leo Liu 已提交
1113 1114 1115
	/* VEGAM */
	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
A
Alex Deucher 已提交
1116
	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
1117
	/* Vega 10 */
1118 1119 1120 1121 1122 1123 1124
	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
1125 1126 1127
	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
1128
	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
1129 1130 1131
	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
1132
	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
1133 1134 1135 1136 1137 1138
	/* Vega 12 */
	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
F
Feifei Xu 已提交
1139
	/* Vega 20 */
1140 1141 1142 1143
	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
1144
	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
1145 1146
	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
1147
	/* Raven */
1148
	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
1149
	{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
1150
	/* Arcturus */
1151 1152 1153 1154
	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
A
Alex Deucher 已提交
1155 1156 1157 1158
	/* Navi10 */
	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
	{0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
T
tiancyin 已提交
1159
	{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
A
Alex Deucher 已提交
1160
	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
T
tiancyin 已提交
1161
	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
1162
	{0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
A
Alex Deucher 已提交
1163
	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
A
Alex Deucher 已提交
1164
	/* Navi14 */
1165 1166 1167 1168
	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
1169

H
Huang Rui 已提交
1170
	/* Renoir */
J
Jinzhou Su 已提交
1171
	{0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
1172
	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
1173
	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
1174
	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
H
Huang Rui 已提交
1175

T
Tianci.Yin 已提交
1176
	/* Navi12 */
1177 1178
	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
T
Tianci.Yin 已提交
1179

1180 1181
	/* Sienna_Cichlid */
	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
1182
	{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
1183 1184 1185 1186
	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
1187
	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
1188
	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
T
Tianci.Yin 已提交
1189

H
Huang Rui 已提交
1190 1191 1192
	/* Van Gogh */
	{0x1002, 0x163F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VANGOGH|AMD_IS_APU},

1193 1194 1195 1196
	/* Yellow Carp */
	{0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
	{0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},

1197 1198 1199 1200 1201 1202
	/* Navy_Flounder */
	{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
	{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
	{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
	{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},

1203 1204 1205 1206
	/* DIMGREY_CAVEFISH */
	{0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
	{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
	{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
1207
	{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
1208 1209
	{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},

F
Feifei Xu 已提交
1210
	/* Aldebaran */
1211 1212 1213
	{0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
	{0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
	{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
1214
	{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
F
Feifei Xu 已提交
1215

1216 1217 1218 1219 1220 1221 1222
	/* BEIGE_GOBY */
	{0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
	{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
	{0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
	{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
	{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},

A
Alex Deucher 已提交
1223 1224 1225 1226 1227
	{0, 0, 0}
};

MODULE_DEVICE_TABLE(pci, pciidlist);

static const struct drm_driver amdgpu_kms_driver;

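/*
 * amdgpu_pci_probe() - bind amdgpu to a PCI device
 *
 * Rejects ASICs that should stay with radeon (SI/CIK unless overridden),
 * experimental parts without amdgpu.exp_hw_support, and Raven when SME is
 * active.  It then removes conflicting framebuffer drivers, allocates the
 * amdgpu_device, loads the KMS driver and registers the DRM device,
 * retrying a few times when the host asks us to back off with -EAGAIN.
 */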
static int amdgpu_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct drm_device *ddev;
	struct amdgpu_device *adev;
	unsigned long flags = ent->driver_data;
	int ret, retry = 0;
	bool supports_atomic = false;

	if (!amdgpu_virtual_display &&
	    amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
		supports_atomic = true;

	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
		DRM_INFO("This hardware requires experimental hardware support.\n"
			 "See modparam exp_hw_support\n");
		return -ENODEV;
	}

	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
	 * however, SME requires an indirect IOMMU mapping because the encryption
	 * bit is beyond the DMA mask of the chip.
	 */
	if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
		dev_info(&pdev->dev,
			 "SME is not compatible with RAVEN\n");
		return -ENOTSUPP;
	}

#ifdef CONFIG_DRM_AMDGPU_SI
	if (!amdgpu_si_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_TAHITI:
		case CHIP_PITCAIRN:
		case CHIP_VERDE:
		case CHIP_OLAND:
		case CHIP_HAINAN:
			dev_info(&pdev->dev,
				 "SI support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	if (!amdgpu_cik_support) {
		switch (flags & AMD_ASIC_MASK) {
		case CHIP_KAVERI:
		case CHIP_BONAIRE:
		case CHIP_HAWAII:
		case CHIP_KABINI:
		case CHIP_MULLINS:
			dev_info(&pdev->dev,
				 "CIK support provided by radeon.\n");
			dev_info(&pdev->dev,
				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
				);
			return -ENODEV;
		}
	}
#endif

	/* Get rid of things like offb */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
	if (ret)
		return ret;

	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
	if (IS_ERR(adev))
		return PTR_ERR(adev);

	adev->dev  = &pdev->dev;
	adev->pdev = pdev;
	ddev = adev_to_drm(adev);

	if (!supports_atomic)
		ddev->driver_features &= ~DRIVER_ATOMIC;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_drvdata(pdev, ddev);

	ret = amdgpu_driver_load_kms(adev, ent->driver_data);
	if (ret)
		goto err_pci;

retry_init:
	ret = drm_dev_register(ddev, ent->driver_data);
	if (ret == -EAGAIN && ++retry <= 3) {
		DRM_INFO("retry init %d\n", retry);
		/* Don't request EX mode too frequently; rapid retries can look like an attack to the host */
		msleep(5000);
		goto retry_init;
	} else if (ret) {
		goto err_pci;
	}

	ret = amdgpu_debugfs_init(adev);
	if (ret)
		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);

	return 0;

err_pci:
	pci_disable_device(pdev);
	return ret;
}

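/*
 * amdgpu_pci_remove() - unbind amdgpu from the PCI device
 *
 * Unplugs the DRM device, unloads the KMS driver and disables bus
 * mastering so no DMA is left in flight once the device is gone.
 */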
static void
amdgpu_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_dev_unplug(dev);
	amdgpu_driver_unload_kms(dev);

	/*
	 * Flush any in flight DMA operations from the device.
	 * Clear the Bus Master Enable bit and then wait on the PCIe Device
	 * Status Transactions Pending bit.
	 */
	pci_disable_device(pdev);
	pci_wait_for_pending_transaction(pdev);
}

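/*
 * amdgpu_pci_shutdown() - quiesce the ASIC on reboot/shutdown
 *
 * Skipped when a RAS fatal-error interrupt has already fired; otherwise
 * the IP blocks are suspended so the hardware is idle for the next boot.
 */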
static void
amdgpu_pci_shutdown(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(dev);

	if (amdgpu_ras_intr_triggered())
		return;

	/* if we are running in a VM, make sure the device
	 * is torn down properly on reboot/shutdown.
	 * unfortunately we can't detect certain
	 * hypervisors so just do this all the time.
	 */
	if (!amdgpu_passthrough(adev))
		adev->mp1_state = PP_MP1_STATE_UNLOAD;
	amdgpu_device_ip_suspend(adev);
	adev->mp1_state = PP_MP1_STATE_NONE;
}

/**
 * amdgpu_drv_delayed_reset_work_handler - work handler for reset
 *
 * @work: work_struct.
 */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
{
	struct list_head device_list;
	struct amdgpu_device *adev;
	int i, r;
	struct amdgpu_reset_context reset_context;

	memset(&reset_context, 0, sizeof(reset_context));

	mutex_lock(&mgpu_info.mutex);
	if (mgpu_info.pending_reset == true) {
		mutex_unlock(&mgpu_info.mutex);
		return;
	}
	mgpu_info.pending_reset = true;
	mutex_unlock(&mgpu_info.mutex);

	/* Use a common context, just need to make sure full reset is done */
	reset_context.method = AMD_RESET_METHOD_NONE;
	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);

	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		adev = mgpu_info.gpu_ins[i].adev;
		reset_context.reset_req_dev = adev;
		r = amdgpu_device_pre_asic_reset(adev, &reset_context);
		if (r) {
			dev_err(adev->dev, "GPU pre asic reset failed with err, %d for drm dev, %s ",
				r, adev_to_drm(adev)->unique);
		}
		if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
			r = -EALREADY;
	}
	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		adev = mgpu_info.gpu_ins[i].adev;
		flush_work(&adev->xgmi_reset_work);
		adev->gmc.xgmi.pending_reset = false;
	}

	/* reset function will rebuild the xgmi hive info, clear it now */
	for (i = 0; i < mgpu_info.num_dgpu; i++)
		amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);

	INIT_LIST_HEAD(&device_list);

	for (i = 0; i < mgpu_info.num_dgpu; i++)
		list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);

	/* unregister the GPU first, reset function will add them back */
	list_for_each_entry(adev, &device_list, reset_list)
		amdgpu_unregister_gpu_instance(adev);

	/* Use a common context, just need to make sure full reset is done */
	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
	r = amdgpu_do_asic_reset(&device_list, &reset_context);

	if (r) {
		DRM_ERROR("reinit gpus failure");
		return;
	}
	for (i = 0; i < mgpu_info.num_dgpu; i++) {
		adev = mgpu_info.gpu_ins[i].adev;
		if (!adev->kfd.init_complete)
			amdgpu_amdkfd_device_init(adev);
		amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}
	return;
}

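/*
 * System suspend "prepare" callback: a positive return value tells the
 * PM core that a BOCO-capable device which is already runtime suspended
 * can stay that way for a firmware-driven suspend (DPM_FLAG_SMART_SUSPEND).
 */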
static int amdgpu_pmops_prepare(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	/* Return a positive number here so
	 * DPM_FLAG_SMART_SUSPEND works properly
	 */
	if (amdgpu_device_supports_boco(drm_dev))
		return pm_runtime_suspended(dev) &&
			pm_suspend_via_firmware();

	return 0;
}

static void amdgpu_pmops_complete(struct device *dev)
{
	/* nothing to do */
}

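/*
 * System sleep (S3/S0ix): record which state we are entering so the
 * lower layers take the right path, then defer to amdgpu_device_suspend()
 * and amdgpu_device_resume().
 */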
static int amdgpu_pmops_suspend(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int r;

	if (amdgpu_acpi_is_s0ix_supported(adev))
		adev->in_s0ix = true;
	adev->in_s3 = true;
	r = amdgpu_device_suspend(drm_dev, true);
	adev->in_s3 = false;

	return r;
}

static int amdgpu_pmops_resume(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int r;

	r = amdgpu_device_resume(drm_dev, true);
	if (amdgpu_acpi_is_s0ix_supported(adev))
		adev->in_s0ix = false;
	return r;
}

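/*
 * Hibernation: freeze suspends the device with adev->in_s4 set and then
 * resets the ASIC; thaw and restore simply resume, and poweroff suspends
 * the device before the final power-off.
 */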
static int amdgpu_pmops_freeze(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int r;

	adev->in_s4 = true;
	r = amdgpu_device_suspend(drm_dev, true);
	adev->in_s4 = false;
	if (r)
		return r;
	return amdgpu_asic_reset(adev);
}

static int amdgpu_pmops_thaw(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}

static int amdgpu_pmops_poweroff(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_suspend(drm_dev, true);
}

static int amdgpu_pmops_restore(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);

	return amdgpu_device_resume(drm_dev, true);
}

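/*
 * Runtime suspend: wait for all rings to drain, then power the GPU down
 * either through the ATPX/PX path (cache PCI state and enter D3cold) or
 * by entering BACO when the platform supports it.
 */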
static int amdgpu_pmops_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret, i;

	if (!adev->runpm) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	/* wait for all rings to drain before suspending */
	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
		struct amdgpu_ring *ring = adev->rings[i];
		if (ring && ring->sched.ready) {
			ret = amdgpu_fence_wait_empty(ring);
			if (ret)
				return -EBUSY;
		}
	}

	adev->in_runpm = true;
	if (amdgpu_device_supports_px(drm_dev))
		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

	ret = amdgpu_device_suspend(drm_dev, false);
	if (ret) {
		adev->in_runpm = false;
		return ret;
	}

	if (amdgpu_device_supports_px(drm_dev)) {
		/* Only need to handle PCI state in the driver for ATPX
		 * PCI core handles it for _PR3.
		 */
		amdgpu_device_cache_pci_state(pdev);
		pci_disable_device(pdev);
		pci_ignore_hotplug(pdev);
		pci_set_power_state(pdev, PCI_D3cold);
		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
	} else if (amdgpu_device_supports_baco(drm_dev)) {
		amdgpu_device_baco_enter(drm_dev);
	}

	return 0;
}

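/*
 * Runtime resume: undo amdgpu_pmops_runtime_suspend() - restore PCI
 * state and bus mastering for the PX/BOCO cases or leave BACO, then
 * resume the device.
 */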
static int amdgpu_pmops_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	int ret;

	if (!adev->runpm)
		return -EINVAL;

	/* Avoids registers access if device is physically gone */
	if (!pci_device_is_present(adev->pdev))
		adev->no_hw_access = true;

	if (amdgpu_device_supports_px(drm_dev)) {
		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* Only need to handle PCI state in the driver for ATPX
		 * PCI core handles it for _PR3.
		 */
		pci_set_power_state(pdev, PCI_D0);
		amdgpu_device_load_pci_state(pdev);
		ret = pci_enable_device(pdev);
		if (ret)
			return ret;
		pci_set_master(pdev);
	} else if (amdgpu_device_supports_boco(drm_dev)) {
		/* Only need to handle PCI state in the driver for ATPX
		 * PCI core handles it for _PR3.
		 */
		pci_set_master(pdev);
	} else if (amdgpu_device_supports_baco(drm_dev)) {
		amdgpu_device_baco_exit(drm_dev);
	}
	ret = amdgpu_device_resume(drm_dev, false);
	if (ret)
		return ret;

	if (amdgpu_device_supports_px(drm_dev))
		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
	adev->in_runpm = false;
	return 0;
}

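/*
 * Runtime idle: only report the device as idle when no CRTC (DC) or no
 * connector (non-DC) is still active, otherwise return -EBUSY so the GPU
 * keeps driving the display.
 */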
static int amdgpu_pmops_runtime_idle(struct device *dev)
{
	struct drm_device *drm_dev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(drm_dev);
	/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
	int ret = 1;

	if (!adev->runpm) {
		pm_runtime_forbid(dev);
		return -EBUSY;
	}

	if (amdgpu_device_has_dc_support(adev)) {
		struct drm_crtc *crtc;

		drm_for_each_crtc(crtc, drm_dev) {
			drm_modeset_lock(&crtc->mutex, NULL);
			if (crtc->state->active)
				ret = -EBUSY;
			drm_modeset_unlock(&crtc->mutex);
			if (ret < 0)
				break;
		}

	} else {
		struct drm_connector *list_connector;
		struct drm_connector_list_iter iter;

		mutex_lock(&drm_dev->mode_config.mutex);
		drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);

		drm_connector_list_iter_begin(drm_dev, &iter);
		drm_for_each_connector_iter(list_connector, &iter) {
			if (list_connector->dpms ==  DRM_MODE_DPMS_ON) {
				ret = -EBUSY;
				break;
			}
		}

		drm_connector_list_iter_end(&iter);

		drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
		mutex_unlock(&drm_dev->mode_config.mutex);
	}

	if (ret == -EBUSY)
		DRM_DEBUG_DRIVER("failing to power off - crtc active\n");

	pm_runtime_mark_last_busy(dev);
	pm_runtime_autosuspend(dev);
	return ret;
}

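/*
 * All amdgpu ioctls funnel through here so the device is runtime-resumed
 * for the duration of the call and marked busy again afterwards.
 */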
long amdgpu_drm_ioctl(struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev;
	long ret;
	dev = file_priv->minor->dev;
	ret = pm_runtime_get_sync(dev->dev);
	if (ret < 0)
		goto out;

	ret = drm_ioctl(filp, cmd, arg);

	pm_runtime_mark_last_busy(dev->dev);
out:
	pm_runtime_put_autosuspend(dev->dev);
	return ret;
}

static const struct dev_pm_ops amdgpu_pm_ops = {
	.prepare = amdgpu_pmops_prepare,
	.complete = amdgpu_pmops_complete,
	.suspend = amdgpu_pmops_suspend,
	.resume = amdgpu_pmops_resume,
	.freeze = amdgpu_pmops_freeze,
	.thaw = amdgpu_pmops_thaw,
	.poweroff = amdgpu_pmops_poweroff,
	.restore = amdgpu_pmops_restore,
	.runtime_suspend = amdgpu_pmops_runtime_suspend,
	.runtime_resume = amdgpu_pmops_runtime_resume,
	.runtime_idle = amdgpu_pmops_runtime_idle,
};

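/*
 * flush() on the DRM fd: give this client's scheduler entities and VM a
 * bounded amount of time to drain before the file is released.
 */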
static int amdgpu_flush(struct file *f, fl_owner_t id)
{
	struct drm_file *file_priv = f->private_data;
	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;

	timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
	timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);

	return timeout >= 0 ? 0 : timeout;
}

static const struct file_operations amdgpu_driver_kms_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.flush = amdgpu_flush,
	.release = drm_release,
	.unlocked_ioctl = amdgpu_drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
#ifdef CONFIG_PROC_FS
	.show_fdinfo = amdgpu_show_fdinfo
#endif
};

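/*
 * Resolve a struct file to the amdgpu_fpriv behind it, after checking
 * that the file really is an amdgpu KMS fd.
 */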
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
{
	struct drm_file *file;

	if (!filp)
		return -EINVAL;

	if (filp->f_op != &amdgpu_driver_kms_fops) {
		return -EINVAL;
	}

	file = filp->private_data;
	*fpriv = file->driver_priv;
	return 0;
}

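/* ioctls exposed by amdgpu; DRM_RENDER_ALLOW entries are also usable on render nodes */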
const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	/* KMS */
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct drm_driver amdgpu_kms_driver = {
	.driver_features =
	    DRIVER_ATOMIC |
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = amdgpu_driver_open_kms,
	.postclose = amdgpu_driver_postclose_kms,
	.lastclose = amdgpu_driver_lastclose_kms,
	.irq_handler = amdgpu_irq_handler,
	.ioctls = amdgpu_ioctls_kms,
	.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.fops = &amdgpu_driver_kms_fops,
	.release = &amdgpu_driver_release_kms,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = amdgpu_gem_prime_import,
	.gem_prime_mmap = drm_gem_prime_mmap,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};

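/* PCIe error recovery (AER) callbacks */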
static struct pci_error_handlers amdgpu_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};

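/* sysfs attribute groups attached to the PCI device via .dev_groups */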
extern const struct attribute_group amdgpu_vram_mgr_attr_group;
extern const struct attribute_group amdgpu_gtt_mgr_attr_group;
extern const struct attribute_group amdgpu_vbios_version_attr_group;

static const struct attribute_group *amdgpu_sysfs_groups[] = {
	&amdgpu_vram_mgr_attr_group,
	&amdgpu_gtt_mgr_attr_group,
	&amdgpu_vbios_version_attr_group,
	NULL,
};


static struct pci_driver amdgpu_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = amdgpu_pci_probe,
	.remove = amdgpu_pci_remove,
	.shutdown = amdgpu_pci_shutdown,
	.driver.pm = &amdgpu_pm_ops,
	.err_handler = &amdgpu_pci_err_handler,
	.dev_groups = amdgpu_sysfs_groups,
};

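/*
 * Module init: refuse to load when a VGA text console is forced, set up
 * the sync and fence slabs, register the ATPX handler, run ACPI
 * detection, initialize KFD and finally register the PCI driver.
 */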
static int __init amdgpu_init(void)
{
	int r;

	if (vgacon_text_force()) {
		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
		return -EINVAL;
	}

	r = amdgpu_sync_init();
	if (r)
		goto error_sync;

	r = amdgpu_fence_slab_init();
	if (r)
		goto error_fence;

	DRM_INFO("amdgpu kernel modesetting enabled.\n");
	amdgpu_register_atpx_handler();
	amdgpu_acpi_detect();

	/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
	amdgpu_amdkfd_init();

	/* let modprobe override vga console setting */
	return pci_register_driver(&amdgpu_kms_pci_driver);

error_fence:
	amdgpu_sync_fini();

error_sync:
	return r;
}

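/* Module exit: tear everything down in roughly the reverse order of amdgpu_init(). */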
static void __exit amdgpu_exit(void)
{
	amdgpu_amdkfd_fini();
	pci_unregister_driver(&amdgpu_kms_pci_driver);
	amdgpu_unregister_atpx_handler();
	amdgpu_sync_fini();
	amdgpu_fence_slab_fini();
	mmu_notifier_synchronize();
}

module_init(amdgpu_init);
module_exit(amdgpu_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");