Commit fd3e14ff authored by Dave Airlie

Merge branch 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux into drm-next

[airlied: fixup build problems on arm - added errno.h include]
* 'drm-next-4.5' of git://people.freedesktop.org/~agd5f/linux: (152 commits)
  amd/powerplay: fix copy paste typo in hardwaremanager.c
  amd/powerplay: disable powerplay by default initially
  amd/powerplay: don't enable ucode fan control if vbios has no fan table
  drm/amd/powerplay: show gpu load when print gpu performance for Cz. (v2)
  drm/amd/powerplay: check whether need to enable thermal control. (v2)
  drm/amd/powerplay: add point check to avoid NULL point hang.
  drm/amdgpu/powerplay: Program a calculated value as Deep Sleep clock.
  drm/amd/powerplay: Don't return an error if fan table is missing
  drm/powerplay/hwmgr: log errors in tonga_hwmgr_backend_init
  drm/powerplay: add debugging output to processpptables.c
  drm/powerplay: add debugging output to tonga_processpptables.c
  amd/powerplay: Add structures required to report configuration change
  amd/powerplay: Fix get dal power level
  amd\powerplay Implement get dal power level
  drm/amd/powerplay: display gpu load when print performance for tonga.
  drm/amdgpu/powerplay: enable sysfs and debugfs interfaces late
  drm/amd/powerplay: move shared function of vi to hwmgr. (v2)
  drm/amd/powerplay: check whether enable dpm in powerplay.
  drm/amd/powerplay: fix bug that dpm funcs in debugfs/sysfs missing.
  drm/amd/powerplay: fix boolreturn.cocci warnings
  ...
@@ -160,6 +160,7 @@ config DRM_AMDGPU
 	  If M is selected, the module will be called amdgpu.
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
+source "drivers/gpu/drm/amd/powerplay/Kconfig"
 
 source "drivers/gpu/drm/nouveau/Kconfig"
...
@@ -2,10 +2,13 @@
 # Makefile for the drm device driver.  This driver provides support for the
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/asic_reg \
-	-Idrivers/gpu/drm/amd/include \
-	-Idrivers/gpu/drm/amd/amdgpu \
-	-Idrivers/gpu/drm/amd/scheduler
+FULL_AMD_PATH=$(src)/..
+
+ccflags-y := -Iinclude/drm -I$(FULL_AMD_PATH)/include/asic_reg \
+	-I$(FULL_AMD_PATH)/include \
+	-I$(FULL_AMD_PATH)/amdgpu \
+	-I$(FULL_AMD_PATH)/scheduler \
+	-I$(FULL_AMD_PATH)/powerplay/inc
 
 amdgpu-y := amdgpu_drv.o
@@ -44,6 +47,7 @@ amdgpu-y += \
 # add SMC block
 amdgpu-y += \
 	amdgpu_dpm.o \
+	amdgpu_powerplay.o \
 	cz_smc.o cz_dpm.o \
 	tonga_smc.o tonga_dpm.o \
 	fiji_smc.o fiji_dpm.o \
@@ -94,6 +98,14 @@ amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o
 amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o
 amdgpu-$(CONFIG_MMU_NOTIFIER) += amdgpu_mn.o
 
+ifneq ($(CONFIG_DRM_AMD_POWERPLAY),)
+
+include $(FULL_AMD_PATH)/powerplay/Makefile
+
+amdgpu-y += $(AMD_POWERPLAY_FILES)
+
+endif
+
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
 
 CFLAGS_amdgpu_trace_points.o := -I$(src)
@@ -52,6 +52,7 @@
 #include "amdgpu_irq.h"
 #include "amdgpu_ucode.h"
 #include "amdgpu_gds.h"
+#include "amd_powerplay.h"
 
 #include "gpu_scheduler.h"
@@ -85,6 +86,7 @@ extern int amdgpu_enable_scheduler;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_enable_semaphores;
+extern int amdgpu_powerplay;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000 /* 100 ms */
@@ -918,8 +920,8 @@ struct amdgpu_ring {
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo		*bo;
-	uint64_t			addr;
+	struct amdgpu_bo_list_entry	entry;
+	uint64_t			addr;
 };
 
 struct amdgpu_vm_id {
@@ -981,9 +983,10 @@ struct amdgpu_vm_manager {
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					       struct amdgpu_vm *vm,
-					       struct list_head *head);
+void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
+			 struct list_head *validated,
+			 struct amdgpu_bo_list_entry *entry);
+void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
@@ -1024,11 +1027,9 @@ int amdgpu_vm_free_job(struct amdgpu_job *job);
  * context related structures
  */
 
-#define AMDGPU_CTX_MAX_CS_PENDING	16
-
 struct amdgpu_ctx_ring {
 	uint64_t		sequence;
-	struct fence		*fences[AMDGPU_CTX_MAX_CS_PENDING];
+	struct fence		**fences;
 	struct amd_sched_entity	entity;
 };
@@ -1037,6 +1038,7 @@ struct amdgpu_ctx {
 	struct amdgpu_device	*adev;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
+	struct fence		**fences;
 	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
 };
@@ -1047,7 +1049,7 @@ struct amdgpu_ctx_mgr {
 	struct idr		ctx_handles;
 };
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		    struct amdgpu_ctx *ctx);
 void amdgpu_ctx_fini(struct amdgpu_ctx *ctx);
@@ -1254,7 +1256,7 @@ struct amdgpu_cs_parser {
 	unsigned	nchunks;
 	struct amdgpu_cs_chunk	*chunks;
 	/* relocations */
-	struct amdgpu_bo_list_entry	*vm_bos;
+	struct amdgpu_bo_list_entry	vm_pd;
 	struct list_head	validated;
 	struct fence		*fence;
@@ -1300,31 +1302,7 @@ struct amdgpu_wb {
 int amdgpu_wb_get(struct amdgpu_device *adev, u32 *wb);
 void amdgpu_wb_free(struct amdgpu_device *adev, u32 wb);
 
-/**
- * struct amdgpu_pm - power management datas
- * It keeps track of various data needed to take powermanagement decision.
- */
-
-enum amdgpu_pm_state_type {
-	/* not used for dpm */
-	POWER_STATE_TYPE_DEFAULT,
-	POWER_STATE_TYPE_POWERSAVE,
-	/* user selectable states */
-	POWER_STATE_TYPE_BATTERY,
-	POWER_STATE_TYPE_BALANCED,
-	POWER_STATE_TYPE_PERFORMANCE,
-	/* internal states */
-	POWER_STATE_TYPE_INTERNAL_UVD,
-	POWER_STATE_TYPE_INTERNAL_UVD_SD,
-	POWER_STATE_TYPE_INTERNAL_UVD_HD,
-	POWER_STATE_TYPE_INTERNAL_UVD_HD2,
-	POWER_STATE_TYPE_INTERNAL_UVD_MVC,
-	POWER_STATE_TYPE_INTERNAL_BOOT,
-	POWER_STATE_TYPE_INTERNAL_THERMAL,
-	POWER_STATE_TYPE_INTERNAL_ACPI,
-	POWER_STATE_TYPE_INTERNAL_ULV,
-	POWER_STATE_TYPE_INTERNAL_3DPERF,
-};
-
 enum amdgpu_int_thermal_type {
 	THERMAL_TYPE_NONE,
@@ -1606,8 +1584,8 @@ struct amdgpu_dpm {
 	/* vce requirements */
 	struct amdgpu_vce_state vce_states[AMDGPU_MAX_VCE_LEVELS];
 	enum amdgpu_vce_level vce_level;
-	enum amdgpu_pm_state_type state;
-	enum amdgpu_pm_state_type user_state;
+	enum amd_pm_state_type state;
+	enum amd_pm_state_type user_state;
 	u32                     platform_caps;
 	u32                     voltage_response_time;
 	u32                     backbias_response_time;
@@ -1660,8 +1638,13 @@ struct amdgpu_pm {
 	const struct firmware	*fw;	/* SMC firmware */
 	uint32_t                fw_version;
 	const struct amdgpu_dpm_funcs *funcs;
+	uint32_t                pcie_gen_mask;
+	uint32_t                pcie_mlw_mask;
+	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
 };
 
+void amdgpu_get_pcie_info(struct amdgpu_device *adev);
+
 /*
  * UVD
  */
@@ -1829,6 +1812,8 @@ struct amdgpu_cu_info {
  */
 struct amdgpu_asic_funcs {
 	bool (*read_disabled_bios)(struct amdgpu_device *adev);
+	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
+				   u8 *bios, u32 length_bytes);
 	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
@@ -2059,6 +2044,10 @@ struct amdgpu_device {
 	/* interrupts */
 	struct amdgpu_irq		irq;
 
+	/* powerplay */
+	struct amd_powerplay		powerplay;
+	bool				pp_enabled;
+
 	/* dpm */
 	struct amdgpu_pm		pm;
 	u32				cg_flags;
@@ -2235,6 +2224,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
+#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
 #define amdgpu_asic_get_cu_info(adev, info) (adev)->asic_funcs->get_cu_info((adev), (info))
 #define amdgpu_gart_flush_gpu_tlb(adev, vmid) (adev)->gart.gart_funcs->flush_gpu_tlb((adev), (vmid))
@@ -2276,24 +2266,78 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
 #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
-#define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
 #define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
 #define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
 #define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_get_sclk(adev, l) (adev)->pm.funcs->get_sclk((adev), (l))
-#define amdgpu_dpm_get_mclk(adev, l) (adev)->pm.funcs->get_mclk((adev), (l))
 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) (adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
-#define amdgpu_dpm_force_performance_level(adev, l) (adev)->pm.funcs->force_performance_level((adev), (l))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_powergate_uvd(adev, g) (adev)->pm.funcs->powergate_uvd((adev), (g))
-#define amdgpu_dpm_powergate_vce(adev, g) (adev)->pm.funcs->powergate_vce((adev), (g))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
-#define amdgpu_dpm_set_fan_control_mode(adev, m) (adev)->pm.funcs->set_fan_control_mode((adev), (m))
-#define amdgpu_dpm_get_fan_control_mode(adev) (adev)->pm.funcs->get_fan_control_mode((adev))
-#define amdgpu_dpm_set_fan_speed_percent(adev, s) (adev)->pm.funcs->set_fan_speed_percent((adev), (s))
-#define amdgpu_dpm_get_fan_speed_percent(adev, s) (adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+
+#define amdgpu_dpm_get_temperature(adev) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
+		(adev)->pm.funcs->get_temperature((adev))
+
+#define amdgpu_dpm_set_fan_control_mode(adev, m) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
+		(adev)->pm.funcs->set_fan_control_mode((adev), (m))
+
+#define amdgpu_dpm_get_fan_control_mode(adev) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
+		(adev)->pm.funcs->get_fan_control_mode((adev))
+
+#define amdgpu_dpm_set_fan_speed_percent(adev, s) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+		(adev)->pm.funcs->set_fan_speed_percent((adev), (s))
+
+#define amdgpu_dpm_get_fan_speed_percent(adev, s) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
+		(adev)->pm.funcs->get_fan_speed_percent((adev), (s))
+
+#define amdgpu_dpm_get_sclk(adev, l) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
+		(adev)->pm.funcs->get_sclk((adev), (l))
+
+#define amdgpu_dpm_get_mclk(adev, l) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
+		(adev)->pm.funcs->get_mclk((adev), (l))
+
+#define amdgpu_dpm_force_performance_level(adev, l) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
+		(adev)->pm.funcs->force_performance_level((adev), (l))
+
+#define amdgpu_dpm_powergate_uvd(adev, g) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
+		(adev)->pm.funcs->powergate_uvd((adev), (g))
+
+#define amdgpu_dpm_powergate_vce(adev, g) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
+		(adev)->pm.funcs->powergate_vce((adev), (g))
+
+#define amdgpu_dpm_debugfs_print_current_performance_level(adev, m) \
+	(adev)->pp_enabled ? \
+		(adev)->powerplay.pp_funcs->print_current_performance_level((adev)->powerplay.pp_handle, (m)) : \
+		(adev)->pm.funcs->debugfs_print_current_performance_level((adev), (m))
+
+#define amdgpu_dpm_get_current_power_state(adev) \
+	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_get_performance_level(adev) \
+	(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
+	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+
 #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
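Each of the reworked amdgpu_dpm_* macros above follows the same dispatch pattern: when pp_enabled is set, the call is routed to the new powerplay function table through its opaque pp_handle; otherwise it falls back to the legacy per-ASIC dpm function table. A minimal sketch of the same control flow as a plain helper (the helper is illustrative, not part of the driver; the fields are the ones introduced in this hunk):

	/* Illustrative equivalent of the amdgpu_dpm_get_temperature() macro. */
	static inline int example_get_temperature(struct amdgpu_device *adev)
	{
		if (adev->pp_enabled)
			/* new path: powerplay component, keyed by an opaque handle */
			return adev->powerplay.pp_funcs->get_temperature(
					adev->powerplay.pp_handle);
		/* legacy path: per-ASIC dpm callbacks */
		return adev->pm.funcs->get_temperature(adev);
	}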
...
@@ -29,66 +29,10 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
 #include "amdgpu.h"
-#include "amdgpu_acpi.h"
+#include "amd_acpi.h"
 #include "atom.h"
 
-#define ACPI_AC_CLASS           "ac_adapter"
-
 extern void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
 
-struct atif_verify_interface {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u16 version;		/* version */
-	u32 notification_mask;	/* supported notifications mask */
-	u32 function_bits;	/* supported functions bit vector */
-} __packed;
-
-struct atif_system_params {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u32 valid_mask;		/* valid flags mask */
-	u32 flags;		/* flags */
-	u8 command_code;	/* notify command code */
-} __packed;
-
-struct atif_sbios_requests {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u32 pending;		/* pending sbios requests */
-	u8 panel_exp_mode;	/* panel expansion mode */
-	u8 thermal_gfx;		/* thermal state: target gfx controller */
-	u8 thermal_state;	/* thermal state: state id (0: exit state, non-0: state) */
-	u8 forced_power_gfx;	/* forced power state: target gfx controller */
-	u8 forced_power_state;	/* forced power state: state id */
-	u8 system_power_src;	/* system power source */
-	u8 backlight_level;	/* panel backlight level (0-255) */
-} __packed;
-
-#define ATIF_NOTIFY_MASK	0x3
-#define ATIF_NOTIFY_NONE	0
-#define ATIF_NOTIFY_81		1
-#define ATIF_NOTIFY_N		2
-
-struct atcs_verify_interface {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u16 version;		/* version */
-	u32 function_bits;	/* supported functions bit vector */
-} __packed;
-
-#define ATCS_VALID_FLAGS_MASK	0x3
-
-struct atcs_pref_req_input {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u16 client_id;		/* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
-	u16 valid_flags_mask;	/* valid flags mask */
-	u16 flags;		/* flags */
-	u8 req_type;		/* request type */
-	u8 perf_req;		/* performance request */
-} __packed;
-
-struct atcs_pref_req_output {
-	u16 size;		/* structure size in bytes (includes size field) */
-	u8 ret_val;		/* return value */
-} __packed;
-
 /* Call the ATIF method
  */
 /**
...
@@ -11,7 +11,7 @@
 #include <linux/acpi.h>
 #include <linux/pci.h>
 
-#include "amdgpu_acpi.h"
+#include "amd_acpi.h"
 
 struct amdgpu_atpx_functions {
 	bool px_params;
...
@@ -35,6 +35,13 @@
  * BIOS.
  */
 
+#define AMD_VBIOS_SIGNATURE " 761295520"
+#define AMD_VBIOS_SIGNATURE_OFFSET 0x30
+#define AMD_VBIOS_SIGNATURE_SIZE sizeof(AMD_VBIOS_SIGNATURE)
+#define AMD_VBIOS_SIGNATURE_END (AMD_VBIOS_SIGNATURE_OFFSET + AMD_VBIOS_SIGNATURE_SIZE)
+#define AMD_IS_VALID_VBIOS(p) ((p)[0] == 0x55 && (p)[1] == 0xAA)
+#define AMD_VBIOS_LENGTH(p) ((p)[2] << 9)
+
 /* If you boot an IGP board with a discrete card as the primary,
  * the IGP rom is not accessible via the rom bar as the IGP rom is
  * part of the system bios.  On boot, the system bios puts a
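These macros encode the standard PC option-ROM header: bytes 0 and 1 must carry the 0x55 0xAA signature, and byte 2 gives the image size in 512-byte blocks, which is why AMD_VBIOS_LENGTH() shifts left by 9 (multiply by 512). A quick worked check (the 0x80 below is an example value, not from the diff):

	u8 header[3] = { 0x55, 0xAA, 0x80 };   /* valid signature, 128 blocks */
	int len = AMD_VBIOS_LENGTH(header);    /* 0x80 << 9 == 65536 bytes */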
@@ -58,7 +65,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+	if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
 		iounmap(bios);
 		return false;
 	}
@@ -74,7 +81,7 @@ static bool igp_read_bios_from_vram(struct amdgpu_device *adev)
 
 bool amdgpu_read_bios(struct amdgpu_device *adev)
 {
-	uint8_t __iomem *bios, val1, val2;
+	uint8_t __iomem *bios, val[2];
 	size_t size;
 
 	adev->bios = NULL;
@@ -84,10 +91,10 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
 		return false;
 	}
 
-	val1 = readb(&bios[0]);
-	val2 = readb(&bios[1]);
+	val[0] = readb(&bios[0]);
+	val[1] = readb(&bios[1]);
 
-	if (size == 0 || val1 != 0x55 || val2 != 0xaa) {
+	if (size == 0 || !AMD_IS_VALID_VBIOS(val)) {
 		pci_unmap_rom(adev->pdev, bios);
 		return false;
 	}
@@ -101,6 +108,38 @@ bool amdgpu_read_bios(struct amdgpu_device *adev)
 	return true;
 }
 
+static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
+{
+	u8 header[AMD_VBIOS_SIGNATURE_END+1] = {0};
+	int len;
+
+	if (!adev->asic_funcs->read_bios_from_rom)
+		return false;
+
+	/* validate VBIOS signature */
+	if (amdgpu_asic_read_bios_from_rom(adev, &header[0], sizeof(header)) == false)
+		return false;
+
+	header[AMD_VBIOS_SIGNATURE_END] = 0;
+
+	if ((!AMD_IS_VALID_VBIOS(header)) ||
+	    0 != memcmp((char *)&header[AMD_VBIOS_SIGNATURE_OFFSET],
+			AMD_VBIOS_SIGNATURE,
+			strlen(AMD_VBIOS_SIGNATURE)))
+		return false;
+
+	/* valid vbios, go on */
+	len = AMD_VBIOS_LENGTH(header);
+	len = ALIGN(len, 4);
+	adev->bios = kmalloc(len, GFP_KERNEL);
+	if (!adev->bios) {
+		DRM_ERROR("no memory to allocate for BIOS\n");
+		return false;
+	}
+
+	/* read complete BIOS */
+	return amdgpu_asic_read_bios_from_rom(adev, adev->bios, len);
+}
+
 static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
 {
 	uint8_t __iomem *bios;
@@ -113,7 +152,7 @@ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
 		return false;
 	}
 
-	if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
+	if (size == 0 || !AMD_IS_VALID_VBIOS(bios)) {
 		return false;
 	}
 	adev->bios = kmemdup(bios, size, GFP_KERNEL);
@@ -230,7 +269,7 @@ static bool amdgpu_atrm_get_bios(struct amdgpu_device *adev)
 			break;
 	}
 
-	if (i == 0 || adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
+	if (i == 0 || !AMD_IS_VALID_VBIOS(adev->bios)) {
 		kfree(adev->bios);
 		return false;
 	}
@@ -319,6 +358,9 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
 		r = igp_read_bios_from_vram(adev);
 	if (r == false)
 		r = amdgpu_read_bios(adev);
+	if (r == false) {
+		r = amdgpu_read_bios_from_rom(adev);
+	}
 	if (r == false) {
 		r = amdgpu_read_disabled_bios(adev);
 	}
@@ -330,7 +372,7 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
 		adev->bios = NULL;
 		return false;
 	}
-	if (adev->bios[0] != 0x55 || adev->bios[1] != 0xaa) {
+	if (!AMD_IS_VALID_VBIOS(adev->bios)) {
 		printk("BIOS signature incorrect %x %x\n", adev->bios[0], adev->bios[1]);
 		goto free_bios;
 	}
...
@@ -24,6 +24,7 @@
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
 #include <drm/drmP.h>
 #include <linux/firmware.h>
 #include <drm/amdgpu_drm.h>
@@ -32,7 +33,6 @@
 #include "atom.h"
 #include "amdgpu_ucode.h"
 
-
 struct amdgpu_cgs_device {
 	struct cgs_device base;
 	struct amdgpu_device *adev;
@@ -703,6 +703,9 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
 		case CHIP_TONGA:
 			strcpy(fw_name, "amdgpu/tonga_smc.bin");
 			break;
+		case CHIP_FIJI:
+			strcpy(fw_name, "amdgpu/fiji_smc.bin");
+			break;
 		default:
 			DRM_ERROR("SMC firmware not supported\n");
 			return -EINVAL;
@@ -736,6 +739,288 @@ static int amdgpu_cgs_get_firmware_info(void *cgs_device,
 	return 0;
 }
 
+static int amdgpu_cgs_query_system_info(void *cgs_device,
+					struct cgs_system_info *sys_info)
+{
+	CGS_FUNC_ADEV;
+
+	if (NULL == sys_info)
+		return -ENODEV;
+
+	if (sizeof(struct cgs_system_info) != sys_info->size)
+		return -ENODEV;
+
+	switch (sys_info->info_id) {
+	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
+		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
+		break;
+	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
+		sys_info->value = adev->pm.pcie_gen_mask;
+		break;
+	case CGS_SYSTEM_INFO_PCIE_MLW:
+		sys_info->value = adev->pm.pcie_mlw_mask;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int amdgpu_cgs_get_active_displays_info(void *cgs_device,
+					       struct cgs_display_info *info)
+{
+	CGS_FUNC_ADEV;
+	struct amdgpu_crtc *amdgpu_crtc;
+	struct drm_device *ddev = adev->ddev;
+	struct drm_crtc *crtc;
+	uint32_t line_time_us, vblank_lines;
+
+	if (info == NULL)
+		return -EINVAL;
+
+	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+		list_for_each_entry(crtc,
+				&ddev->mode_config.crtc_list, head) {
+			amdgpu_crtc = to_amdgpu_crtc(crtc);
+			if (crtc->enabled) {
+				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
+				info->display_count++;
+			}
+			if (info->mode_info != NULL &&
+				crtc->enabled && amdgpu_crtc->enabled &&
+				amdgpu_crtc->hw_mode.clock) {
+				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
+							amdgpu_crtc->hw_mode.clock;
+				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
+							amdgpu_crtc->hw_mode.crtc_vdisplay +
+							(amdgpu_crtc->v_border * 2);
+				info->mode_info->vblank_time_us = vblank_lines * line_time_us;
+				info->mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+				info->mode_info->ref_clock = adev->clock.spll.reference_freq;
+				info->mode_info++;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/** \brief evaluate acpi namespace object, handle or pathname must be valid
+ *  \param cgs_device
+ *  \param info input/output arguments for the control method
+ *  \return status
+ */
+#if defined(CONFIG_ACPI)
+static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+				       struct cgs_acpi_method_info *info)
+{
+	CGS_FUNC_ADEV;
+	acpi_handle handle;
+	struct acpi_object_list input;
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *params = NULL;
+	union acpi_object *obj = NULL;
+	uint8_t name[5] = {'\0'};
+	struct cgs_acpi_method_argument *argument = NULL;
+	uint32_t i, count;
+	acpi_status status;
+	int result;
+	uint32_t func_no = 0xFFFFFFFF;
+
+	handle = ACPI_HANDLE(&adev->pdev->dev);
+	if (!handle)
+		return -ENODEV;
+
+	memset(&input, 0, sizeof(struct acpi_object_list));
+
+	/* validate input info */
+	if (info->size != sizeof(struct cgs_acpi_method_info))
+		return -EINVAL;
+
+	input.count = info->input_count;
+	if (info->input_count > 0) {
+		if (info->pinput_argument == NULL)
+			return -EINVAL;
+		argument = info->pinput_argument;
+		func_no = argument->value;
+		for (i = 0; i < info->input_count; i++) {
+			if (((argument->type == ACPI_TYPE_STRING) ||
+			     (argument->type == ACPI_TYPE_BUFFER))
+			    && (argument->pointer == NULL))
+				return -EINVAL;
+			argument++;
+		}
+	}
+
+	if (info->output_count > 0) {
+		if (info->poutput_argument == NULL)
+			return -EINVAL;
+		argument = info->poutput_argument;
+		for (i = 0; i < info->output_count; i++) {
+			if (((argument->type == ACPI_TYPE_STRING) ||
+			     (argument->type == ACPI_TYPE_BUFFER))
+			    && (argument->pointer == NULL))
+				return -EINVAL;
+			argument++;
+		}
+	}
+
+	/* The path name passed to acpi_evaluate_object should be null terminated */
+	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
+		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
+		name[4] = '\0';
+	}
+
+	/* parse input parameters */
+	if (input.count > 0) {
+		input.pointer = params =
+			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
+		if (params == NULL)
+			return -EINVAL;
+		argument = info->pinput_argument;
+		for (i = 0; i < input.count; i++) {
+			params->type = argument->type;
+			switch (params->type) {
+			case ACPI_TYPE_INTEGER:
+				params->integer.value = argument->value;
+				break;
+			case ACPI_TYPE_STRING:
+				params->string.length = argument->method_length;
+				params->string.pointer = argument->pointer;
+				break;
+			case ACPI_TYPE_BUFFER:
+				params->buffer.length = argument->method_length;
+				params->buffer.pointer = argument->pointer;
+				break;
+			default:
+				break;
+			}
+			params++;
+			argument++;
+		}
+	}
+
+	/* parse output info */
+	count = info->output_count;
+	argument = info->poutput_argument;
+
+	/* evaluate the acpi method */
+	status = acpi_evaluate_object(handle, name, &input, &output);
+	if (ACPI_FAILURE(status)) {
+		result = -EIO;
+		goto error;
+	}
+
+	/* return the output info */
+	obj = output.pointer;
+	if (count > 1) {
+		if ((obj->type != ACPI_TYPE_PACKAGE) ||
+		    (obj->package.count != count)) {
+			result = -EIO;
+			goto error;
+		}
+		params = obj->package.elements;
+	} else
+		params = obj;
+
+	if (params == NULL) {
+		result = -EIO;
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		if (argument->type != params->type) {
+			result = -EIO;
+			goto error;
+		}
+		switch (params->type) {
+		case ACPI_TYPE_INTEGER:
+			argument->value = params->integer.value;
+			break;
+		case ACPI_TYPE_STRING:
+			if ((params->string.length != argument->data_length) ||
+			    (params->string.pointer == NULL)) {
+				result = -EIO;
+				goto error;
+			}
+			strncpy(argument->pointer,
+				params->string.pointer,
+				params->string.length);
+			break;
+		case ACPI_TYPE_BUFFER:
+			if (params->buffer.pointer == NULL) {
+				result = -EIO;
+				goto error;
+			}
+			memcpy(argument->pointer,
+			       params->buffer.pointer,
+			       argument->data_length);
+			break;
+		default:
+			break;
+		}
+		argument++;
+		params++;
+	}
+
+error:
+	if (obj != NULL)
+		kfree(obj);
+	kfree((void *)input.pointer);
+	return result;
+}
+#else
+static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
+				       struct cgs_acpi_method_info *info)
+{
+	return -EIO;
+}
+#endif
+
+int amdgpu_cgs_call_acpi_method(void *cgs_device,
+				uint32_t acpi_method,
+				uint32_t acpi_function,
+				void *pinput, void *poutput,
+				uint32_t output_count,
+				uint32_t input_size,
+				uint32_t output_size)
+{
+	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
+	struct cgs_acpi_method_argument acpi_output = {0};
+	struct cgs_acpi_method_info info = {0};
+
+	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
+	acpi_input[0].method_length = sizeof(uint32_t);
+	acpi_input[0].data_length = sizeof(uint32_t);
+	acpi_input[0].value = acpi_function;
+
+	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
+	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
+	acpi_input[1].data_length = input_size;
+	acpi_input[1].pointer = pinput;
+
+	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
+	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
+	acpi_output.data_length = output_size;
+	acpi_output.pointer = poutput;
+
+	info.size = sizeof(struct cgs_acpi_method_info);
+	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
+	info.input_count = 2;
+	info.name = acpi_method;
+	info.pinput_argument = acpi_input;
+	info.output_count = output_count;
+	info.poutput_argument = &acpi_output;
+
+	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
+}
+
 static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_gpu_mem_info,
 	amdgpu_cgs_gmap_kmem,
@@ -768,7 +1053,10 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_set_camera_voltages,
 	amdgpu_cgs_get_firmware_info,
 	amdgpu_cgs_set_powergating_state,
-	amdgpu_cgs_set_clockgating_state
+	amdgpu_cgs_set_clockgating_state,
+	amdgpu_cgs_get_active_displays_info,
+	amdgpu_cgs_call_acpi_method,
+	amdgpu_cgs_query_system_info,
 };
 
 static const struct cgs_os_ops amdgpu_cgs_os_ops = {
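In amdgpu_cgs_get_active_displays_info() above, the vblank time reported to powerplay is derived from the active mode: with the pixel clock in kHz, htotal * 1000 / clock is the scanout time of one line in microseconds, and multiplying by the number of vblank lines (plus any vertical border) gives the blanking interval. A worked example with typical 1080p timing numbers (the values are illustrative, not from the diff):

	unsigned int htotal = 2200, clock_khz = 148500;  /* ~1080p@60 timing */
	unsigned int vblank_end = 1125, vdisplay = 1080, v_border = 0;

	unsigned int line_time_us   = (htotal * 1000) / clock_khz;            /* 14 us */
	unsigned int vblank_lines   = vblank_end - vdisplay + (v_border * 2); /* 45 */
	unsigned int vblank_time_us = vblank_lines * line_time_us;            /* 630 us */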
...
@@ -388,17 +388,18 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 		amdgpu_cs_buckets_get_list(&buckets, &p->validated);
 	}
 
-	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
-				      &p->validated);
+	INIT_LIST_HEAD(&duplicates);
+	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
 
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
-	INIT_LIST_HEAD(&duplicates);
 	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, &duplicates);
 	if (unlikely(r != 0))
 		goto error_reserve;
 
+	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+
 	r = amdgpu_cs_list_validate(p->adev, &fpriv->vm, &p->validated);
 	if (r)
 		goto error_validate;
@@ -480,7 +481,6 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	if (parser->bo_list)
 		amdgpu_bo_list_put(parser->bo_list);
 
-	drm_free_large(parser->vm_bos);
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
...
@@ -25,7 +25,7 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-int amdgpu_ctx_init(struct amdgpu_device *adev, bool kernel,
+int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 		    struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
@@ -35,17 +35,25 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
 	spin_lock_init(&ctx->ring_lock);
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		ctx->rings[i].sequence = 1;
+	ctx->fences = kzalloc(sizeof(struct fence *) * amdgpu_sched_jobs *
+			      AMDGPU_MAX_RINGS, GFP_KERNEL);
+	if (!ctx->fences)
+		return -ENOMEM;
 
+	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		ctx->rings[i].sequence = 1;
+		ctx->rings[i].fences = (void *)ctx->fences + sizeof(struct fence *) *
+			amdgpu_sched_jobs * i;
+	}
 	if (amdgpu_enable_scheduler) {
 		/* create context entity for each ring */
 		for (i = 0; i < adev->num_rings; i++) {
 			struct amd_sched_rq *rq;
-			if (kernel)
-				rq = &adev->rings[i]->sched.kernel_rq;
-			else
-				rq = &adev->rings[i]->sched.sched_rq;
+			if (pri >= AMD_SCHED_MAX_PRIORITY) {
+				kfree(ctx->fences);
+				return -EINVAL;
+			}
+			rq = &adev->rings[i]->sched.sched_rq[pri];
 			r = amd_sched_entity_init(&adev->rings[i]->sched,
 						  &ctx->rings[i].entity,
 						  rq, amdgpu_sched_jobs);
@@ -57,7 +65,7 @@ int amdgpu_ctx_init(struct amdgpu_device *adev, enum amd_sched_priority pri,
 			for (j = 0; j < i; j++)
 				amd_sched_entity_fini(&adev->rings[j]->sched,
 						      &ctx->rings[j].entity);
-			kfree(ctx);
+			kfree(ctx->fences);
 			return r;
 		}
 	}
@@ -73,8 +81,9 @@ void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 		return;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
-		for (j = 0; j < AMDGPU_CTX_MAX_CS_PENDING; ++j)
+		for (j = 0; j < amdgpu_sched_jobs; ++j)
 			fence_put(ctx->rings[i].fences[j]);
+	kfree(ctx->fences);
 
 	if (amdgpu_enable_scheduler) {
 		for (i = 0; i < adev->num_rings; i++)
@@ -103,9 +112,13 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		return r;
 	}
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, false, ctx);
+	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_NORMAL, ctx);
+	if (r) {
+		idr_remove(&mgr->ctx_handles, *id);
+		*id = 0;
+		kfree(ctx);
+	}
 	mutex_unlock(&mgr->lock);
-
 	return r;
 }
@@ -239,7 +252,7 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	unsigned idx = 0;
 	struct fence *other = NULL;
 
-	idx = seq % AMDGPU_CTX_MAX_CS_PENDING;
+	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 	if (other) {
 		signed long r;
@@ -274,12 +287,12 @@ struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	}
 
-	if (seq + AMDGPU_CTX_MAX_CS_PENDING < cring->sequence) {
+	if (seq + amdgpu_sched_jobs < cring->sequence) {
 		spin_unlock(&ctx->ring_lock);
 		return NULL;
 	}
 
-	fence = fence_get(cring->fences[seq % AMDGPU_CTX_MAX_CS_PENDING]);
+	fence = fence_get(cring->fences[seq & (amdgpu_sched_jobs - 1)]);
 	spin_unlock(&ctx->ring_lock);
 
 	return fence;
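The new fence indexing only works because amdgpu_sched_jobs is guaranteed to be a power of two: seq & (amdgpu_sched_jobs - 1) then gives the same result as seq % amdgpu_sched_jobs without a 64-bit division. The guarantee comes from the new check in amdgpu_check_arguments() (see the amdgpu_device.c hunks below), which rounds the module parameter up with roundup_pow_of_two(). A quick illustration:

	/* amdgpu_sched_jobs = 32 (a power of two), so the mask is 31. */
	uint64_t seq = 100;
	unsigned idx = seq & (32 - 1);   /* 100 & 31 == 4, same as 100 % 32 */
	/* for a non-power-of-two size the two expressions would disagree,
	 * hence the enforced rounding */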
...
@@ -38,6 +38,7 @@
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
+#include "amd_pcie.h"
 #ifdef CONFIG_DRM_AMDGPU_CIK
 #include "cik.h"
 #endif
@@ -949,6 +950,15 @@ static bool amdgpu_check_pot_argument(int arg)
  */
 static void amdgpu_check_arguments(struct amdgpu_device *adev)
 {
+	if (amdgpu_sched_jobs < 4) {
+		dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n",
+			 amdgpu_sched_jobs);
+		amdgpu_sched_jobs = 4;
+	} else if (!amdgpu_check_pot_argument(amdgpu_sched_jobs)) {
+		dev_warn(adev->dev, "sched jobs (%d) must be a power of 2\n",
+			 amdgpu_sched_jobs);
+		amdgpu_sched_jobs = roundup_pow_of_two(amdgpu_sched_jobs);
+	}
 	/* vramlimit must be a power of two */
 	if (!amdgpu_check_pot_argument(amdgpu_vram_limit)) {
 		dev_warn(adev->dev, "vram limit (%d) must be a power of 2\n",
@@ -1214,12 +1224,14 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		} else {
 			if (adev->ip_blocks[i].funcs->early_init) {
 				r = adev->ip_blocks[i].funcs->early_init((void *)adev);
-				if (r == -ENOENT)
+				if (r == -ENOENT) {
 					adev->ip_block_status[i].valid = false;
-				else if (r)
+				} else if (r) {
+					DRM_ERROR("early_init %d failed %d\n", i, r);
 					return r;
-				else
+				} else {
 					adev->ip_block_status[i].valid = true;
+				}
 			} else {
 				adev->ip_block_status[i].valid = true;
 			}
@@ -1237,20 +1249,28 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("sw_init %d failed %d\n", i, r);
 			return r;
+		}
 		adev->ip_block_status[i].sw = true;
 		/* need to do gmc hw init early so we can allocate gpu mem */
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
 			r = amdgpu_vram_scratch_init(adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r);
 				return r;
+			}
 			r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("hw_init %d failed %d\n", i, r);
 				return r;
+			}
 			r = amdgpu_wb_init(adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("amdgpu_wb_init failed %d\n", r);
 				return r;
+			}
 			adev->ip_block_status[i].hw = true;
 		}
 	}
@@ -1262,8 +1282,10 @@ static int amdgpu_init(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
 			continue;
 		r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("hw_init %d failed %d\n", i, r);
 			return r;
+		}
 		adev->ip_block_status[i].hw = true;
 	}
@@ -1280,12 +1302,16 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
 		/* enable clockgating to save power */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_GATE);
-		if (r)
+		if (r) {
+			DRM_ERROR("set_clockgating_state(gate) %d failed %d\n", i, r);
 			return r;
+		}
 		if (adev->ip_blocks[i].funcs->late_init) {
 			r = adev->ip_blocks[i].funcs->late_init((void *)adev);
-			if (r)
+			if (r) {
+				DRM_ERROR("late_init %d failed %d\n", i, r);
 				return r;
+			}
 		}
 	}
@@ -1306,10 +1332,15 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_UNGATE);
-		if (r)
+		if (r) {
+			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
 			return r;
+		}
 		r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("hw_fini %d failed %d\n", i, r);
+		}
 		adev->ip_block_status[i].hw = false;
 	}
@@ -1318,6 +1349,9 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 			continue;
 		r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_DEBUG("sw_fini %d failed %d\n", i, r);
+		}
 		adev->ip_block_status[i].sw = false;
 		adev->ip_block_status[i].valid = false;
 	}
@@ -1335,9 +1369,15 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
 		/* ungate blocks so that suspend can properly shut them down */
 		r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 								    AMD_CG_STATE_UNGATE);
+		if (r) {
+			DRM_ERROR("set_clockgating_state(ungate) %d failed %d\n", i, r);
+		}
 		/* XXX handle errors */
 		r = adev->ip_blocks[i].funcs->suspend(adev);
 		/* XXX handle errors */
+		if (r) {
+			DRM_ERROR("suspend %d failed %d\n", i, r);
+		}
 	}
 
 	return 0;
@@ -1351,8 +1391,10 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 		if (!adev->ip_block_status[i].valid)
 			continue;
 		r = adev->ip_blocks[i].funcs->resume(adev);
-		if (r)
+		if (r) {
+			DRM_ERROR("resume %d failed %d\n", i, r);
 			return r;
+		}
 	}
 
 	return 0;
@@ -1484,8 +1526,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return -EINVAL;
 	}
 	r = amdgpu_atombios_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
 		return r;
+	}
 
 	/* Post card if necessary */
 	if (!amdgpu_card_posted(adev)) {
@@ -1499,21 +1543,26 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 
 	/* Initialize clocks */
 	r = amdgpu_atombios_get_clock_info(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
 		return r;
+	}
 	/* init i2c buses */
 	amdgpu_atombios_i2c_init(adev);
 
 	/* Fence driver */
 	r = amdgpu_fence_driver_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
 		return r;
+	}
 
 	/* init the mode config */
 	drm_mode_config_init(adev->ddev);
 
 	r = amdgpu_init(adev);
 	if (r) {
+		dev_err(adev->dev, "amdgpu_init failed\n");
 		amdgpu_fini(adev);
 		return r;
 	}
@@ -1528,7 +1577,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		return r;
 	}
 
-	r = amdgpu_ctx_init(adev, true, &adev->kernel_ctx);
+	r = amdgpu_ctx_init(adev, AMD_SCHED_PRIORITY_KERNEL, &adev->kernel_ctx);
 	if (r) {
 		dev_err(adev->dev, "failed to create kernel context (%d).\n", r);
 		return r;
@@ -1570,8 +1619,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	 * explicit gating rather than handling it automatically.
 	 */
 	r = amdgpu_late_init(adev);
-	if (r)
+	if (r) {
+		dev_err(adev->dev, "amdgpu_late_init failed\n");
 		return r;
+	}
 
 	return 0;
 }
@@ -1788,6 +1839,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
 	}
 
 	drm_kms_helper_poll_enable(dev);
+	drm_helper_hpd_irq_event(dev);
 
 	if (fbcon) {
 		amdgpu_fbdev_set_suspend(adev, 0);
@@ -1881,6 +1933,83 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	return r;
 }
 
+void amdgpu_get_pcie_info(struct amdgpu_device *adev)
+{
+	u32 mask;
+	int ret;
+
+	if (pci_is_root_bus(adev->pdev->bus))
+		return;
+
+	if (amdgpu_pcie_gen2 == 0)
+		return;
+
+	if (adev->flags & AMD_IS_APU)
+		return;
+
+	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+	if (!ret) {
+		adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					  CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+		if (mask & DRM_PCIE_SPEED_25)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+		if (mask & DRM_PCIE_SPEED_50)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+		if (mask & DRM_PCIE_SPEED_80)
+			adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+	}
+	ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+	if (!ret) {
+		switch (mask) {
+		case 32:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 16:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 12:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 8:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 4:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 2:
+			adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+						  CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+			break;
+		case 1:
+			adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * Debugfs
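amdgpu_get_pcie_info() publishes the platform's PCIe capabilities as bitmasks that powerplay reads back through amdgpu_cgs_query_system_info() (CGS_SYSTEM_INFO_PCIE_GEN_INFO and CGS_SYSTEM_INFO_PCIE_MLW in the amdgpu_cgs.c hunk above). Each case in the width switch sets the flag for the maximum width and for every width below it, so a consumer can test any supported width with a single mask. A sketch of such a test (the helper is illustrative; the CAIL_* flag is the one used in the diff):

	/* True if the link supports at least x8. */
	static inline bool example_supports_x8(struct amdgpu_device *adev)
	{
		return (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) != 0;
	}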
...
@@ -79,9 +79,10 @@ int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
 int amdgpu_exp_hw_support = 0;
 int amdgpu_enable_scheduler = 1;
-int amdgpu_sched_jobs = 16;
+int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_enable_semaphores = 0;
+int amdgpu_powerplay = -1;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -155,7 +156,7 @@ module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
 MODULE_PARM_DESC(enable_scheduler, "enable SW GPU scheduler (1 = enable (default), 0 = disable)");
 module_param_named(enable_scheduler, amdgpu_enable_scheduler, int, 0444);
 
-MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 16)");
+MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
 module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
 
 MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
@@ -164,6 +165,11 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 MODULE_PARM_DESC(enable_semaphores, "Enable semaphores (1 = enable, 0 = disable (default))");
 module_param_named(enable_semaphores, amdgpu_enable_semaphores, int, 0644);
 
+#ifdef CONFIG_DRM_AMD_POWERPLAY
+MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
+module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+#endif
+
 static struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
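The new powerplay parameter is a tri-state: 1 forces the component on, 0 forces it off, and -1 (the default) leaves the decision to the driver. The code that consumes it lives in amdgpu_powerplay.c, which this excerpt does not include; the sketch below is an assumption about how such a tri-state is typically folded into pp_enabled, not the merged code (note the "disable powerplay by default initially" commit in the shortlog above):

	/* Hypothetical sketch -- the real decision is made in amdgpu_powerplay.c. */
	static void example_decide_pp_enabled(struct amdgpu_device *adev)
	{
		if (amdgpu_powerplay == 1)
			adev->pp_enabled = true;	/* forced on */
		else if (amdgpu_powerplay == 0)
			adev->pp_enabled = false;	/* forced off */
		else
			adev->pp_enabled = false;	/* -1: per-ASIC default, initially off */
	}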
...
@@ -263,7 +263,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
 	}
 
 	if (fb && ret) {
-		drm_gem_object_unreference(gobj);
+		drm_gem_object_unreference_unlocked(gobj);
 		drm_framebuffer_unregister_private(fb);
 		drm_framebuffer_cleanup(fb);
 		kfree(fb);
...
@@ -448,7 +448,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct amdgpu_bo_va *bo_va, uint32_t operation)
 {
 	struct ttm_validate_buffer tv, *entry;
-	struct amdgpu_bo_list_entry *vm_bos;
+	struct amdgpu_bo_list_entry vm_pd;
 	struct ww_acquire_ctx ticket;
 	struct list_head list, duplicates;
 	unsigned domain;
@@ -461,15 +461,14 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	tv.shared = true;
 	list_add(&tv.head, &list);
 
-	vm_bos = amdgpu_vm_get_bos(adev, bo_va->vm, &list);
-	if (!vm_bos)
-		return;
+	amdgpu_vm_get_pd_bo(bo_va->vm, &list, &vm_pd);
 
 	/* Provide duplicates to avoid -EALREADY */
 	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
-		goto error_free;
+		goto error_print;
 
+	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
 	list_for_each_entry(entry, &list, head) {
 		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 		/* if anything is swapped out don't swap it in here,
@@ -491,9 +490,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 
 error_unreserve:
 	ttm_eu_backoff_reservation(&ticket, &list);
 
-error_free:
-	drm_free_large(vm_bos);
-
+error_print:
 	if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
 }
......
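A standalone model (toy types, not the TTM structs) of the reservation flow above: the page directory is the only buffer placed on the main validation list up front; the page tables share its reservation object, so once ttm_eu_reserve_buffers() succeeds they are already locked and are appended to the duplicates list instead, which is also why the reserve call is told to tolerate -EALREADY:

#include <stdio.h>

struct entry { const char *name; struct entry *next; };

static void push(struct entry **head, struct entry *e)
{
	e->next = *head;
	*head = e;
}

int main(void)
{
	struct entry pd  = { "page directory", NULL };
	struct entry pt0 = { "page table 0", NULL };
	struct entry pt1 = { "page table 1", NULL };
	struct entry *validated = NULL, *duplicates = NULL;

	push(&validated, &pd);		/* amdgpu_vm_get_pd_bo() analogue */
	/* ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates) goes
	 * here; the PTs share the PD's reservation object, so they may
	 * only join the duplicates list afterwards */
	push(&duplicates, &pt1);	/* amdgpu_vm_get_pt_bos() analogue */
	push(&duplicates, &pt0);

	for (struct entry *e = validated; e; e = e->next)
		printf("validated: %s\n", e->name);
	for (struct entry *e = duplicates; e; e = e->next)
		printf("duplicate: %s\n", e->name);
	return 0;
}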
...@@ -30,10 +30,16 @@ ...@@ -30,10 +30,16 @@
#include <linux/hwmon.h> #include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h> #include <linux/hwmon-sysfs.h>
#include "amd_powerplay.h"
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
{ {
if (adev->pp_enabled)
/* TODO */
return;
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
if (power_supply_is_system_supplied() > 0) if (power_supply_is_system_supplied() > 0)
...@@ -52,7 +58,12 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, ...@@ -52,7 +58,12 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
enum amdgpu_pm_state_type pm = adev->pm.dpm.user_state; enum amd_pm_state_type pm;
if (adev->pp_enabled) {
pm = amdgpu_dpm_get_current_power_state(adev);
} else
pm = adev->pm.dpm.user_state;
return snprintf(buf, PAGE_SIZE, "%s\n", return snprintf(buf, PAGE_SIZE, "%s\n",
(pm == POWER_STATE_TYPE_BATTERY) ? "battery" : (pm == POWER_STATE_TYPE_BATTERY) ? "battery" :
...@@ -66,40 +77,57 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev, ...@@ -66,40 +77,57 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
enum amd_pm_state_type state;
mutex_lock(&adev->pm.mutex);
if (strncmp("battery", buf, strlen("battery")) == 0) if (strncmp("battery", buf, strlen("battery")) == 0)
adev->pm.dpm.user_state = POWER_STATE_TYPE_BATTERY; state = POWER_STATE_TYPE_BATTERY;
else if (strncmp("balanced", buf, strlen("balanced")) == 0) else if (strncmp("balanced", buf, strlen("balanced")) == 0)
adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; state = POWER_STATE_TYPE_BALANCED;
else if (strncmp("performance", buf, strlen("performance")) == 0) else if (strncmp("performance", buf, strlen("performance")) == 0)
adev->pm.dpm.user_state = POWER_STATE_TYPE_PERFORMANCE; state = POWER_STATE_TYPE_PERFORMANCE;
else { else {
mutex_unlock(&adev->pm.mutex);
count = -EINVAL; count = -EINVAL;
goto fail; goto fail;
} }
mutex_unlock(&adev->pm.mutex);
/* Can't set dpm state when the card is off */ if (adev->pp_enabled) {
if (!(adev->flags & AMD_IS_PX) || amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
(ddev->switch_power_state == DRM_SWITCH_POWER_ON)) } else {
amdgpu_pm_compute_clocks(adev); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.user_state = state;
mutex_unlock(&adev->pm.mutex);
/* Can't set dpm state when the card is off */
if (!(adev->flags & AMD_IS_PX) ||
(ddev->switch_power_state == DRM_SWITCH_POWER_ON))
amdgpu_pm_compute_clocks(adev);
}
fail: fail:
return count; return count;
} }
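The stores above match by prefix, so the trailing newline from `echo performance > power_dpm_state` still parses. A standalone demo of the same strncmp pattern:

#include <stdio.h>
#include <string.h>

static const char *parse_state(const char *buf)
{
	if (strncmp("battery", buf, strlen("battery")) == 0)
		return "battery";
	if (strncmp("balanced", buf, strlen("balanced")) == 0)
		return "balanced";
	if (strncmp("performance", buf, strlen("performance")) == 0)
		return "performance";
	return NULL;
}

int main(void)
{
	const char *s = parse_state("performance\n");

	printf("%s\n", s ? s : "invalid");	/* performance */
	return 0;
}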
static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
struct device_attribute *attr, struct device_attribute *attr,
char *buf) char *buf)
{ {
struct drm_device *ddev = dev_get_drvdata(dev); struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private; struct amdgpu_device *adev = ddev->dev_private;
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
return snprintf(buf, PAGE_SIZE, "%s\n", if (adev->pp_enabled) {
(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" : enum amd_dpm_forced_level level;
(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
level = amdgpu_dpm_get_performance_level(adev);
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
} else {
enum amdgpu_dpm_forced_level level;
level = adev->pm.dpm.forced_level;
return snprintf(buf, PAGE_SIZE, "%s\n",
(level == AMDGPU_DPM_FORCED_LEVEL_AUTO) ? "auto" :
(level == AMDGPU_DPM_FORCED_LEVEL_LOW) ? "low" : "high");
}
} }
static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
...@@ -112,7 +140,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, ...@@ -112,7 +140,6 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
enum amdgpu_dpm_forced_level level; enum amdgpu_dpm_forced_level level;
int ret = 0; int ret = 0;
mutex_lock(&adev->pm.mutex);
if (strncmp("low", buf, strlen("low")) == 0) { if (strncmp("low", buf, strlen("low")) == 0) {
level = AMDGPU_DPM_FORCED_LEVEL_LOW; level = AMDGPU_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) { } else if (strncmp("high", buf, strlen("high")) == 0) {
...@@ -123,7 +150,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, ...@@ -123,7 +150,11 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
count = -EINVAL; count = -EINVAL;
goto fail; goto fail;
} }
if (adev->pm.funcs->force_performance_level) {
if (adev->pp_enabled)
amdgpu_dpm_force_performance_level(adev, level);
else {
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) { if (adev->pm.dpm.thermal_active) {
count = -EINVAL; count = -EINVAL;
goto fail; goto fail;
...@@ -131,6 +162,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, ...@@ -131,6 +162,9 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
ret = amdgpu_dpm_force_performance_level(adev, level); ret = amdgpu_dpm_force_performance_level(adev, level);
if (ret) if (ret)
count = -EINVAL; count = -EINVAL;
else
adev->pm.dpm.forced_level = level;
mutex_unlock(&adev->pm.mutex);
} }
fail: fail:
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
...@@ -150,10 +184,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev, ...@@ -150,10 +184,10 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
int temp; int temp;
if (adev->pm.funcs->get_temperature) if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
temp = amdgpu_dpm_get_temperature(adev);
else
temp = 0; temp = 0;
else
temp = amdgpu_dpm_get_temperature(adev);
return snprintf(buf, PAGE_SIZE, "%d\n", temp); return snprintf(buf, PAGE_SIZE, "%d\n", temp);
} }
...@@ -181,8 +215,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, ...@@ -181,8 +215,10 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
u32 pwm_mode = 0; u32 pwm_mode = 0;
if (adev->pm.funcs->get_fan_control_mode) if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); return -EINVAL;
pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
/* never 0 (full-speed), fuse or smc-controlled always */ /* never 0 (full-speed), fuse or smc-controlled always */
return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2); return sprintf(buf, "%i\n", pwm_mode == FDO_PWM_MODE_STATIC ? 1 : 2);
...@@ -197,7 +233,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, ...@@ -197,7 +233,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
int err; int err;
int value; int value;
if(!adev->pm.funcs->set_fan_control_mode) if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
return -EINVAL; return -EINVAL;
err = kstrtoint(buf, 10, &value); err = kstrtoint(buf, 10, &value);
...@@ -294,7 +330,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, ...@@ -294,7 +330,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct amdgpu_device *adev = dev_get_drvdata(dev); struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode; umode_t effective_mode = attr->mode;
/* Skip attributes if DPM is not enabled */ /* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled && if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr ||
...@@ -304,6 +340,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, ...@@ -304,6 +340,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
return 0; return 0;
if (adev->pp_enabled)
return effective_mode;
/* Skip fan attributes if fan is not present */ /* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && if (adev->pm.no_fan &&
(attr == &sensor_dev_attr_pwm1.dev_attr.attr || (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
...@@ -351,7 +390,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) ...@@ -351,7 +390,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
container_of(work, struct amdgpu_device, container_of(work, struct amdgpu_device,
pm.dpm.thermal.work); pm.dpm.thermal.work);
/* switch to the thermal state */ /* switch to the thermal state */
enum amdgpu_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
return; return;
...@@ -379,7 +418,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) ...@@ -379,7 +418,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
} }
static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
enum amdgpu_pm_state_type dpm_state) enum amd_pm_state_type dpm_state)
{ {
int i; int i;
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
...@@ -516,7 +555,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) ...@@ -516,7 +555,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
{ {
int i; int i;
struct amdgpu_ps *ps; struct amdgpu_ps *ps;
enum amdgpu_pm_state_type dpm_state; enum amd_pm_state_type dpm_state;
int ret; int ret;
/* if dpm init failed */ /* if dpm init failed */
...@@ -635,49 +674,54 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) ...@@ -635,49 +674,54 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
{ {
if (adev->pm.funcs->powergate_uvd) { if (adev->pp_enabled)
mutex_lock(&adev->pm.mutex);
/* enable/disable UVD */
amdgpu_dpm_powergate_uvd(adev, !enable); amdgpu_dpm_powergate_uvd(adev, !enable);
mutex_unlock(&adev->pm.mutex); else {
} else { if (adev->pm.funcs->powergate_uvd) {
if (enable) {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.uvd_active = true; /* enable/disable UVD */
adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; amdgpu_dpm_powergate_uvd(adev, !enable);
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} else { } else {
mutex_lock(&adev->pm.mutex); if (enable) {
adev->pm.dpm.uvd_active = false; mutex_lock(&adev->pm.mutex);
mutex_unlock(&adev->pm.mutex); adev->pm.dpm.uvd_active = true;
adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.uvd_active = false;
mutex_unlock(&adev->pm.mutex);
}
amdgpu_pm_compute_clocks(adev);
} }
amdgpu_pm_compute_clocks(adev);
} }
} }
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{ {
if (adev->pm.funcs->powergate_vce) { if (adev->pp_enabled)
mutex_lock(&adev->pm.mutex);
/* enable/disable VCE */
amdgpu_dpm_powergate_vce(adev, !enable); amdgpu_dpm_powergate_vce(adev, !enable);
else {
mutex_unlock(&adev->pm.mutex); if (adev->pm.funcs->powergate_vce) {
} else {
if (enable) {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = true; amdgpu_dpm_powergate_vce(adev, !enable);
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex); mutex_unlock(&adev->pm.mutex);
} else { } else {
mutex_lock(&adev->pm.mutex); if (enable) {
adev->pm.dpm.vce_active = false; mutex_lock(&adev->pm.mutex);
mutex_unlock(&adev->pm.mutex); adev->pm.dpm.vce_active = true;
/* XXX select vce level based on ring/task */
adev->pm.dpm.vce_level = AMDGPU_VCE_LEVEL_AC_ALL;
mutex_unlock(&adev->pm.mutex);
} else {
mutex_lock(&adev->pm.mutex);
adev->pm.dpm.vce_active = false;
mutex_unlock(&adev->pm.mutex);
}
amdgpu_pm_compute_clocks(adev);
} }
amdgpu_pm_compute_clocks(adev);
} }
} }
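Both helpers above invert the request before passing it down: the powerplay path and the legacy powergate hook take a "gate" flag, so enabling UVD/VCE means *not* power-gating it. A one-line reminder of the convention (name illustrative):

#include <stdbool.h>

static inline bool gate_from_enable(bool enable)
{
	return !enable;
}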
...@@ -685,10 +729,13 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev) ...@@ -685,10 +729,13 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
{ {
int i; int i;
for (i = 0; i < adev->pm.dpm.num_ps; i++) { if (adev->pp_enabled)
printk("== power state %d ==\n", i); /* TO DO */
return;
for (i = 0; i < adev->pm.dpm.num_ps; i++)
amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
}
} }
int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
...@@ -698,8 +745,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) ...@@ -698,8 +745,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
if (adev->pm.sysfs_initialized) if (adev->pm.sysfs_initialized)
return 0; return 0;
if (adev->pm.funcs->get_temperature == NULL) if (!adev->pp_enabled) {
return 0; if (adev->pm.funcs->get_temperature == NULL)
return 0;
}
adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
DRIVER_NAME, adev, DRIVER_NAME, adev,
hwmon_groups); hwmon_groups);
...@@ -748,32 +798,43 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) ...@@ -748,32 +798,43 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
if (!adev->pm.dpm_enabled) if (!adev->pm.dpm_enabled)
return; return;
mutex_lock(&adev->pm.mutex); if (adev->pp_enabled) {
int i = 0;
amdgpu_display_bandwidth_update(adev);
mutex_lock(&adev->ring_lock);
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (ring && ring->ready)
amdgpu_fence_wait_empty(ring);
}
mutex_unlock(&adev->ring_lock);
/* update active crtc counts */ amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
adev->pm.dpm.new_active_crtcs = 0; } else {
adev->pm.dpm.new_active_crtc_count = 0; mutex_lock(&adev->pm.mutex);
if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { adev->pm.dpm.new_active_crtcs = 0;
list_for_each_entry(crtc, adev->pm.dpm.new_active_crtc_count = 0;
&ddev->mode_config.crtc_list, head) { if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
amdgpu_crtc = to_amdgpu_crtc(crtc); list_for_each_entry(crtc,
if (crtc->enabled) { &ddev->mode_config.crtc_list, head) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); amdgpu_crtc = to_amdgpu_crtc(crtc);
adev->pm.dpm.new_active_crtc_count++; if (crtc->enabled) {
adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
adev->pm.dpm.new_active_crtc_count++;
}
} }
} }
} /* update battery/ac status */
if (power_supply_is_system_supplied() > 0)
/* update battery/ac status */ adev->pm.dpm.ac_power = true;
if (power_supply_is_system_supplied() > 0) else
adev->pm.dpm.ac_power = true; adev->pm.dpm.ac_power = false;
else
adev->pm.dpm.ac_power = false;
amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex); amdgpu_dpm_change_power_state_locked(adev);
mutex_unlock(&adev->pm.mutex);
}
} }
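In the legacy branch above, the enabled CRTCs are folded into a bitmask plus a count, which the locked DPM state selection then consumes. A standalone model of that accumulation:

#include <stdio.h>

int main(void)
{
	int enabled[] = { 1, 0, 1, 1 };		/* hypothetical CRTC states */
	unsigned int new_active_crtcs = 0, new_active_crtc_count = 0;

	for (unsigned int crtc_id = 0; crtc_id < 4; ++crtc_id) {
		if (enabled[crtc_id]) {
			new_active_crtcs |= 1u << crtc_id;
			new_active_crtc_count++;
		}
	}
	printf("mask=0x%x count=%u\n",
	       new_active_crtcs, new_active_crtc_count);	/* 0xd, 3 */
	return 0;
}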
/* /*
...@@ -787,7 +848,13 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) ...@@ -787,7 +848,13 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev; struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private; struct amdgpu_device *adev = dev->dev_private;
if (adev->pm.dpm_enabled) { if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
return 0;
}
if (adev->pp_enabled) {
amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
} else {
mutex_lock(&adev->pm.mutex); mutex_lock(&adev->pm.mutex);
if (adev->pm.funcs->debugfs_print_current_performance_level) if (adev->pm.funcs->debugfs_print_current_performance_level)
amdgpu_dpm_debugfs_print_current_performance_level(adev, m); amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
......
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#include "atom.h"
#include "amdgpu.h"
#include "amd_shared.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include "amdgpu_pm.h"
#include <drm/amdgpu_drm.h>
#include "amdgpu_powerplay.h"
#include "cik_dpm.h"
#include "vi_dpm.h"
static int amdgpu_powerplay_init(struct amdgpu_device *adev)
{
int ret = 0;
struct amd_powerplay *amd_pp;
amd_pp = &(adev->powerplay);
if (adev->pp_enabled) {
#ifdef CONFIG_DRM_AMD_POWERPLAY
struct amd_pp_init *pp_init;
pp_init = kzalloc(sizeof(struct amd_pp_init), GFP_KERNEL);
if (pp_init == NULL)
return -ENOMEM;
pp_init->chip_family = adev->family;
pp_init->chip_id = adev->asic_type;
pp_init->device = amdgpu_cgs_create_device(adev);
ret = amd_powerplay_init(pp_init, amd_pp);
kfree(pp_init);
#endif
} else {
amd_pp->pp_handle = (void *)adev;
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
case CHIP_BONAIRE:
case CHIP_HAWAII:
amd_pp->ip_funcs = &ci_dpm_ip_funcs;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_KAVERI:
amd_pp->ip_funcs = &kv_dpm_ip_funcs;
break;
#endif
case CHIP_TOPAZ:
amd_pp->ip_funcs = &iceland_dpm_ip_funcs;
break;
case CHIP_TONGA:
amd_pp->ip_funcs = &tonga_dpm_ip_funcs;
break;
case CHIP_FIJI:
amd_pp->ip_funcs = &fiji_dpm_ip_funcs;
break;
case CHIP_CARRIZO:
case CHIP_STONEY:
amd_pp->ip_funcs = &cz_dpm_ip_funcs;
break;
default:
ret = -EINVAL;
break;
}
}
return ret;
}
static int amdgpu_pp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int ret = 0;
#ifdef CONFIG_DRM_AMD_POWERPLAY
switch (adev->asic_type) {
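	/* both branches currently resolve to the same expression; the
	 * switch is a placeholder for per-ASIC defaults once powerplay
	 * is enabled by default */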
case CHIP_TONGA:
case CHIP_FIJI:
adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
break;
default:
adev->pp_enabled = (amdgpu_powerplay > 0) ? true : false;
break;
}
#else
adev->pp_enabled = false;
#endif
ret = amdgpu_powerplay_init(adev);
if (ret)
return ret;
if (adev->powerplay.ip_funcs->early_init)
ret = adev->powerplay.ip_funcs->early_init(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_late_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->late_init)
ret = adev->powerplay.ip_funcs->late_init(
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled)
amdgpu_pm_sysfs_init(adev);
#endif
return ret;
}
static int amdgpu_pp_sw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->sw_init)
ret = adev->powerplay.ip_funcs->sw_init(
adev->powerplay.pp_handle);
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled) {
if (amdgpu_dpm == 0)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
}
#endif
return ret;
}
static int amdgpu_pp_sw_fini(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->sw_fini)
ret = adev->powerplay.ip_funcs->sw_fini(
adev->powerplay.pp_handle);
if (ret)
return ret;
#ifdef CONFIG_DRM_AMD_POWERPLAY
if (adev->pp_enabled) {
amdgpu_pm_sysfs_fini(adev);
amd_powerplay_fini(adev->powerplay.pp_handle);
}
#endif
return ret;
}
static int amdgpu_pp_hw_init(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pp_enabled && adev->firmware.smu_load)
amdgpu_ucode_init_bo(adev);
if (adev->powerplay.ip_funcs->hw_init)
ret = adev->powerplay.ip_funcs->hw_init(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_hw_fini(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->hw_fini)
ret = adev->powerplay.ip_funcs->hw_fini(
adev->powerplay.pp_handle);
if (adev->pp_enabled && adev->firmware.smu_load)
amdgpu_ucode_fini_bo(adev);
return ret;
}
static int amdgpu_pp_suspend(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->suspend)
ret = adev->powerplay.ip_funcs->suspend(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_resume(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->resume)
ret = adev->powerplay.ip_funcs->resume(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->set_clockgating_state)
ret = adev->powerplay.ip_funcs->set_clockgating_state(
adev->powerplay.pp_handle, state);
return ret;
}
static int amdgpu_pp_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->set_powergating_state)
ret = adev->powerplay.ip_funcs->set_powergating_state(
adev->powerplay.pp_handle, state);
return ret;
}
static bool amdgpu_pp_is_idle(void *handle)
{
bool ret = true;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->is_idle)
ret = adev->powerplay.ip_funcs->is_idle(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_wait_for_idle(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->wait_for_idle)
ret = adev->powerplay.ip_funcs->wait_for_idle(
adev->powerplay.pp_handle);
return ret;
}
static int amdgpu_pp_soft_reset(void *handle)
{
int ret = 0;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->soft_reset)
ret = adev->powerplay.ip_funcs->soft_reset(
adev->powerplay.pp_handle);
return ret;
}
static void amdgpu_pp_print_status(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->powerplay.ip_funcs->print_status)
adev->powerplay.ip_funcs->print_status(
adev->powerplay.pp_handle);
}
const struct amd_ip_funcs amdgpu_pp_ip_funcs = {
.early_init = amdgpu_pp_early_init,
.late_init = amdgpu_pp_late_init,
.sw_init = amdgpu_pp_sw_init,
.sw_fini = amdgpu_pp_sw_fini,
.hw_init = amdgpu_pp_hw_init,
.hw_fini = amdgpu_pp_hw_fini,
.suspend = amdgpu_pp_suspend,
.resume = amdgpu_pp_resume,
.is_idle = amdgpu_pp_is_idle,
.wait_for_idle = amdgpu_pp_wait_for_idle,
.soft_reset = amdgpu_pp_soft_reset,
.print_status = amdgpu_pp_print_status,
.set_clockgating_state = amdgpu_pp_set_clockgating_state,
.set_powergating_state = amdgpu_pp_set_powergating_state,
};
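Every amdgpu_pp_* callback in the table above follows the same guard-then-forward shape. A standalone model of that wrapper pattern, where a missing hook is treated as success so each backend (legacy dpm or powerplay) implements only what it needs:

#include <stdio.h>

struct ip_funcs {
	int (*hw_init)(void *handle);
};

static int pp_forward_hw_init(const struct ip_funcs *funcs, void *handle)
{
	if (funcs->hw_init)
		return funcs->hw_init(handle);
	return 0;	/* no hook: nothing to do, report success */
}

static int fake_hw_init(void *handle) { (void)handle; return 0; }

int main(void)
{
	struct ip_funcs legacy = { NULL }, pp = { fake_hw_init };

	printf("%d %d\n", pp_forward_hw_init(&legacy, NULL),
	       pp_forward_hw_init(&pp, NULL));	/* 0 0 */
	return 0;
}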
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_POWERPLAY_H__
#define __AMDGPU_POWERPLAY_H__
#include "amd_shared.h"
extern const struct amd_ip_funcs amdgpu_pp_ip_funcs;
#endif /* __AMDGPU_POWERPLAY_H__ */
...@@ -75,50 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev) ...@@ -75,50 +75,50 @@ static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
} }
/** /**
* amdgpu_vm_get_bos - add the vm BOs to a validation list * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
* *
* @vm: vm providing the BOs * @vm: vm providing the BOs
* @head: head of validation list * @validated: head of validation list
* @entry: entry to add
* *
* Add the page directory to the list of BOs to * Add the page directory to the list of BOs to
* validate for command submission (cayman+). * validate for command submission.
*/ */
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev, void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct amdgpu_vm *vm, struct list_head *validated,
struct list_head *head) struct amdgpu_bo_list_entry *entry)
{ {
struct amdgpu_bo_list_entry *list; entry->robj = vm->page_directory;
unsigned i, idx; entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &vm->page_directory->tbo;
entry->tv.shared = true;
list_add(&entry->tv.head, validated);
}
list = drm_malloc_ab(vm->max_pde_used + 2, /**
sizeof(struct amdgpu_bo_list_entry)); * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
if (!list) { *
return NULL; * @vm: vm providing the BOs
} * @duplicates: head of duplicates list
*
* Add the page directory to the BO duplicates list
* for command submission.
*/
void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates)
{
unsigned i;
/* add the vm page table to the list */ /* add the vm page table to the list */
list[0].robj = vm->page_directory; for (i = 0; i <= vm->max_pde_used; ++i) {
list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM; struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[0].priority = 0; if (!entry->robj)
list[0].tv.bo = &vm->page_directory->tbo;
list[0].tv.shared = true;
list_add(&list[0].tv.head, head);
for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
if (!vm->page_tables[i].bo)
continue; continue;
list[idx].robj = vm->page_tables[i].bo; list_add(&entry->tv.head, duplicates);
list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
list[idx].priority = 0;
list[idx].tv.bo = &list[idx].robj->tbo;
list[idx].tv.shared = true;
list_add(&list[idx++].tv.head, head);
} }
return list;
} }
/** /**
...@@ -461,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, ...@@ -461,7 +461,7 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
/* walk over the address space and update the page directory */ /* walk over the address space and update the page directory */
for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) { for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo; struct amdgpu_bo *bo = vm->page_tables[pt_idx].entry.robj;
uint64_t pde, pt; uint64_t pde, pt;
if (bo == NULL) if (bo == NULL)
...@@ -638,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev, ...@@ -638,7 +638,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
/* walk over the address space and update the page tables */ /* walk over the address space and update the page tables */
for (addr = start; addr < end; ) { for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> amdgpu_vm_block_size; uint64_t pt_idx = addr >> amdgpu_vm_block_size;
struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo; struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj;
unsigned nptes; unsigned nptes;
uint64_t pte; uint64_t pte;
int r; int r;
...@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
return -EINVAL; return -EINVAL;
/* make sure object fit at this offset */ /* make sure object fit at this offset */
eaddr = saddr + size; eaddr = saddr + size - 1;
if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
return -EINVAL; return -EINVAL;
last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE; last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
if (last_pfn > adev->vm_manager.max_pfn) { if (last_pfn >= adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n", dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
last_pfn, adev->vm_manager.max_pfn); last_pfn, adev->vm_manager.max_pfn);
return -EINVAL; return -EINVAL;
} }
...@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
eaddr /= AMDGPU_GPU_PAGE_SIZE; eaddr /= AMDGPU_GPU_PAGE_SIZE;
spin_lock(&vm->it_lock); spin_lock(&vm->it_lock);
it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1); it = interval_tree_iter_first(&vm->va, saddr, eaddr);
spin_unlock(&vm->it_lock); spin_unlock(&vm->it_lock);
if (it) { if (it) {
struct amdgpu_bo_va_mapping *tmp; struct amdgpu_bo_va_mapping *tmp;
...@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
INIT_LIST_HEAD(&mapping->list); INIT_LIST_HEAD(&mapping->list);
mapping->it.start = saddr; mapping->it.start = saddr;
mapping->it.last = eaddr - 1; mapping->it.last = eaddr;
mapping->offset = offset; mapping->offset = offset;
mapping->flags = flags; mapping->flags = flags;
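The hunk above switches the mapping bookkeeping to inclusive end addresses: eaddr now names the last byte (and, after division, the last page) covered, the limit check becomes >=, and the interval tree stores [start, last] directly instead of subtracting one at each use. A worked example of the arithmetic:

#include <stdio.h>

#define GPU_PAGE 4096ULL

int main(void)
{
	unsigned long long saddr = 0, size = 2 * GPU_PAGE;
	unsigned long long eaddr = saddr + size - 1;	/* last byte: 8191 */
	unsigned long long last_pfn = eaddr / GPU_PAGE;	/* last page: 1 */

	saddr /= GPU_PAGE;
	eaddr /= GPU_PAGE;
	printf("it.start=%llu it.last=%llu last_pfn=%llu\n",
	       saddr, eaddr, last_pfn);			/* 0, 1, 1 */
	return 0;
}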
...@@ -1070,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -1070,9 +1070,11 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
/* walk over the address space and allocate the page tables */ /* walk over the address space and allocate the page tables */
for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) { for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
struct reservation_object *resv = vm->page_directory->tbo.resv; struct reservation_object *resv = vm->page_directory->tbo.resv;
struct amdgpu_bo_list_entry *entry;
struct amdgpu_bo *pt; struct amdgpu_bo *pt;
if (vm->page_tables[pt_idx].bo) entry = &vm->page_tables[pt_idx].entry;
if (entry->robj)
continue; continue;
r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8, r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
...@@ -1094,8 +1096,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev, ...@@ -1094,8 +1096,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
goto error_free; goto error_free;
} }
entry->robj = pt;
entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
entry->priority = 0;
entry->tv.bo = &entry->robj->tbo;
entry->tv.shared = true;
vm->page_tables[pt_idx].addr = 0; vm->page_tables[pt_idx].addr = 0;
vm->page_tables[pt_idx].bo = pt;
} }
return 0; return 0;
...@@ -1326,7 +1333,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) ...@@ -1326,7 +1333,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
} }
for (i = 0; i < amdgpu_vm_num_pdes(adev); i++) for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
amdgpu_bo_unref(&vm->page_tables[i].bo); amdgpu_bo_unref(&vm->page_tables[i].entry.robj);
kfree(vm->page_tables); kfree(vm->page_tables);
amdgpu_bo_unref(&vm->page_directory); amdgpu_bo_unref(&vm->page_directory);
......
...@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA ...@@ -243,7 +243,7 @@ static void amdgpu_atombios_dp_get_adjust_train(const u8 link_status[DP_LINK_STA
/* convert bits per color to bits per pixel */ /* convert bits per color to bits per pixel */
/* get bpc from the EDID */ /* get bpc from the EDID */
static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) static unsigned amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
{ {
if (bpc == 0) if (bpc == 0)
return 24; return 24;
...@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc) ...@@ -251,64 +251,32 @@ static int amdgpu_atombios_dp_convert_bpc_to_bpp(int bpc)
return bpc * 3; return bpc * 3;
} }
/* get the max pix clock supported by the link rate and lane num */
static int amdgpu_atombios_dp_get_max_dp_pix_clock(int link_rate,
int lane_num,
int bpp)
{
return (link_rate * lane_num * 8) / bpp;
}
/***** amdgpu specific DP functions *****/ /***** amdgpu specific DP functions *****/
/* First get the min lane# when low rate is used according to pixel clock static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector,
* (prefer low rate), second check max lane# supported by DP panel,
* if the max lane# < low rate lane# then use max lane# instead.
*/
static int amdgpu_atombios_dp_get_dp_lane_number(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE], const u8 dpcd[DP_DPCD_SIZE],
int pix_clock) unsigned pix_clock,
{ unsigned *dp_lanes, unsigned *dp_rate)
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
int max_link_rate = drm_dp_max_link_rate(dpcd);
int max_lane_num = drm_dp_max_lane_count(dpcd);
int lane_num;
int max_dp_pix_clock;
for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
max_dp_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
if (pix_clock <= max_dp_pix_clock)
break;
}
return lane_num;
}
static int amdgpu_atombios_dp_get_dp_link_clock(struct drm_connector *connector,
const u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{ {
int bpp = amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector)); unsigned bpp =
int lane_num, max_pix_clock; amdgpu_atombios_dp_convert_bpc_to_bpp(amdgpu_connector_get_monitor_bpc(connector));
static const unsigned link_rates[3] = { 162000, 270000, 540000 };
if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) == unsigned max_link_rate = drm_dp_max_link_rate(dpcd);
ENCODER_OBJECT_ID_NUTMEG) unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
return 270000; unsigned lane_num, i, max_pix_clock;
lane_num = amdgpu_atombios_dp_get_dp_lane_number(connector, dpcd, pix_clock); for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(162000, lane_num, bpp); for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
if (pix_clock <= max_pix_clock) max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
return 162000; if (max_pix_clock >= pix_clock) {
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(270000, lane_num, bpp); *dp_lanes = lane_num;
if (pix_clock <= max_pix_clock) *dp_rate = link_rates[i];
return 270000; return 0;
if (amdgpu_connector_is_dp12_capable(connector)) { }
max_pix_clock = amdgpu_atombios_dp_get_max_dp_pix_clock(540000, lane_num, bpp); }
if (pix_clock <= max_pix_clock)
return 540000;
} }
return drm_dp_max_link_rate(dpcd); return -EINVAL;
} }
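A standalone model of the unified search above: walk lane counts from narrow to wide and, for each, link rates from low to high, taking the first combination whose bandwidth suffices under the driver's (lanes * rate * 8) / bpp scaling against the pixel clock:

#include <stdio.h>

static const unsigned link_rates[] = { 162000, 270000, 540000 };

static int pick_link_config(unsigned pix_clock, unsigned bpp,
			    unsigned max_rate, unsigned max_lanes,
			    unsigned *dp_lanes, unsigned *dp_rate)
{
	unsigned lane_num, i, max_pix_clock;

	for (lane_num = 1; lane_num <= max_lanes; lane_num <<= 1) {
		for (i = 0; i < sizeof(link_rates) / sizeof(link_rates[0]) &&
		     link_rates[i] <= max_rate; i++) {
			max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
			if (max_pix_clock >= pix_clock) {
				*dp_lanes = lane_num;
				*dp_rate = link_rates[i];
				return 0;
			}
		}
	}
	return -1;	/* mode does not fit the link */
}

int main(void)
{
	unsigned lanes, rate;

	/* 1080p60 (~148.5 MHz pixel clock) at 24 bpp, 4-lane HBR2 sink */
	if (pick_link_config(148500, 24, 540000, 4, &lanes, &rate) == 0)
		printf("%u lane(s) @ %u kHz\n", lanes, rate);
	return 0;
}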
static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev, static u8 amdgpu_atombios_dp_encoder_service(struct amdgpu_device *adev,
...@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, ...@@ -422,6 +390,7 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
{ {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector; struct amdgpu_connector_atom_dig *dig_connector;
int ret;
if (!amdgpu_connector->con_priv) if (!amdgpu_connector->con_priv)
return; return;
...@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector, ...@@ -429,10 +398,14 @@ void amdgpu_atombios_dp_set_link_config(struct drm_connector *connector,
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
dig_connector->dp_clock = ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); mode->clock,
dig_connector->dp_lane_count = &dig_connector->dp_lane_count,
amdgpu_atombios_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock); &dig_connector->dp_clock);
if (ret) {
dig_connector->dp_clock = 0;
dig_connector->dp_lane_count = 0;
}
} }
} }
...@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector, ...@@ -441,14 +414,17 @@ int amdgpu_atombios_dp_mode_valid_helper(struct drm_connector *connector,
{ {
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
struct amdgpu_connector_atom_dig *dig_connector; struct amdgpu_connector_atom_dig *dig_connector;
int dp_clock; unsigned dp_lanes, dp_clock;
int ret;
if (!amdgpu_connector->con_priv) if (!amdgpu_connector->con_priv)
return MODE_CLOCK_HIGH; return MODE_CLOCK_HIGH;
dig_connector = amdgpu_connector->con_priv; dig_connector = amdgpu_connector->con_priv;
dp_clock = ret = amdgpu_atombios_dp_get_dp_link_config(connector, dig_connector->dpcd,
amdgpu_atombios_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock); mode->clock, &dp_lanes, &dp_clock);
if (ret)
return MODE_CLOCK_HIGH;
if ((dp_clock == 540000) && if ((dp_clock == 540000) &&
(!amdgpu_connector_is_dp12_capable(connector))) (!amdgpu_connector_is_dp12_capable(connector)))
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include "amdgpu_vce.h" #include "amdgpu_vce.h"
#include "cikd.h" #include "cikd.h"
#include "atom.h" #include "atom.h"
#include "amd_pcie.h"
#include "cik.h" #include "cik.h"
#include "gmc_v7_0.h" #include "gmc_v7_0.h"
...@@ -65,6 +66,7 @@ ...@@ -65,6 +66,7 @@
#include "oss/oss_2_0_sh_mask.h" #include "oss/oss_2_0_sh_mask.h"
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
#include "amdgpu_powerplay.h"
/* /*
* Indirect registers accessor * Indirect registers accessor
...@@ -929,6 +931,37 @@ static bool cik_read_disabled_bios(struct amdgpu_device *adev) ...@@ -929,6 +931,37 @@ static bool cik_read_disabled_bios(struct amdgpu_device *adev)
return r; return r;
} }
static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
unsigned long flags;
u32 i, length_dw;
if (bios == NULL)
return false;
if (length_bytes == 0)
return false;
/* APU vbios image is part of sbios image */
if (adev->flags & AMD_IS_APU)
return false;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_0, 0);
/* set index to data for continuous read */
WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
}
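The new helper uses the SMC's indexed ROM access: one write selects the starting word, a second selects the auto-incrementing data port, then each data read returns the next dword (and since ALIGN(length_bytes, 4)/4 rounds up, callers are expected to pass dword-sized buffers). A simplified toy model of the access pattern:

#include <stdio.h>

static unsigned rom[4] = { 0xAA55AA55, 0x1, 0x2, 0x3 };
static unsigned rom_pos;

static void set_rom_index(unsigned idx) { rom_pos = idx; }
static unsigned read_rom_data(void)     { return rom[rom_pos++]; }

int main(void)
{
	unsigned buf[4];
	unsigned i;

	set_rom_index(0);		/* "set rom index to 0" */
	for (i = 0; i < 4; i++)		/* continuous, auto-incrementing */
		buf[i] = read_rom_data();
	printf("first dword: 0x%08X\n", buf[0]);
	return 0;
}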
static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { static struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
{mmGRBM_STATUS, false}, {mmGRBM_STATUS, false},
{mmGB_ADDR_CONFIG, false}, {mmGB_ADDR_CONFIG, false},
...@@ -1563,8 +1596,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) ...@@ -1563,8 +1596,8 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
{ {
struct pci_dev *root = adev->pdev->bus->self; struct pci_dev *root = adev->pdev->bus->self;
int bridge_pos, gpu_pos; int bridge_pos, gpu_pos;
u32 speed_cntl, mask, current_data_rate; u32 speed_cntl, current_data_rate;
int ret, i; int i;
u16 tmp16; u16 tmp16;
if (pci_is_root_bus(adev->pdev->bus)) if (pci_is_root_bus(adev->pdev->bus))
...@@ -1576,23 +1609,20 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) ...@@ -1576,23 +1609,20 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU) if (adev->flags & AMD_IS_APU)
return; return;
ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask); if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
if (ret != 0) CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
return;
if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
return; return;
speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL); speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL);
current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >> current_data_rate = (speed_cntl & PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) >>
PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
if (mask & DRM_PCIE_SPEED_80) { if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
if (current_data_rate == 2) { if (current_data_rate == 2) {
DRM_INFO("PCIE gen 3 link speeds already enabled\n"); DRM_INFO("PCIE gen 3 link speeds already enabled\n");
return; return;
} }
DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n"); DRM_INFO("enabling PCIE gen 3 link speeds, disable with amdgpu.pcie_gen2=0\n");
} else if (mask & DRM_PCIE_SPEED_50) { } else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) {
if (current_data_rate == 1) { if (current_data_rate == 1) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n"); DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return; return;
...@@ -1608,7 +1638,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) ...@@ -1608,7 +1638,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
if (!gpu_pos) if (!gpu_pos)
return; return;
if (mask & DRM_PCIE_SPEED_80) { if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) {
/* re-try equalization if gen3 is not already enabled */ /* re-try equalization if gen3 is not already enabled */
if (current_data_rate != 2) { if (current_data_rate != 2) {
u16 bridge_cfg, gpu_cfg; u16 bridge_cfg, gpu_cfg;
...@@ -1703,9 +1733,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev) ...@@ -1703,9 +1733,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16); pci_read_config_word(adev->pdev, gpu_pos + PCI_EXP_LNKCTL2, &tmp16);
tmp16 &= ~0xf; tmp16 &= ~0xf;
if (mask & DRM_PCIE_SPEED_80) if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
tmp16 |= 3; /* gen3 */ tmp16 |= 3; /* gen3 */
else if (mask & DRM_PCIE_SPEED_50) else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
tmp16 |= 2; /* gen2 */ tmp16 |= 2; /* gen2 */
else else
tmp16 |= 1; /* gen1 */ tmp16 |= 1; /* gen1 */
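The rewrite above swaps the per-call DRM speed-cap query for the CAIL support mask cached at early init (via amdgpu_get_pcie_info() below), and ends by programming the target link speed field of PCI_EXP_LNKCTL2: bits [3:0] encode 1 = 2.5 GT/s, 2 = 5.0 GT/s, 3 = 8.0 GT/s. A sketch of that final selection, with the flag values as stand-ins for the real CAIL_PCIE_LINK_SPEED_SUPPORT_* constants:

#define GEN2_SUPPORTED (1u << 1)	/* stand-in, not the real CAIL value */
#define GEN3_SUPPORTED (1u << 2)	/* stand-in, not the real CAIL value */

static unsigned target_link_speed(unsigned gen_mask)
{
	if (gen_mask & GEN3_SUPPORTED)
		return 3;		/* gen3, 8.0 GT/s */
	if (gen_mask & GEN2_SUPPORTED)
		return 2;		/* gen2, 5.0 GT/s */
	return 1;			/* gen1, 2.5 GT/s */
}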
...@@ -1922,7 +1952,7 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] = ...@@ -1922,7 +1952,7 @@ static const struct amdgpu_ip_block_version bonaire_ip_blocks[] =
.major = 7, .major = 7,
.minor = 0, .minor = 0,
.rev = 0, .rev = 0,
.funcs = &ci_dpm_ip_funcs, .funcs = &amdgpu_pp_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_DCE, .type = AMD_IP_BLOCK_TYPE_DCE,
...@@ -1990,7 +2020,7 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] = ...@@ -1990,7 +2020,7 @@ static const struct amdgpu_ip_block_version hawaii_ip_blocks[] =
.major = 7, .major = 7,
.minor = 0, .minor = 0,
.rev = 0, .rev = 0,
.funcs = &ci_dpm_ip_funcs, .funcs = &amdgpu_pp_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_DCE, .type = AMD_IP_BLOCK_TYPE_DCE,
...@@ -2058,7 +2088,7 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] = ...@@ -2058,7 +2088,7 @@ static const struct amdgpu_ip_block_version kabini_ip_blocks[] =
.major = 7, .major = 7,
.minor = 0, .minor = 0,
.rev = 0, .rev = 0,
.funcs = &kv_dpm_ip_funcs, .funcs = &amdgpu_pp_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_DCE, .type = AMD_IP_BLOCK_TYPE_DCE,
...@@ -2126,7 +2156,7 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] = ...@@ -2126,7 +2156,7 @@ static const struct amdgpu_ip_block_version mullins_ip_blocks[] =
.major = 7, .major = 7,
.minor = 0, .minor = 0,
.rev = 0, .rev = 0,
.funcs = &kv_dpm_ip_funcs, .funcs = &amdgpu_pp_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_DCE, .type = AMD_IP_BLOCK_TYPE_DCE,
...@@ -2194,7 +2224,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] = ...@@ -2194,7 +2224,7 @@ static const struct amdgpu_ip_block_version kaveri_ip_blocks[] =
.major = 7, .major = 7,
.minor = 0, .minor = 0,
.rev = 0, .rev = 0,
.funcs = &kv_dpm_ip_funcs, .funcs = &amdgpu_pp_ip_funcs,
}, },
{ {
.type = AMD_IP_BLOCK_TYPE_DCE, .type = AMD_IP_BLOCK_TYPE_DCE,
...@@ -2267,6 +2297,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev) ...@@ -2267,6 +2297,7 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
static const struct amdgpu_asic_funcs cik_asic_funcs = static const struct amdgpu_asic_funcs cik_asic_funcs =
{ {
.read_disabled_bios = &cik_read_disabled_bios, .read_disabled_bios = &cik_read_disabled_bios,
.read_bios_from_rom = &cik_read_bios_from_rom,
.read_register = &cik_read_register, .read_register = &cik_read_register,
.reset = &cik_asic_reset, .reset = &cik_asic_reset,
.set_vga_state = &cik_vga_set_state, .set_vga_state = &cik_vga_set_state,
...@@ -2417,6 +2448,8 @@ static int cik_common_early_init(void *handle) ...@@ -2417,6 +2448,8 @@ static int cik_common_early_init(void *handle)
return -EINVAL; return -EINVAL;
} }
amdgpu_get_pcie_info(adev);
return 0; return 0;
} }
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include "drmP.h" #include "drmP.h"
#include "amdgpu.h" #include "amdgpu.h"
#include "fiji_smumgr.h" #include "fiji_smum.h"
MODULE_FIRMWARE("amdgpu/fiji_smc.bin"); MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
......
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef FIJI_PP_SMC_H
#define FIJI_PP_SMC_H
#pragma pack(push, 1)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_SWSTATE_FLAG_VCE 0x04
#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
#define PPSMC_SYSTEMFLAG_GDDR5 0x04
#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
#define PPSMC_DPM2FLAGS_OCP 0x04
#define PPSMC_DISPLAY_WATERMARK_LOW 0
#define PPSMC_DISPLAY_WATERMARK_HIGH 1
#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
#define PPSMC_STATEFLAG_POWERBOOST 0x02
#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
#define PPSMC_STATEFLAG_POWERSHIFT 0x08
#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
#define FDO_MODE_HARDWARE 0
#define FDO_MODE_PIECE_WISE_LINEAR 1
enum FAN_CONTROL {
FAN_CONTROL_FUZZY,
FAN_CONTROL_TABLE
};
//Gemini Modes
#define PPSMC_GeminiModeNone 0 //Single GPU board
#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board
#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board
#define PPSMC_Result_OK ((uint16_t)0x01)
#define PPSMC_Result_NoMore ((uint16_t)0x02)
#define PPSMC_Result_NotNow ((uint16_t)0x03)
#define PPSMC_Result_Failed ((uint16_t)0xFF)
#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
typedef uint16_t PPSMC_Result;
#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
#define PPSMC_MSG_Halt ((uint16_t)0x10)
#define PPSMC_MSG_Resume ((uint16_t)0x11)
#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
#define PPSMC_CACHistoryStart ((uint16_t)0x57)
#define PPSMC_CACHistoryStop ((uint16_t)0x58)
#define PPSMC_TDPClampingActive ((uint16_t)0x59)
#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
#define PPSMC_StartFanControl ((uint16_t)0x5B)
#define PPSMC_StopFanControl ((uint16_t)0x5C)
#define PPSMC_NoDisplay ((uint16_t)0x5D)
#define PPSMC_HasDisplay ((uint16_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
#define PPSMC_OCPActive ((uint16_t)0x6C)
#define PPSMC_OCPInactive ((uint16_t)0x6D)
#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
#define PPSMC_FlushDataCache ((uint16_t)0x80)
#define PPSMC_FlushInstrCache ((uint16_t)0x81)
#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
#define PPSMC_MSG_Test ((uint16_t)0x100)
#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
typedef uint16_t PPSMC_Msg;
#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
#define PPSMC_EVENT_STATUS_DC 0x00000004
#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
#pragma pack(pop)
#endif
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
#include "drmP.h" #include "drmP.h"
#include "amdgpu.h" #include "amdgpu.h"
#include "fiji_ppsmc.h" #include "fiji_ppsmc.h"
#include "fiji_smumgr.h" #include "fiji_smum.h"
#include "smu_ucode_xfer_vi.h" #include "smu_ucode_xfer_vi.h"
#include "amdgpu_ucode.h" #include "amdgpu_ucode.h"
......
@@ -370,6 +370,10 @@ static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
+	/* In case the PCI BAR is larger than the actual amount of vram */
+	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
+		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 	/* unless the user had overridden it, set the gart
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
...
@@ -476,6 +476,10 @@ static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
 	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
 	adev->mc.visible_vram_size = adev->mc.aper_size;
+	/* In case the PCI BAR is larger than the actual amount of vram */
+	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
+		adev->mc.visible_vram_size = adev->mc.real_vram_size;
 	/* unless the user had overridden it, set the gart
 	 * size equal to the 1024 or vram, whichever is larger.
 	 */
...
@@ -1324,9 +1328,181 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
	return 0;
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t data;
if (enable) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
data = RREG32(mmMC_HUB_MISC_SIP_CG);
data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_SIP_CG, data);
data = RREG32(mmMC_HUB_MISC_VM_CG);
data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_VM_CG, data);
data = RREG32(mmMC_XPB_CLK_GAT);
data |= MC_XPB_CLK_GAT__ENABLE_MASK;
WREG32(mmMC_XPB_CLK_GAT, data);
data = RREG32(mmATC_MISC_CG);
data |= ATC_MISC_CG__ENABLE_MASK;
WREG32(mmATC_MISC_CG, data);
data = RREG32(mmMC_CITF_MISC_WR_CG);
data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_WR_CG, data);
data = RREG32(mmMC_CITF_MISC_RD_CG);
data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_RD_CG, data);
data = RREG32(mmMC_CITF_MISC_VM_CG);
data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_VM_CG, data);
data = RREG32(mmVM_L2_CG);
data |= VM_L2_CG__ENABLE_MASK;
WREG32(mmVM_L2_CG, data);
} else {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
data = RREG32(mmMC_HUB_MISC_SIP_CG);
data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_SIP_CG, data);
data = RREG32(mmMC_HUB_MISC_VM_CG);
data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
WREG32(mmMC_HUB_MISC_VM_CG, data);
data = RREG32(mmMC_XPB_CLK_GAT);
data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
WREG32(mmMC_XPB_CLK_GAT, data);
data = RREG32(mmATC_MISC_CG);
data &= ~ATC_MISC_CG__ENABLE_MASK;
WREG32(mmATC_MISC_CG, data);
data = RREG32(mmMC_CITF_MISC_WR_CG);
data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_WR_CG, data);
data = RREG32(mmMC_CITF_MISC_RD_CG);
data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_RD_CG, data);
data = RREG32(mmMC_CITF_MISC_VM_CG);
data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
WREG32(mmMC_CITF_MISC_VM_CG, data);
data = RREG32(mmVM_L2_CG);
data &= ~VM_L2_CG__ENABLE_MASK;
WREG32(mmVM_L2_CG, data);
}
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t data;
if (enable) {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
data = RREG32(mmMC_HUB_MISC_SIP_CG);
data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_SIP_CG, data);
data = RREG32(mmMC_HUB_MISC_VM_CG);
data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_VM_CG, data);
data = RREG32(mmMC_XPB_CLK_GAT);
data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
WREG32(mmMC_XPB_CLK_GAT, data);
data = RREG32(mmATC_MISC_CG);
data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
WREG32(mmATC_MISC_CG, data);
data = RREG32(mmMC_CITF_MISC_WR_CG);
data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_WR_CG, data);
data = RREG32(mmMC_CITF_MISC_RD_CG);
data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_RD_CG, data);
data = RREG32(mmMC_CITF_MISC_VM_CG);
data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_VM_CG, data);
data = RREG32(mmVM_L2_CG);
data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
WREG32(mmVM_L2_CG, data);
} else {
data = RREG32(mmMC_HUB_MISC_HUB_CG);
data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_HUB_CG, data);
data = RREG32(mmMC_HUB_MISC_SIP_CG);
data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_SIP_CG, data);
data = RREG32(mmMC_HUB_MISC_VM_CG);
data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_HUB_MISC_VM_CG, data);
data = RREG32(mmMC_XPB_CLK_GAT);
data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
WREG32(mmMC_XPB_CLK_GAT, data);
data = RREG32(mmATC_MISC_CG);
data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
WREG32(mmATC_MISC_CG, data);
data = RREG32(mmMC_CITF_MISC_WR_CG);
data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_WR_CG, data);
data = RREG32(mmMC_CITF_MISC_RD_CG);
data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_RD_CG, data);
data = RREG32(mmMC_CITF_MISC_VM_CG);
data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
WREG32(mmMC_CITF_MISC_VM_CG, data);
data = RREG32(mmVM_L2_CG);
data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
WREG32(mmVM_L2_CG, data);
}
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_FIJI:
fiji_update_mc_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_mc_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
break;
}
	return 0;
}
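Every fiji_update_* helper above repeats the same read-modify-write sequence once per register; a hedged distillation of that pattern follows (fiji_set_cg_mask() is an illustrative helper, not code from this patch):

static void fiji_set_cg_mask(struct amdgpu_device *adev,
			     u32 reg, u32 mask, bool enable)
{
	u32 data = RREG32(reg);		/* read current gating control */

	if (enable)
		data |= mask;		/* enable clock gating / light sleep */
	else
		data &= ~mask;		/* force clocks on */
	WREG32(reg, data);		/* written back unconditionally, as above */
}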
...
@@ -727,18 +727,20 @@ static int sdma_v3_0_start(struct amdgpu_device *adev)
 {
 	int r, i;
-	if (!adev->firmware.smu_load) {
-		r = sdma_v3_0_load_microcode(adev);
-		if (r)
-			return r;
-	} else {
-		for (i = 0; i < adev->sdma.num_instances; i++) {
-			r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
-				(i == 0) ?
-				AMDGPU_UCODE_ID_SDMA0 :
-				AMDGPU_UCODE_ID_SDMA1);
-			if (r)
-				return -EINVAL;
+	if (!adev->pp_enabled) {
+		if (!adev->firmware.smu_load) {
+			r = sdma_v3_0_load_microcode(adev);
+			if (r)
+				return r;
+		} else {
+			for (i = 0; i < adev->sdma.num_instances; i++) {
+				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
+					(i == 0) ?
+					AMDGPU_UCODE_ID_SDMA0 :
+					AMDGPU_UCODE_ID_SDMA1);
+				if (r)
+					return -EINVAL;
+			}
 		}
 	}
@@ -1427,9 +1429,114 @@ static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
	return 0;
}
static void fiji_update_sdma_medium_grain_clock_gating(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
if (enable) {
temp = data = RREG32(mmSDMA0_CLK_CTRL);
data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL, data);
temp = data = RREG32(mmSDMA1_CLK_CTRL);
data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
if (data != temp)
WREG32(mmSDMA1_CLK_CTRL, data);
} else {
temp = data = RREG32(mmSDMA0_CLK_CTRL);
data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
if (data != temp)
WREG32(mmSDMA0_CLK_CTRL, data);
temp = data = RREG32(mmSDMA1_CLK_CTRL);
data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
if (data != temp)
WREG32(mmSDMA1_CLK_CTRL, data);
}
}
static void fiji_update_sdma_medium_grain_light_sleep(
struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
if (enable) {
temp = data = RREG32(mmSDMA0_POWER_CNTL);
data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL, data);
temp = data = RREG32(mmSDMA1_POWER_CNTL);
data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA1_POWER_CNTL, data);
} else {
temp = data = RREG32(mmSDMA0_POWER_CNTL);
data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA0_POWER_CNTL, data);
temp = data = RREG32(mmSDMA1_POWER_CNTL);
data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
if (temp != data)
WREG32(mmSDMA1_POWER_CNTL, data);
}
}
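/* Editor's note (illustration, not part of the patch): unlike the gmc_v8_0
 * helpers, the SDMA and VCE paths latch the original value and skip the
 * MMIO write when nothing changed:
 *
 *	temp = data = RREG32(reg);
 *	data |= mask;
 *	if (data != temp)
 *		WREG32(reg, data);
 *
 * which avoids redundant register writes on repeated cg-state transitions.
 */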
static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_FIJI:
fiji_update_sdma_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_sdma_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
break;
}
	return 0;
}
...
@@ -24,7 +24,7 @@
 #include <linux/firmware.h>
 #include "drmP.h"
 #include "amdgpu.h"
-#include "tonga_smumgr.h"
+#include "tonga_smum.h"
 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
...
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef TONGA_PP_SMC_H
#define TONGA_PP_SMC_H
#pragma pack(push, 1)
#define PPSMC_SWSTATE_FLAG_DC 0x01
#define PPSMC_SWSTATE_FLAG_UVD 0x02
#define PPSMC_SWSTATE_FLAG_VCE 0x04
#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08
#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00
#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01
#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff
#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01
#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02
#define PPSMC_SYSTEMFLAG_GDDR5 0x04
#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10
#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20
#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07
#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00
#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01
#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10
#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20
#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40
#define PPSMC_DPM2FLAGS_TDPCLMP 0x01
#define PPSMC_DPM2FLAGS_PWRSHFT 0x02
#define PPSMC_DPM2FLAGS_OCP 0x04
#define PPSMC_DISPLAY_WATERMARK_LOW 0
#define PPSMC_DISPLAY_WATERMARK_HIGH 1
#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01
#define PPSMC_STATEFLAG_POWERBOOST 0x02
#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04
#define PPSMC_STATEFLAG_POWERSHIFT 0x08
#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10
#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
#define FDO_MODE_HARDWARE 0
#define FDO_MODE_PIECE_WISE_LINEAR 1
enum FAN_CONTROL {
FAN_CONTROL_FUZZY,
FAN_CONTROL_TABLE
};
#define PPSMC_Result_OK ((uint16_t)0x01)
#define PPSMC_Result_NoMore ((uint16_t)0x02)
#define PPSMC_Result_NotNow ((uint16_t)0x03)
#define PPSMC_Result_Failed ((uint16_t)0xFF)
#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE)
#define PPSMC_Result_UnknownVT ((uint16_t)0xFD)
typedef uint16_t PPSMC_Result;
#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x))
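/* Editor's sketch (not in the original header): bit 7 marks failure, so
 * PPSMC_Result_Failed (0xFF), _UnknownCmd (0xFE) and _UnknownVT (0xFD) all
 * satisfy PPSMC_isERROR(), while _OK/_NoMore/_NotNow (0x01-0x03) do not:
 *
 *	PPSMC_Result ret = send_msg_to_smc(hwmgr, PPSMC_MSG_EnableCac);
 *	if (PPSMC_isERROR(ret))
 *		pr_err("SMC rejected message: 0x%x\n", ret);
 *
 * send_msg_to_smc() is an assumed helper, not declared here.
 */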
#define PPSMC_MSG_Halt ((uint16_t)0x10)
#define PPSMC_MSG_Resume ((uint16_t)0x11)
#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12)
#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13)
#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14)
#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15)
#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16)
#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17)
#define PPSMC_MSG_LevelUp ((uint16_t)0x18)
#define PPSMC_MSG_LevelDown ((uint16_t)0x19)
#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a)
#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20)
#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f)
#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40)
#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41)
#define PPSMC_MSG_ForceHigh ((uint16_t)0x42)
#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43)
#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51)
#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52)
#define PPSMC_MSG_EnableCac ((uint16_t)0x53)
#define PPSMC_MSG_DisableCac ((uint16_t)0x54)
#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55)
#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56)
#define PPSMC_CACHistoryStart ((uint16_t)0x57)
#define PPSMC_CACHistoryStop ((uint16_t)0x58)
#define PPSMC_TDPClampingActive ((uint16_t)0x59)
#define PPSMC_TDPClampingInactive ((uint16_t)0x5A)
#define PPSMC_StartFanControl ((uint16_t)0x5B)
#define PPSMC_StopFanControl ((uint16_t)0x5C)
#define PPSMC_NoDisplay ((uint16_t)0x5D)
#define PPSMC_HasDisplay ((uint16_t)0x5E)
#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60)
#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61)
#define PPSMC_MSG_EnableULV ((uint16_t)0x62)
#define PPSMC_MSG_DisableULV ((uint16_t)0x63)
#define PPSMC_MSG_EnterULV ((uint16_t)0x64)
#define PPSMC_MSG_ExitULV ((uint16_t)0x65)
#define PPSMC_PowerShiftActive ((uint16_t)0x6A)
#define PPSMC_PowerShiftInactive ((uint16_t)0x6B)
#define PPSMC_OCPActive ((uint16_t)0x6C)
#define PPSMC_OCPInactive ((uint16_t)0x6D)
#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E)
#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F)
#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70)
#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71)
#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72)
#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73)
#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74)
#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75)
#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76)
#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77)
#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78)
#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79)
#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A)
#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B)
#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C)
#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D)
#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E)
#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F)
#define PPSMC_FlushDataCache ((uint16_t)0x80)
#define PPSMC_FlushInstrCache ((uint16_t)0x81)
#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82)
#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83)
#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84)
#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85)
#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86)
#define PPSMC_MSG_EnableDTE ((uint16_t)0x87)
#define PPSMC_MSG_DisableDTE ((uint16_t)0x88)
#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89)
#define PPSMC_MSG_SmcSpaceWriteDWordInc ((uint16_t)0x8A)
#define PPSMC_MSG_SmcSpaceWriteWordInc ((uint16_t)0x8B)
#define PPSMC_MSG_SmcSpaceWriteByteInc ((uint16_t)0x8C)
#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90)
#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91)
#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92)
#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93)
#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94)
#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95)
#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96)
#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97)
#define PPSMC_MSG_GPIO17 ((uint16_t)0x98)
#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99)
#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A)
#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B)
#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C)
#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D)
#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E)
#define PPSMC_MSG_BREAK ((uint16_t)0xF8)
#define PPSMC_MSG_Test ((uint16_t)0x100)
#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t)0x250)
#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t)0x251)
#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t)0x252)
#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t)0x253)
#define PPSMC_MSG_LoadUcodes ((uint16_t)0x254)
typedef uint16_t PPSMC_Msg;
#define PPSMC_EVENT_STATUS_THERMAL 0x00000001
#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002
#define PPSMC_EVENT_STATUS_DC 0x00000004
#define PPSMC_EVENT_STATUS_GPIO17 0x00000008
#pragma pack(pop)
#endif
@@ -25,7 +25,7 @@
 #include "drmP.h"
 #include "amdgpu.h"
 #include "tonga_ppsmc.h"
-#include "tonga_smumgr.h"
+#include "tonga_smum.h"
 #include "smu_ucode_xfer_vi.h"
 #include "amdgpu_ucode.h"
...
@@ -279,6 +279,234 @@ static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
u32 data, data1;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
if (enable) {
data |= UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK |
UVD_SUVD_CGC_GATE__SRE_H264_MASK |
UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
UVD_SUVD_CGC_GATE__SIT_H264_MASK |
UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
UVD_SUVD_CGC_GATE__SCM_H264_MASK |
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
UVD_SUVD_CGC_GATE__SDB_H264_MASK |
UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
} else {
data &= ~(UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__LMI_UMC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK);
data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK |
UVD_SUVD_CGC_GATE__SRE_H264_MASK |
UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
UVD_SUVD_CGC_GATE__SIT_H264_MASK |
UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
UVD_SUVD_CGC_GATE__SCM_H264_MASK |
UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
UVD_SUVD_CGC_GATE__SDB_H264_MASK |
UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
}
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
bool enable)
{
u32 data, data1;
data = RREG32(mmUVD_CGC_GATE);
data1 = RREG32(mmUVD_SUVD_CGC_GATE);
if (enable) {
data |= UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK;
data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK;
} else {
data &= ~(UVD_CGC_GATE__SYS_MASK |
UVD_CGC_GATE__UDEC_MASK |
UVD_CGC_GATE__MPEG2_MASK |
UVD_CGC_GATE__RBC_MASK |
UVD_CGC_GATE__LMI_MC_MASK |
UVD_CGC_GATE__LMI_UMC_MASK |
UVD_CGC_GATE__IDCT_MASK |
UVD_CGC_GATE__MPRD_MASK |
UVD_CGC_GATE__MPC_MASK |
UVD_CGC_GATE__LBSI_MASK |
UVD_CGC_GATE__LRBBM_MASK |
UVD_CGC_GATE__UDEC_RE_MASK |
UVD_CGC_GATE__UDEC_CM_MASK |
UVD_CGC_GATE__UDEC_IT_MASK |
UVD_CGC_GATE__UDEC_DB_MASK |
UVD_CGC_GATE__UDEC_MP_MASK |
UVD_CGC_GATE__WCB_MASK |
UVD_CGC_GATE__VCPU_MASK |
UVD_CGC_GATE__SCPU_MASK);
data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
UVD_SUVD_CGC_GATE__SIT_MASK |
UVD_SUVD_CGC_GATE__SMP_MASK |
UVD_SUVD_CGC_GATE__SCM_MASK |
UVD_SUVD_CGC_GATE__SDB_MASK);
}
WREG32(mmUVD_CGC_GATE, data);
WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
bool swmode)
{
u32 data, data1 = 0, data2;
/* Always un-gate UVD REGS bit */
data = RREG32(mmUVD_CGC_GATE);
data &= ~(UVD_CGC_GATE__REGS_MASK);
WREG32(mmUVD_CGC_GATE, data);
data = RREG32(mmUVD_CGC_CTRL);
data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);
data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
if (swmode) {
data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__JPEG_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK);
data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
} else {
data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
UVD_CGC_CTRL__SYS_MODE_MASK |
UVD_CGC_CTRL__UDEC_MODE_MASK |
UVD_CGC_CTRL__MPEG2_MODE_MASK |
UVD_CGC_CTRL__REGS_MODE_MASK |
UVD_CGC_CTRL__RBC_MODE_MASK |
UVD_CGC_CTRL__LMI_MC_MODE_MASK |
UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
UVD_CGC_CTRL__IDCT_MODE_MASK |
UVD_CGC_CTRL__MPRD_MODE_MASK |
UVD_CGC_CTRL__MPC_MODE_MASK |
UVD_CGC_CTRL__LBSI_MODE_MASK |
UVD_CGC_CTRL__LRBBM_MODE_MASK |
UVD_CGC_CTRL__WCB_MODE_MASK |
UVD_CGC_CTRL__VCPU_MODE_MASK |
UVD_CGC_CTRL__SCPU_MODE_MASK;
data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
}
WREG32(mmUVD_CGC_CTRL, data);
WREG32(mmUVD_SUVD_CGC_CTRL, data2);
data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
data |= data1;
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
}
/**
 * uvd_v6_0_start - start UVD block
 *
@@ -303,8 +531,19 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
 	uvd_v6_0_mc_resume(adev);
-	/* disable clock gating */
-	WREG32(mmUVD_CGC_GATE, 0);
+	/* Set dynamic clock gating in S/W control mode */
+	if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
+		if (adev->flags & AMD_IS_APU)
+			cz_set_uvd_clock_gating_branches(adev, false);
+		else
+			tonga_set_uvd_clock_gating_branches(adev, false);
+		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
+	} else {
+		/* disable clock gating */
+		uint32_t data = RREG32(mmUVD_CGC_CTRL);
+		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
+		WREG32(mmUVD_CGC_CTRL, data);
+	}
 	/* disable interupt */
 	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));
@@ -758,6 +997,24 @@ static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
return 0;
if (enable) {
if (adev->flags & AMD_IS_APU)
cz_set_uvd_clock_gating_branches(adev, enable);
else
tonga_set_uvd_clock_gating_branches(adev, enable);
uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
} else {
uint32_t data = RREG32(mmUVD_CGC_CTRL);
data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
WREG32(mmUVD_CGC_CTRL, data);
}
	return 0;
}
...
@@ -103,6 +103,108 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override)
{
u32 tmp, data;
tmp = data = RREG32(mmVCE_RB_ARB_CTRL);
if (override)
data |= VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
else
data &= ~VCE_RB_ARB_CTRL__VCE_CGTT_OVERRIDE_MASK;
if (tmp != data)
WREG32(mmVCE_RB_ARB_CTRL, data);
}
static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
bool gated)
{
u32 tmp, data;
/* Set Override to disable Clock Gating */
vce_v3_0_override_vce_clock_gating(adev, true);
if (!gated) {
/* Force CLOCK ON for VCE_CLOCK_GATING_B,
* {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
* VREG can be FORCE ON or set to Dynamic, but can't be OFF
*/
tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
data |= 0x1ff;
data &= ~0xef0000;
if (tmp != data)
WREG32(mmVCE_CLOCK_GATING_B, data);
/* Force CLOCK ON for VCE_UENC_CLOCK_GATING,
* {*_FORCE_ON, *_FORCE_OFF} = {1, 0}
*/
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0x3ff000;
data &= ~0xffc00000;
if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING, data);
/* set VCE_UENC_CLOCK_GATING_2 */
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x2;
data &= ~0x2;
if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
/* Force CLOCK ON for VCE_UENC_REG_CLOCK_GATING */
tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data |= 0x37f;
if (tmp != data)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
/* Force VCE_UENC_DMA_DCLK_CTRL Clock ON */
tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data |= VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8;
if (tmp != data)
WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
} else {
/* Force CLOCK OFF for VCE_CLOCK_GATING_B,
* {*, *_FORCE_OFF} = {*, 1}
* set VREG to Dynamic, as it can't be OFF
*/
tmp = data = RREG32(mmVCE_CLOCK_GATING_B);
data &= ~0x80010;
data |= 0xe70008;
if (tmp != data)
WREG32(mmVCE_CLOCK_GATING_B, data);
/* Force CLOCK OFF for VCE_UENC_CLOCK_GATING,
* Force ClOCK OFF takes precedent over Force CLOCK ON setting.
* {*_FORCE_ON, *_FORCE_OFF} = {*, 1}
*/
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING);
data |= 0xffc00000;
if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING, data);
/* Set VCE_UENC_CLOCK_GATING_2 */
tmp = data = RREG32(mmVCE_UENC_CLOCK_GATING_2);
data |= 0x10000;
if (tmp != data)
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
/* Set VCE_UENC_REG_CLOCK_GATING to dynamic */
tmp = data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
if (tmp != data)
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
/* Set VCE_UENC_DMA_DCLK_CTRL CG always in dynamic mode */
tmp = data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
data &= ~(VCE_UENC_DMA_DCLK_CTRL__WRDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__RDDMCLK_FORCEON_MASK |
VCE_UENC_DMA_DCLK_CTRL__REGCLK_FORCEON_MASK |
0x8);
if (tmp != data)
WREG32(mmVCE_UENC_DMA_DCLK_CTRL, data);
}
vce_v3_0_override_vce_clock_gating(adev, false);
}
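/* Editor's note: in the ungated path above, "data |= 0x2; data &= ~0x2;"
 * sets and immediately clears bit 1 of VCE_UENC_CLOCK_GATING_2, so the net
 * effect is clearing that bit; the conditional write still skips the MMIO
 * access when the register already had it clear.
 */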
/**
 * vce_v3_0_start - start VCE block
 *
@@ -121,7 +223,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
 		if (adev->vce.harvest_config & (1 << idx))
 			continue;
-		if(idx == 0)
+		if (idx == 0)
 			WREG32_P(mmGRBM_GFX_INDEX, 0,
 				~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
 		else
@@ -174,6 +276,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);
/* Set Clock-Gating off */
if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		mutex_unlock(&adev->grbm_idx_mutex);
@@ -609,6 +715,47 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
static int vce_v3_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
return 0;
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < 2; i++) {
/* Program VCE Instance 0 or 1 if not harvested */
if (adev->vce.harvest_config & (1 << i))
continue;
if (i == 0)
WREG32_P(mmGRBM_GFX_INDEX, 0,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
else
WREG32_P(mmGRBM_GFX_INDEX,
GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
uint32_t data = RREG32(mmVCE_CLOCK_GATING_A);
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(mmVCE_CLOCK_GATING_A, data);
/* initialize VCE_UENC_CLOCK_GATING: Clock ON/OFF delay */
data = RREG32(mmVCE_UENC_CLOCK_GATING);
data &= ~(0xf | 0xff0);
data |= ((0x0 << 0) | (0x04 << 4));
WREG32(mmVCE_UENC_CLOCK_GATING, data);
}
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
mutex_unlock(&adev->grbm_idx_mutex);
	return 0;
}
...
@@ -31,6 +31,7 @@
 #include "amdgpu_vce.h"
 #include "amdgpu_ucode.h"
 #include "atom.h"
+#include "amd_pcie.h"
 #include "gmc/gmc_8_1_d.h"
 #include "gmc/gmc_8_1_sh_mask.h"
@@ -71,6 +72,7 @@
#include "uvd_v5_0.h" #include "uvd_v5_0.h"
#include "uvd_v6_0.h" #include "uvd_v6_0.h"
#include "vce_v3_0.h" #include "vce_v3_0.h"
#include "amdgpu_powerplay.h"
/* /*
* Indirect registers accessor * Indirect registers accessor
...@@ -376,6 +378,38 @@ static bool vi_read_disabled_bios(struct amdgpu_device *adev) ...@@ -376,6 +378,38 @@ static bool vi_read_disabled_bios(struct amdgpu_device *adev)
	WREG32_SMC(ixROM_CNTL, rom_cntl);
	return r;
}
static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
u8 *bios, u32 length_bytes)
{
u32 *dw_ptr;
unsigned long flags;
u32 i, length_dw;
if (bios == NULL)
return false;
if (length_bytes == 0)
return false;
/* APU vbios image is part of sbios image */
if (adev->flags & AMD_IS_APU)
return false;
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
/* take the smc lock since we are using the smc index */
spin_lock_irqsave(&adev->smc_idx_lock, flags);
/* set rom index to 0 */
WREG32(mmSMC_IND_INDEX_0, ixROM_INDEX);
WREG32(mmSMC_IND_DATA_0, 0);
/* set index to data for continous read */
WREG32(mmSMC_IND_INDEX_0, ixROM_DATA);
for (i = 0; i < length_dw; i++)
dw_ptr[i] = RREG32(mmSMC_IND_DATA_0);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return true;
}
static struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
	{mmGB_MACROTILE_MODE7, true},
};
@@ -1019,9 +1053,6 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
{
-	u32 mask;
-	int ret;
 	if (pci_is_root_bus(adev->pdev->bus))
 		return;
@@ -1031,11 +1062,8 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
 	if (adev->flags & AMD_IS_APU)
 		return;
-	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
-	if (ret != 0)
-		return;
-	if (!(mask & (DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80)))
+	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
 		return;
 	/* todo */
@@ -1098,7 +1126,7 @@ static const struct amdgpu_ip_block_version topaz_ip_blocks[] =
 	.major = 7,
 	.minor = 1,
 	.rev = 0,
-	.funcs = &iceland_dpm_ip_funcs,
+	.funcs = &amdgpu_pp_ip_funcs,
 },
 {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
@@ -1145,7 +1173,7 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
 	.major = 7,
 	.minor = 1,
 	.rev = 0,
-	.funcs = &tonga_dpm_ip_funcs,
+	.funcs = &amdgpu_pp_ip_funcs,
 },
 {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1213,7 +1241,7 @@ static const struct amdgpu_ip_block_version fiji_ip_blocks[] =
 	.major = 7,
 	.minor = 1,
 	.rev = 0,
-	.funcs = &fiji_dpm_ip_funcs,
+	.funcs = &amdgpu_pp_ip_funcs,
 },
 {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1281,7 +1309,7 @@ static const struct amdgpu_ip_block_version cz_ip_blocks[] =
 	.major = 8,
 	.minor = 0,
 	.rev = 0,
-	.funcs = &cz_dpm_ip_funcs,
+	.funcs = &amdgpu_pp_ip_funcs
 },
 {
 	.type = AMD_IP_BLOCK_TYPE_DCE,
@@ -1354,20 +1382,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
 static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
 {
-	if (adev->asic_type == CHIP_TOPAZ)
-		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
-			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
-	else if (adev->flags & AMD_IS_APU)
+	if (adev->flags & AMD_IS_APU)
 		return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
 			>> ATI_REV_ID_FUSE_MACRO__SHIFT;
 	else
-		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
-			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
+		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
+			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
 }
 static const struct amdgpu_asic_funcs vi_asic_funcs =
 {
 	.read_disabled_bios = &vi_read_disabled_bios,
+	.read_bios_from_rom = &vi_read_bios_from_rom,
 	.read_register = &vi_read_register,
 	.reset = &vi_asic_reset,
 	.set_vga_state = &vi_vga_set_state,
@@ -1416,7 +1442,8 @@ static int vi_common_early_init(void *handle)
 		break;
 	case CHIP_FIJI:
 		adev->has_uvd = true;
-		adev->cg_flags = 0;
+		adev->cg_flags = AMDGPU_CG_SUPPORT_UVD_MGCG |
+			AMDGPU_CG_SUPPORT_VCE_MGCG;
 		adev->pg_flags = 0;
 		adev->external_rev_id = adev->rev_id + 0x3c;
 		break;
@@ -1442,6 +1469,8 @@
 	if (amdgpu_smc_load_fw && smc_enabled)
 		adev->firmware.smu_load = true;
+	amdgpu_get_pcie_info(adev);
 	return 0;
 }
@@ -1515,9 +1544,95 @@ static int vi_common_soft_reset(void *handle)
	return 0;
}
static void fiji_update_bif_medium_grain_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32_PCIE(ixPCIE_CNTL2);
if (enable)
data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK;
else
data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
PCIE_CNTL2__MST_MEM_LS_EN_MASK |
PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
if (temp != data)
WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void fiji_update_hdp_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_HOST_PATH_CNTL);
if (enable)
data &= ~HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
else
data |= HDP_HOST_PATH_CNTL__CLOCK_GATING_DIS_MASK;
if (temp != data)
WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void fiji_update_hdp_light_sleep(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32(mmHDP_MEM_POWER_LS);
if (enable)
data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
else
data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;
if (temp != data)
WREG32(mmHDP_MEM_POWER_LS, data);
}
static void fiji_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
bool enable)
{
uint32_t temp, data;
temp = data = RREG32_SMC(ixCGTT_ROM_CLK_CTRL0);
if (enable)
data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
else
data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;
if (temp != data)
WREG32_SMC(ixCGTT_ROM_CLK_CTRL0, data);
}
static int vi_common_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_FIJI:
fiji_update_bif_medium_grain_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_hdp_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_hdp_light_sleep(adev,
state == AMD_CG_STATE_GATE ? true : false);
fiji_update_rom_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE ? true : false);
break;
default:
break;
}
	return 0;
}
...
@@ -21,14 +21,63 @@
  *
  */
-#ifndef AMDGPU_ACPI_H
-#define AMDGPU_ACPI_H
+#ifndef AMD_ACPI_H
+#define AMD_ACPI_H
-struct amdgpu_device;
-struct acpi_bus_event;
-int amdgpu_atif_handler(struct amdgpu_device *adev,
-			struct acpi_bus_event *event);
+#define ACPI_AC_CLASS "ac_adapter"
+struct atif_verify_interface {
+	u16 size;		/* structure size in bytes (includes size field) */
+	u16 version;		/* version */
+	u32 notification_mask;	/* supported notifications mask */
+	u32 function_bits;	/* supported functions bit vector */
+} __packed;
struct atif_system_params {
u16 size; /* structure size in bytes (includes size field) */
u32 valid_mask; /* valid flags mask */
u32 flags; /* flags */
u8 command_code; /* notify command code */
} __packed;
struct atif_sbios_requests {
u16 size; /* structure size in bytes (includes size field) */
u32 pending; /* pending sbios requests */
u8 panel_exp_mode; /* panel expansion mode */
u8 thermal_gfx; /* thermal state: target gfx controller */
u8 thermal_state; /* thermal state: state id (0: exit state, non-0: state) */
u8 forced_power_gfx; /* forced power state: target gfx controller */
u8 forced_power_state; /* forced power state: state id */
u8 system_power_src; /* system power source */
u8 backlight_level; /* panel backlight level (0-255) */
} __packed;
#define ATIF_NOTIFY_MASK 0x3
#define ATIF_NOTIFY_NONE 0
#define ATIF_NOTIFY_81 1
#define ATIF_NOTIFY_N 2
struct atcs_verify_interface {
u16 size; /* structure size in bytes (includes size field) */
u16 version; /* version */
u32 function_bits; /* supported functions bit vector */
} __packed;
#define ATCS_VALID_FLAGS_MASK 0x3
struct atcs_pref_req_input {
u16 size; /* structure size in bytes (includes size field) */
u16 client_id; /* client id (bit 2-0: func num, 7-3: dev num, 15-8: bus num) */
u16 valid_flags_mask; /* valid flags mask */
u16 flags; /* flags */
u8 req_type; /* request type */
u8 perf_req; /* performance request */
} __packed;
struct atcs_pref_req_output {
u16 size; /* structure size in bytes (includes size field) */
u8 ret_val; /* return value */
} __packed;
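/* Editor's sketch (not in the original header): packing a PCI BDF into the
 * client_id field described above (bus in bits 15-8, dev in 7-3, func in
 * 2-0):
 *
 *	static u16 atcs_client_id(u8 bus, u8 dev, u8 fn)
 *	{
 *		return ((u16)bus << 8) | ((dev & 0x1f) << 3) | (fn & 0x7);
 *	}
 */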
/* AMD hw uses four ACPI control methods:
 * 1. ATIF
...
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __AMD_PCIE_H__
#define __AMD_PCIE_H__
/* Following flags shows PCIe link speed supported in driver which are decided by chipset and ASIC */
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00010000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00020000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00040000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_MASK 0xFFFF0000
#define CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT 16
/* Following flags shows PCIe link speed supported by ASIC H/W.*/
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 0x00000001
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 0x00000002
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3 0x00000004
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 0x00040000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 0x00080000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 0x00100000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 0x00200000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
#endif
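The driver-level and ASIC-level capabilities deliberately occupy disjoint halves of one 32-bit word; a hedged sketch of splitting them with the masks above (function and variable names are illustrative, not from this series):

static void report_pcie_caps(uint32_t pcie_gen_mask)
{
	/* ASIC-side speed caps live in the low half-word */
	uint32_t asic_caps = pcie_gen_mask & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK;
	/* platform/driver caps live in the high half-word */
	uint32_t sys_caps = pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;

	if ((asic_caps & CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
	    (sys_caps & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3))
		DRM_INFO("PCIe gen3 usable on this board\n");
}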
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __AMD_PCIE_HELPERS_H__
#define __AMD_PCIE_HELPERS_H__
#include "amd_pcie.h"
static inline bool is_pcie_gen3_supported(uint32_t pcie_link_speed_cap)
{
if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
return true;
return false;
}
static inline bool is_pcie_gen2_supported(uint32_t pcie_link_speed_cap)
{
if (pcie_link_speed_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
return true;
return false;
}
/* Get the new PCIE speed given the ASIC PCIE Cap and the NewState's requested PCIE speed*/
static inline uint16_t get_pcie_gen_support(uint32_t pcie_link_speed_cap,
uint16_t ns_pcie_gen)
{
uint32_t asic_pcie_link_speed_cap = (pcie_link_speed_cap &
CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK);
uint32_t sys_pcie_link_speed_cap = (pcie_link_speed_cap &
CAIL_PCIE_LINK_SPEED_SUPPORT_MASK);
switch (asic_pcie_link_speed_cap) {
case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1:
return PP_PCIEGen1;
case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2:
return PP_PCIEGen2;
case CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3:
return PP_PCIEGen3;
default:
if (is_pcie_gen3_supported(sys_pcie_link_speed_cap) &&
(ns_pcie_gen == PP_PCIEGen3)) {
return PP_PCIEGen3;
} else if (is_pcie_gen2_supported(sys_pcie_link_speed_cap) &&
((ns_pcie_gen == PP_PCIEGen3) || (ns_pcie_gen == PP_PCIEGen2))) {
return PP_PCIEGen2;
}
}
return PP_PCIEGen1;
}
static inline uint16_t get_pcie_lane_support(uint32_t pcie_lane_width_cap,
uint16_t ns_pcie_lanes)
{
int i, j;
uint16_t new_pcie_lanes = ns_pcie_lanes;
uint16_t pcie_lanes[7] = {1, 2, 4, 8, 12, 16, 32};
switch (pcie_lane_width_cap) {
case 0:
printk(KERN_ERR "No valid PCIE lane width reported");
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
new_pcie_lanes = 1;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
new_pcie_lanes = 2;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
new_pcie_lanes = 4;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
new_pcie_lanes = 8;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
new_pcie_lanes = 12;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
new_pcie_lanes = 16;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
new_pcie_lanes = 32;
break;
default:
for (i = 0; i < 7; i++) {
if (ns_pcie_lanes == pcie_lanes[i]) {
if (pcie_lane_width_cap & (0x10000 << i)) {
break;
} else {
for (j = i - 1; j >= 0; j--) {
if (pcie_lane_width_cap & (0x10000 << j)) {
new_pcie_lanes = pcie_lanes[j];
break;
}
}
if (j < 0) {
for (j = i + 1; j < 7; j++) {
if (pcie_lane_width_cap & (0x10000 << j)) {
new_pcie_lanes = pcie_lanes[j];
break;
}
}
if (j > 7)
printk(KERN_ERR "Cannot find a valid PCIE lane width!");
}
}
break;
}
}
break;
}
return new_pcie_lanes;
}
#endif
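A hedged usage sketch: a powerplay hwmgr clamping a requested PCIe state to the platform capabilities. The cap words would come from cgs_query_system_info() with CGS_SYSTEM_INFO_PCIE_GEN_INFO and CGS_SYSTEM_INFO_PCIE_MLW (declared in cgs_common.h below); PP_PCIEGen3 is assumed to come from the powerplay hwmgr headers, which are not shown here.

static void clamp_pcie_request(uint32_t pcie_gen_cap, uint32_t pcie_lane_cap)
{
	/* ask for gen3 x16 and fall back to whatever is actually supported */
	uint16_t gen = get_pcie_gen_support(pcie_gen_cap, PP_PCIEGen3);
	uint16_t lanes = get_pcie_lane_support(pcie_lane_cap, 16);

	printk(KERN_INFO "PCIE dpm: gen id %u, x%u lanes\n", gen, lanes);
}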
@@ -85,6 +85,27 @@ enum amd_powergating_state {
	AMD_PG_STATE_UNGATE,
};
enum amd_pm_state_type {
/* not used for dpm */
POWER_STATE_TYPE_DEFAULT,
POWER_STATE_TYPE_POWERSAVE,
/* user selectable states */
POWER_STATE_TYPE_BATTERY,
POWER_STATE_TYPE_BALANCED,
POWER_STATE_TYPE_PERFORMANCE,
/* internal states */
POWER_STATE_TYPE_INTERNAL_UVD,
POWER_STATE_TYPE_INTERNAL_UVD_SD,
POWER_STATE_TYPE_INTERNAL_UVD_HD,
POWER_STATE_TYPE_INTERNAL_UVD_HD2,
POWER_STATE_TYPE_INTERNAL_UVD_MVC,
POWER_STATE_TYPE_INTERNAL_BOOT,
POWER_STATE_TYPE_INTERNAL_THERMAL,
POWER_STATE_TYPE_INTERNAL_ACPI,
POWER_STATE_TYPE_INTERNAL_ULV,
POWER_STATE_TYPE_INTERNAL_3DPERF,
};
struct amd_ip_funcs {
	/* sets up early driver state (pre sw_init), does not configure hw - Optional */
	int (*early_init)(void *handle);
...
@@ -596,6 +596,7 @@
 #define mmSWRST_EP_CONTROL_0 0x14ac
 #define mmCPM_CONTROL 0x14b8
 #define mmGSKT_CONTROL 0x14bf
+#define ixSWRST_COMMAND_1 0x1400103
 #define ixLM_CONTROL 0x1400120
 #define ixLM_PCIETXMUX0 0x1400121
 #define ixLM_PCIETXMUX1 0x1400122
...
@@ -2807,5 +2807,18 @@
#define ixDIDT_DBR_WEIGHT0_3 0x90
#define ixDIDT_DBR_WEIGHT4_7 0x91
#define ixDIDT_DBR_WEIGHT8_11 0x92
#define mmTD_EDC_CNT 0x252e
#define mmCPF_EDC_TAG_CNT 0x3188
#define mmCPF_EDC_ROQ_CNT 0x3189
#define mmCPF_EDC_ATC_CNT 0x318a
#define mmCPG_EDC_TAG_CNT 0x318b
#define mmCPG_EDC_ATC_CNT 0x318c
#define mmCPG_EDC_DMA_CNT 0x318d
#define mmCPC_EDC_SCRATCH_CNT 0x318e
#define mmCPC_EDC_UCODE_CNT 0x318f
#define mmCPC_EDC_ATC_CNT 0x3190
#define mmDC_EDC_STATE_CNT 0x3191
#define mmDC_EDC_CSINVOC_CNT 0x3192
#define mmDC_EDC_RESTORE_CNT 0x3193
#endif /* GFX_8_0_D_H */
@@ -550,6 +550,13 @@ typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
// use for ComputeMemoryClockParamTable
typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2
{
COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 ulClock;
ULONG ulReserved;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2;
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
	ATOM_COMPUTE_CLOCK_FREQ ulClock;
@@ -4988,6 +4995,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_3
	ULONG ulSDCMargine;
}ATOM_ASIC_PROFILING_INFO_V3_3;
// for Fiji speed EVV algorithm
typedef struct _ATOM_ASIC_PROFILING_INFO_V3_4
{
ATOM_COMMON_TABLE_HEADER asHeader;
ULONG ulEvvLkgFactor;
ULONG ulBoardCoreTemp;
ULONG ulMaxVddc;
ULONG ulMinVddc;
ULONG ulLoadLineSlop;
ULONG ulLeakageTemp;
ULONG ulLeakageVoltage;
EFUSE_LINEAR_FUNC_PARAM sCACm;
EFUSE_LINEAR_FUNC_PARAM sCACb;
EFUSE_LOGISTIC_FUNC_PARAM sKt_b;
EFUSE_LOGISTIC_FUNC_PARAM sKv_m;
EFUSE_LOGISTIC_FUNC_PARAM sKv_b;
USHORT usLkgEuseIndex;
UCHAR ucLkgEfuseBitLSB;
UCHAR ucLkgEfuseLength;
ULONG ulLkgEncodeLn_MaxDivMin;
ULONG ulLkgEncodeMax;
ULONG ulLkgEncodeMin;
ULONG ulEfuseLogisticAlpha;
USHORT usPowerDpm0;
USHORT usPowerDpm1;
USHORT usPowerDpm2;
USHORT usPowerDpm3;
USHORT usPowerDpm4;
USHORT usPowerDpm5;
USHORT usPowerDpm6;
USHORT usPowerDpm7;
ULONG ulTdpDerateDPM0;
ULONG ulTdpDerateDPM1;
ULONG ulTdpDerateDPM2;
ULONG ulTdpDerateDPM3;
ULONG ulTdpDerateDPM4;
ULONG ulTdpDerateDPM5;
ULONG ulTdpDerateDPM6;
ULONG ulTdpDerateDPM7;
EFUSE_LINEAR_FUNC_PARAM sRoFuse;
ULONG ulEvvDefaultVddc;
ULONG ulEvvNoCalcVddc;
USHORT usParamNegFlag;
USHORT usSpeed_Model;
ULONG ulSM_A0;
ULONG ulSM_A1;
ULONG ulSM_A2;
ULONG ulSM_A3;
ULONG ulSM_A4;
ULONG ulSM_A5;
ULONG ulSM_A6;
ULONG ulSM_A7;
UCHAR ucSM_A0_sign;
UCHAR ucSM_A1_sign;
UCHAR ucSM_A2_sign;
UCHAR ucSM_A3_sign;
UCHAR ucSM_A4_sign;
UCHAR ucSM_A5_sign;
UCHAR ucSM_A6_sign;
UCHAR ucSM_A7_sign;
ULONG ulMargin_RO_a;
ULONG ulMargin_RO_b;
ULONG ulMargin_RO_c;
ULONG ulMargin_fixed;
ULONG ulMargin_Fmax_mean;
ULONG ulMargin_plat_mean;
ULONG ulMargin_Fmax_sigma;
ULONG ulMargin_plat_sigma;
ULONG ulMargin_DC_sigma;
ULONG ulReserved[8]; // Reserved for future ASIC
}ATOM_ASIC_PROFILING_INFO_V3_4;
typedef struct _ATOM_POWER_SOURCE_OBJECT
{
	UCHAR ucPwrSrcId; // Power source
...
@@ -105,6 +105,23 @@ enum cgs_ucode_id {
	CGS_UCODE_ID_MAXIMUM,
};
enum cgs_system_info_id {
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
struct cgs_system_info {
uint64_t size;
uint64_t info_id;
union {
void *ptr;
uint64_t value;
};
uint64_t padding[13];
};
/**
 * struct cgs_clock_limits - Clock limits
 *
@@ -127,8 +144,53 @@ struct cgs_firmware_info {
	void *kptr;
};
struct cgs_mode_info {
uint32_t refresh_rate;
uint32_t ref_clock;
uint32_t vblank_time_us;
};
struct cgs_display_info {
uint32_t display_count;
uint32_t active_display_mask;
struct cgs_mode_info *mode_info;
};
typedef unsigned long cgs_handle_t;
#define CGS_ACPI_METHOD_ATCS 0x53435441
#define CGS_ACPI_METHOD_ATIF 0x46495441
#define CGS_ACPI_METHOD_ATPX 0x58505441
#define CGS_ACPI_FIELD_METHOD_NAME 0x00000001
#define CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT 0x00000002
#define CGS_ACPI_MAX_BUFFER_SIZE 256
#define CGS_ACPI_TYPE_ANY 0x00
#define CGS_ACPI_TYPE_INTEGER 0x01
#define CGS_ACPI_TYPE_STRING 0x02
#define CGS_ACPI_TYPE_BUFFER 0x03
#define CGS_ACPI_TYPE_PACKAGE 0x04
struct cgs_acpi_method_argument {
uint32_t type;
uint32_t method_length;
uint32_t data_length;
union{
uint32_t value;
void *pointer;
};
};
struct cgs_acpi_method_info {
uint32_t size;
uint32_t field;
uint32_t input_count;
uint32_t name;
struct cgs_acpi_method_argument *pinput_argument;
uint32_t output_count;
struct cgs_acpi_method_argument *poutput_argument;
uint32_t padding[9];
};
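
How the pieces combine is easiest to see in a sketch. This is illustrative only: the precise argument contract of cgs_call_acpi_method is not visible in this hunk, so the field usage below is an assumption:

    /* Sketch: describe an ATCS invocation with one integer input
     * and one buffer output. Field semantics are assumed, not verified. */
    uint8_t out_buf[CGS_ACPI_MAX_BUFFER_SIZE];

    struct cgs_acpi_method_argument input = {
        .type = CGS_ACPI_TYPE_INTEGER,
        .data_length = sizeof(uint32_t),
        .value = 0,                       /* hypothetical function index */
    };
    struct cgs_acpi_method_argument output = {
        .type = CGS_ACPI_TYPE_BUFFER,
        .data_length = sizeof(out_buf),
        .pointer = out_buf,
    };
    struct cgs_acpi_method_info method = {
        .size = sizeof(struct cgs_acpi_method_info),
        .field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT,
        .name = CGS_ACPI_METHOD_ATCS,
        .input_count = 1,
        .pinput_argument = &input,
        .output_count = 1,
        .poutput_argument = &output,
    };
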
/**
 * cgs_gpu_mem_info() - Return information about memory heaps
 * @cgs_device: opaque device handle
@@ -493,6 +555,21 @@ typedef int(*cgs_set_clockgating_state)(void *cgs_device,
                  enum amd_ip_block_type block_type,
                  enum amd_clockgating_state state);
typedef int(*cgs_get_active_displays_info)(
void *cgs_device,
struct cgs_display_info *info);
typedef int (*cgs_call_acpi_method)(void *cgs_device,
uint32_t acpi_method,
uint32_t acpi_function,
void *pinput, void *poutput,
uint32_t output_count,
uint32_t input_size,
uint32_t output_size);
typedef int (*cgs_query_system_info)(void *cgs_device,
struct cgs_system_info *sys_info);
struct cgs_ops {
    /* memory management calls (similar to KFD interface) */
    cgs_gpu_mem_info_t gpu_mem_info;
@@ -533,7 +610,12 @@ struct cgs_ops {
    /* cg pg interface*/
    cgs_set_powergating_state set_powergating_state;
    cgs_set_clockgating_state set_clockgating_state;
    /* display manager */
cgs_get_active_displays_info get_active_displays_info;
/* ACPI */
cgs_call_acpi_method call_acpi_method;
/* get system info */
cgs_query_system_info query_system_info;
};

struct cgs_os_ops; /* To be define in OS-specific CGS header */
@@ -620,5 +702,11 @@ struct cgs_device
    CGS_CALL(set_powergating_state, dev, block_type, state)
#define cgs_set_clockgating_state(dev, block_type, state) \
    CGS_CALL(set_clockgating_state, dev, block_type, state)
#define cgs_get_active_displays_info(dev, info) \
CGS_CALL(get_active_displays_info, dev, info)
#define cgs_call_acpi_method(dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size) \
CGS_CALL(call_acpi_method, dev, acpi_method, acpi_function, pintput, poutput, output_count, input_size, output_size)
#define cgs_query_system_info(dev, sys_info) \
CGS_CALL(query_system_info, dev, sys_info)
#endif /* _CGS_COMMON_H */
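
The three new wrapper macros follow the pattern of the existing ones: each expands to a CGS_CALL() dispatch through the device's cgs_ops table, so powerplay code written against this header never reaches into amdgpu internals. On the backend side the wiring would look roughly like this (a sketch; amdgpu's real table lives in amdgpu_cgs.c and the handler names here are invented):

    /* Sketch: a backend fills the new slots in its ops table. */
    static const struct cgs_ops my_cgs_ops = {
        /* ... existing memory / irq / firmware ops ... */
        .get_active_displays_info = my_get_active_displays_info, /* hypothetical */
        .call_acpi_method         = my_call_acpi_method,         /* hypothetical */
        .query_system_info        = my_query_system_info,        /* hypothetical */
    };
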
config DRM_AMD_POWERPLAY
bool "Enable AMD powerplay component"
depends on DRM_AMDGPU
default n
help
	  Select this option to enable the AMD powerplay component.
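
Since the symbol is bool rather than tristate, the powerplay objects are linked straight into the amdgpu module when enabled (CONFIG_DRM_AMD_POWERPLAY=y), and driver code can select the path at compile time. A hedged sketch of that pattern; the real glue is in amdgpu_powerplay.c and the function names below are illustrative:

    #ifdef CONFIG_DRM_AMD_POWERPLAY
        ret = amd_powerplay_init(&pp_init, &adev->powerplay); /* illustrative */
    #else
        ret = legacy_dpm_early_init(adev);                    /* illustrative */
    #endif
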
subdir-ccflags-y += -Iinclude/drm \
-Idrivers/gpu/drm/amd/powerplay/inc/ \
-Idrivers/gpu/drm/amd/include/asic_reg \
-Idrivers/gpu/drm/amd/include \
-Idrivers/gpu/drm/amd/powerplay/smumgr\
-Idrivers/gpu/drm/amd/powerplay/hwmgr \
-Idrivers/gpu/drm/amd/powerplay/eventmgr
AMD_PP_PATH = ../powerplay
PP_LIBS = smumgr hwmgr eventmgr
AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix drivers/gpu/drm/amd/powerplay/,$(PP_LIBS)))
include $(AMD_POWERPLAY)
POWER_MGR = amd_powerplay.o
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR))
AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
#
# Makefile for the 'event manager' sub-component of powerplay.
# It provides the event management services for the driver.
EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
eventactionchains.o eventsubchains.o eventtasks.o psm.o
AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "eventmgr.h"
#include "eventactionchains.h"
#include "eventsubchains.h"
static const pem_event_action *initialize_event[] = {
block_adjust_power_state_tasks,
power_budget_tasks,
system_config_tasks,
setup_asic_tasks,
enable_dynamic_state_management_tasks,
enable_clock_power_gatings_tasks,
get_2d_performance_state_tasks,
set_performance_state_tasks,
initialize_thermal_controller_tasks,
conditionally_force_3d_performance_state_tasks,
process_vbios_eventinfo_tasks,
broadcast_power_policy_tasks,
NULL
};
const struct action_chain initialize_action_chain = {
"Initialize",
initialize_event
};
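
Each *_event array is a NULL-terminated list of task lists, and a chain is pure data: the event manager just walks it in order. A sketch of the walker, assuming pem_event_action is a task-function typedef, that the struct field is named after the initializers above, and that execution stops at the first failure (the real walker is pem_excute_event_chain in eventmanagement.c):

    /* Sketch: run every task of every task list in a chain, in order. */
    static int run_action_chain(struct pp_eventmgr *eventmgr,
                                const struct action_chain *chain,
                                struct pem_event_data *event_data)
    {
        const pem_event_action * const *tasks;
        const pem_event_action *task;
        int result = 0;

        for (tasks = chain->action_chain; *tasks != NULL && result == 0; tasks++)
            for (task = *tasks; *task != NULL && result == 0; task++)
                result = (*task)(eventmgr, event_data); /* assumed signature */

        return result;
    }
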
static const pem_event_action *uninitialize_event[] = {
ungate_all_display_phys_tasks,
uninitialize_display_phy_access_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_dynamic_state_management_tasks,
disable_clock_power_gatings_tasks,
cleanup_asic_tasks,
prepare_for_pnp_stop_tasks,
NULL
};
const struct action_chain uninitialize_action_chain = {
"Uninitialize",
uninitialize_event
};
static const pem_event_action *power_source_change_event_pp_enabled[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
set_nbmcu_state_tasks,
broadcast_power_policy_tasks,
NULL
};
const struct action_chain power_source_change_action_chain_pp_enabled = {
"Power source change - PowerPlay enabled",
power_source_change_event_pp_enabled
};
static const pem_event_action *power_source_change_event_pp_disabled[] = {
set_power_source_tasks,
set_nbmcu_state_tasks,
NULL
};
const struct action_chain power_source_changes_action_chain_pp_disabled = {
"Power source change - PowerPlay disabled",
power_source_change_event_pp_disabled
};
static const pem_event_action *power_source_change_event_hardware_dc[] = {
set_power_source_tasks,
set_power_saving_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
reset_hardware_dc_notification_tasks,
set_nbmcu_state_tasks,
broadcast_power_policy_tasks,
NULL
};
const struct action_chain power_source_change_action_chain_hardware_dc = {
"Power source change - with Hardware DC switching",
power_source_change_event_hardware_dc
};
static const pem_event_action *suspend_event[] = {
reset_display_phy_access_tasks,
unregister_interrupt_tasks,
disable_gfx_voltage_island_power_gating_tasks,
disable_gfx_clock_gating_tasks,
notify_smu_suspend_tasks,
disable_smc_firmware_ctf_tasks,
set_boot_state_tasks,
adjust_power_state_tasks,
disable_fps_tasks,
vari_bright_suspend_tasks,
reset_fan_speed_to_default_tasks,
power_down_asic_tasks,
disable_stutter_mode_tasks,
set_connected_standby_tasks,
block_hw_access_tasks,
NULL
};
const struct action_chain suspend_action_chain = {
"Suspend",
suspend_event
};
static const pem_event_action *resume_event[] = {
unblock_hw_access_tasks,
resume_connected_standby_tasks,
notify_smu_resume_tasks,
reset_display_configCounter_tasks,
update_dal_configuration_tasks,
vari_bright_resume_tasks,
block_adjust_power_state_tasks,
setup_asic_tasks,
enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
enable_dynamic_state_management_tasks,
enable_clock_power_gatings_tasks,
enable_disable_bapm_tasks,
reset_boot_state_tasks,
adjust_power_state_tasks,
enable_disable_fps_tasks,
notify_hw_power_source_tasks,
process_vbios_event_info_tasks,
enable_gfx_clock_gating_tasks,
enable_gfx_voltage_island_power_gating_tasks,
reset_clock_gating_tasks,
notify_smu_vpu_recovery_end_tasks,
disable_vpu_cap_tasks,
execute_escape_sequence_tasks,
NULL
};
const struct action_chain resume_action_chain = {
"resume",
resume_event
};
static const pem_event_action *complete_init_event[] = {
adjust_power_state_tasks,
enable_gfx_clock_gating_tasks,
enable_gfx_voltage_island_power_gating_tasks,
notify_power_state_change_tasks,
NULL
};
const struct action_chain complete_init_action_chain = {
"complete init",
complete_init_event
};
static const pem_event_action *enable_gfx_clock_gating_event[] = {
enable_gfx_clock_gating_tasks,
NULL
};
const struct action_chain enable_gfx_clock_gating_action_chain = {
"enable gfx clock gate",
enable_gfx_clock_gating_event
};
static const pem_event_action *disable_gfx_clock_gating_event[] = {
disable_gfx_clock_gating_tasks,
NULL
};
const struct action_chain disable_gfx_clock_gating_action_chain = {
"disable gfx clock gate",
disable_gfx_clock_gating_event
};
static const pem_event_action *enable_cgpg_event[] = {
enable_cgpg_tasks,
NULL
};
const struct action_chain enable_cgpg_action_chain = {
"eable cg pg",
enable_cgpg_event
};
static const pem_event_action *disable_cgpg_event[] = {
disable_cgpg_tasks,
NULL
};
const struct action_chain disable_cgpg_action_chain = {
"disable cg pg",
disable_cgpg_event
};
/* Enable user _2d performance and activate */
static const pem_event_action *enable_user_state_event[] = {
create_new_user_performance_state_tasks,
adjust_power_state_tasks,
NULL
};
const struct action_chain enable_user_state_action_chain = {
"Enable user state",
enable_user_state_event
};
static const pem_event_action *enable_user_2d_performance_event[] = {
enable_user_2d_performance_tasks,
add_user_2d_performance_state_tasks,
set_performance_state_tasks,
adjust_power_state_tasks,
delete_user_2d_performance_state_tasks,
NULL
};
const struct action_chain enable_user_2d_performance_action_chain = {
"enable_user_2d_performance_event_activate",
enable_user_2d_performance_event
};
static const pem_event_action *disable_user_2d_performance_event[] = {
disable_user_2d_performance_tasks,
delete_user_2d_performance_state_tasks,
NULL
};
const struct action_chain disable_user_2d_performance_action_chain = {
"disable_user_2d_performance_event",
disable_user_2d_performance_event
};
static const pem_event_action *display_config_change_event[] = {
/* countDisplayConfigurationChangeEventTasks, */
unblock_adjust_power_state_tasks,
set_cpu_power_state,
notify_hw_power_source_tasks,
/* updateDALConfigurationTasks,
variBrightDisplayConfigurationChangeTasks, */
adjust_power_state_tasks,
/*enableDisableFPSTasks,
setNBMCUStateTasks,
notifyPCIEDeviceReadyTasks,*/
NULL
};
const struct action_chain display_config_change_action_chain = {
"Display configuration change",
display_config_change_event
};
static const pem_event_action *readjust_power_state_event[] = {
adjust_power_state_tasks,
NULL
};
const struct action_chain readjust_power_state_action_chain = {
"re-adjust power state",
readjust_power_state_event
};
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _EVENT_ACTION_CHAINS_H_
#define _EVENT_ACTION_CHAINS_H_
#include "eventmgr.h"
extern const struct action_chain initialize_action_chain;
extern const struct action_chain uninitialize_action_chain;
extern const struct action_chain power_source_change_action_chain_pp_enabled;
extern const struct action_chain power_source_changes_action_chain_pp_disabled;
extern const struct action_chain power_source_change_action_chain_hardware_dc;
extern const struct action_chain suspend_action_chain;
extern const struct action_chain resume_action_chain;
extern const struct action_chain complete_init_action_chain;
extern const struct action_chain enable_gfx_clock_gating_action_chain;
extern const struct action_chain disable_gfx_clock_gating_action_chain;
extern const struct action_chain enable_cgpg_action_chain;
extern const struct action_chain disable_cgpg_action_chain;
extern const struct action_chain enable_user_2d_performance_action_chain;
extern const struct action_chain disable_user_2d_performance_action_chain;
extern const struct action_chain enable_user_state_action_chain;
extern const struct action_chain readjust_power_state_action_chain;
extern const struct action_chain display_config_change_action_chain;
#endif /*_EVENT_ACTION_CHAINS_H_*/
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _EVENTINIT_H_
#define _EVENTINIT_H_
#define PEM_CURRENT_POWERPLAY_FEATURE_VERSION 4
void pem_init_feature_info(struct pp_eventmgr *eventmgr);
void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr);
int pem_register_interrupts(struct pp_eventmgr *eventmgr);
int pem_unregister_interrupts(struct pp_eventmgr *eventmgr);
#endif /* _EVENTINIT_H_ */
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _EVENT_MANAGEMENT_H_
#define _EVENT_MANAGEMENT_H_
#include "eventmgr.h"
int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);
extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);
#endif /* _EVENT_MANAGEMENT_H_ */
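
These accessors let the dispatcher stay a plain lookup: map an incoming event to its chain, then execute it. A sketch of that shape; the event-ID names are illustrative and the real dispatcher lives in eventmgr.c:

    /* Sketch: event dispatch as chain lookup + execution. */
    static int handle_event(struct pp_eventmgr *eventmgr,
                            enum amd_pp_event event,   /* illustrative enum */
                            struct pem_event_data *data)
    {
        const struct action_chain *chain = NULL;

        switch (event) {
        case AMD_PP_EVENT_SUSPEND:                     /* illustrative */
            chain = pem_get_suspend_action_chain(eventmgr);
            break;
        case AMD_PP_EVENT_RESUME:                      /* illustrative */
            chain = pem_get_resume_action_chain(eventmgr);
            break;
        default:
            return -EINVAL;
        }

        return chain ? pem_excute_event_chain(eventmgr, chain, data) : -EINVAL;
    }
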
/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "eventmgr.h"
#include "eventinit.h"
#include "eventmanagement.h"
#include "eventmanager.h"
#include "power_state.h"
#include "hardwaremanager.h"
int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
int psm_set_performance_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
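
In the task layer these helpers compose: a task resolves a state ID by UI label or classification, then pushes it through psm_set_performance_states. A sketch of such a task; the classification flag name is assumed from power_state.h and the wiring is illustrative:

    /* Sketch: an event task that forces the boot performance state. */
    static int task_force_boot_state(struct pp_eventmgr *eventmgr,
                                     struct pem_event_data *event_data)
    {
        unsigned long state_id = 0;
        int ret;

        ret = psm_get_state_by_classification(eventmgr,
                PP_StateClassificationFlag_Boot, /* assumed flag name */
                &state_id);
        if (ret == 0)
            ret = psm_set_performance_states(eventmgr, &state_id);

        return ret;
    }
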
@@ -164,6 +164,7 @@ enum DPM_ARRAY {
#define PPSMC_MSG_SetLoggerAddressHigh        ((uint16_t) 0x26C)
#define PPSMC_MSG_SetLoggerAddressLow         ((uint16_t) 0x26D)
#define PPSMC_MSG_SetWatermarkFrequency       ((uint16_t) 0x26E)
#define PPSMC_MSG_SetDisplaySizePowerParams ((uint16_t) 0x26F)
/* REMOVE LATER*/
#define PPSMC_MSG_DPM_ForceState              ((uint16_t) 0x104)
...