Commit 876ca8f3 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.12-rc3' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Not a whole lot happening here, a set of amdgpu fixes and one core
  deadlock fix, and some misc drivers fixes"

* tag 'drm-fixes-for-v4.12-rc3' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: fix null point error when rmmod amdgpu.
  drm/amd/powerplay: fix a signedness bugs
  drm/amdgpu: fix NULL pointer panic of emit_gds_switch
  drm/radeon: Unbreak HPD handling for r600+
  drm/amd/powerplay/smu7: disable mclk switching for high refresh rates
  drm/amd/powerplay/smu7: add vblank check for mclk switching (v2)
  drm/radeon/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu/ci: disable mclk switching for high refresh rates (v2)
  drm/amdgpu: fix fundamental suspend/resume issue
  drm/gma500/psb: Actually use VBT mode when it is found
  drm: Fix deadlock retry loop in page_flip_ioctl
  drm: qxl: Delay entering atomic context during cursor update
  drm/radeon: Fix oops upon driver load on PowerXpress laptops
@@ -425,10 +425,15 @@ bool amdgpu_fbdev_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
 void amdgpu_fbdev_restore_mode(struct amdgpu_device *adev)
 {
-	struct amdgpu_fbdev *afbdev = adev->mode_info.rfbdev;
+	struct amdgpu_fbdev *afbdev;
 	struct drm_fb_helper *fb_helper;
 	int ret;
 
+	if (!adev)
+		return;
+
+	afbdev = adev->mode_info.rfbdev;
+
 	if (!afbdev)
 		return;
......
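The old code dereferenced adev->mode_info.rfbdev in the initializer, before any check could run, so a NULL adev, which per the commit subject occurs while unloading the module (rmmod), oopsed immediately. Splitting the assignment from the declaration lets both adev and afbdev be validated before use.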
@@ -634,7 +634,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
 		mutex_unlock(&id_mgr->lock);
 	}
 
-	if (gds_switch_needed) {
+	if (ring->funcs->emit_gds_switch && gds_switch_needed) {
 		id->gds_base = job->gds_base;
 		id->gds_size = job->gds_size;
 		id->gws_base = job->gws_base;
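Some rings do not implement the emit_gds_switch callback; when gds_switch_needed was set on such a ring, the switch emitted inside this block went through a NULL ring->funcs->emit_gds_switch pointer (the panic named in the commit subject). Checking the callback alongside the flag skips the GDS bookkeeping on rings that cannot emit it.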
@@ -672,6 +672,7 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 	struct amdgpu_vm_id_manager *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct amdgpu_vm_id *id = &id_mgr->ids[vmid];
 
+	atomic64_set(&id->owner, 0);
 	id->gds_base = 0;
 	id->gds_size = 0;
 	id->gws_base = 0;
@@ -680,6 +681,26 @@ void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 	id->oa_size = 0;
 }
 
+/**
+ * amdgpu_vm_reset_all_ids - reset VMID to zero
+ *
+ * @adev: amdgpu device structure
+ *
+ * Reset VMID to force flush on next use
+ */
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev)
+{
+	unsigned i, j;
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
+		struct amdgpu_vm_id_manager *id_mgr =
+			&adev->vm_manager.id_mgr[i];
+
+		for (j = 1; j < id_mgr->num_ids; ++j)
+			amdgpu_vm_reset_id(adev, i, j);
+	}
+}
+
 /**
  * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
  *
@@ -2270,7 +2291,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
 		adev->vm_manager.seqno[i] = 0;
 
-
 	atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
 	atomic64_set(&adev->vm_manager.client_counter, 0);
 	spin_lock_init(&adev->vm_manager.prt_lock);
......
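Together the amdgpu_vm.c changes replace tearing down and re-creating the VM manager across suspend with a cheap reset of every VMID (ID 0 is reserved for the kernel, hence j starts at 1), clearing the owner so the next grab treats the ID as unused. The gmc_v6_0 through gmc_v9_0 hunks below all switch to the same resume pattern; a condensed sketch of it, with gmc_vX_0_* as placeholder names for the per-generation functions:

static int gmc_vX_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = gmc_vX_0_hw_init(adev);	/* re-program MC/GART registers */
	if (r)
		return r;

	/*
	 * The VMs survived suspend, but the hardware VMIDs may hold
	 * stale state; resetting them forces a full flush the next
	 * time each ID is grabbed.
	 */
	amdgpu_vm_reset_all_ids(adev);

	return 0;
}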
@@ -204,6 +204,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vmhub,
 			unsigned vmid);
+void amdgpu_vm_reset_all_ids(struct amdgpu_device *adev);
 int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 				 struct amdgpu_vm *vm);
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
......
@@ -906,6 +906,12 @@ static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (amdgpu_dpm_get_vrefresh(adev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
......
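Why the extra test matters: take a hypothetical 1080-line mode at 144 Hz with vtotal = 1200. The frame lasts about 6944 µs, and the 120 blanking lines give a vblank of roughly 694 µs, comfortably above the 450 µs GDDR5 limit, so the vblank_time check alone would still permit mclk switching on a panel where high refresh rates were evidently causing trouble. The explicit >120 Hz cut-off closes that gap.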
@@ -950,10 +950,6 @@ static int gmc_v6_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v6_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v6_0_hw_fini(adev);
 
 	return 0;
@@ -968,16 +964,9 @@ static int gmc_v6_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v6_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v6_0_is_idle(void *handle)
......
@@ -1117,10 +1117,6 @@ static int gmc_v7_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v7_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v7_0_hw_fini(adev);
 
 	return 0;
@@ -1135,16 +1131,9 @@ static int gmc_v7_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v7_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v7_0_is_idle(void *handle)
......
@@ -1209,10 +1209,6 @@ static int gmc_v8_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v8_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v8_0_hw_fini(adev);
 
 	return 0;
@@ -1227,16 +1223,9 @@ static int gmc_v8_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v8_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v8_0_is_idle(void *handle)
......
@@ -791,10 +791,6 @@ static int gmc_v9_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	if (adev->vm_manager.enabled) {
-		gmc_v9_0_vm_fini(adev);
-		adev->vm_manager.enabled = false;
-	}
 	gmc_v9_0_hw_fini(adev);
 
 	return 0;
@@ -809,17 +805,9 @@ static int gmc_v9_0_resume(void *handle)
 	if (r)
 		return r;
 
-	if (!adev->vm_manager.enabled) {
-		r = gmc_v9_0_vm_init(adev);
-		if (r) {
-			dev_err(adev->dev,
-				"vm manager initialization failed (%d).\n", r);
-			return r;
-		}
-		adev->vm_manager.enabled = true;
-	}
+	amdgpu_vm_reset_all_ids(adev);
 
-	return r;
+	return 0;
 }
 
 static bool gmc_v9_0_is_idle(void *handle)
......
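The same mechanical change lands in all four GMC generations: suspend no longer tears the VM manager down and resume no longer re-initializes it. Re-creating the manager while user VMs were still alive is the "fundamental suspend/resume issue" of the commit subject; it could hand surviving clients recycled IDs and produce seemingly random VM faults after resume.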
@@ -2655,6 +2655,28 @@ static int smu7_get_power_state_size(struct pp_hwmgr *hwmgr)
 	return sizeof(struct smu7_power_state);
 }
 
+static int smu7_vblank_too_short(struct pp_hwmgr *hwmgr,
+				 uint32_t vblank_time_us)
+{
+	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
+	uint32_t switch_limit_us;
+
+	switch (hwmgr->chip_id) {
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+		switch_limit_us = data->is_memory_gddr5 ? 190 : 150;
+		break;
+	default:
+		switch_limit_us = data->is_memory_gddr5 ? 450 : 150;
+		break;
+	}
+	if (vblank_time_us < switch_limit_us)
+		return true;
+	else
+		return false;
+}
+
 static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 				struct pp_power_state *request_ps,
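Unlike the dpm helpers in ci_dpm.c above, the powerplay version picks its switch limit (in microseconds, per the _us suffix) by ASIC: Polaris parts get a tighter 190 µs GDDR5 budget, everything else keeps 450 µs, and non-GDDR5 memory gets 150 µs across the board.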
@@ -2669,6 +2691,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	bool disable_mclk_switching;
 	bool disable_mclk_switching_for_frame_lock;
 	struct cgs_display_info info = {0};
+	struct cgs_mode_info mode_info = {0};
 	const struct phm_clock_and_voltage_limits *max_limits;
 	uint32_t i;
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
@@ -2677,6 +2700,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	int32_t count;
 	int32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
 
+	info.mode_info = &mode_info;
 	data->battery_state = (PP_StateUILabel_Battery ==
 			request_ps->classification.ui_label);
@@ -2703,8 +2727,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 	cgs_get_active_displays_info(hwmgr->device, &info);
 
-	/*TO DO result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
-
 	minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
 	minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
@@ -2769,8 +2791,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
 			PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
 
-	disable_mclk_switching = (1 < info.display_count) ||
-				    disable_mclk_switching_for_frame_lock;
+	disable_mclk_switching = ((1 < info.display_count) ||
+				  disable_mclk_switching_for_frame_lock ||
+				  smu7_vblank_too_short(hwmgr, mode_info.vblank_time_us) ||
+				  (mode_info.refresh_rate > 120));
 
 	sclk = smu7_ps->performance_levels[0].engine_clock;
 	mclk = smu7_ps->performance_levels[0].memory_clock;
......
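With mode_info wired into the cgs display query, the stale "TO DO ... PHM_CheckVBlankTime" comment deleted above is finally implemented: mclk switching is now vetoed for multiple displays, frame lock, a too-short blanking interval, or any refresh rate above 120 Hz.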
@@ -4186,7 +4186,7 @@ static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
 		enum pp_clock_type type, uint32_t mask)
 {
 	struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
-	uint32_t i;
+	int i;
 
 	if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
 		return -EINVAL;
......
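The signedness point: an unsigned loop index silently breaks any comparison that expects it to go negative (a decrement-until-below-zero loop never terminates) and skews mixed signed/unsigned comparisons; since the function only iterates over a handful of DPM levels, a plain int is the safe choice. The exact failing comparison is not visible in this excerpt.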
@@ -948,8 +948,6 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	}
 
 out:
-	if (ret && crtc->funcs->page_flip_target)
-		drm_crtc_vblank_put(crtc);
 	if (fb)
 		drm_framebuffer_put(fb);
 	if (crtc->primary->old_fb)
@@ -964,5 +962,8 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
 	drm_modeset_drop_locks(&ctx);
 	drm_modeset_acquire_fini(&ctx);
 
+	if (ret && crtc->funcs->page_flip_target)
+		drm_crtc_vblank_put(crtc);
+
 	return ret;
 }
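The out: label runs on every EDEADLK backoff, but the vblank reference guarded by page_flip_target is taken only once, before the retry loop, so each deadlock retry dropped a reference that was never re-acquired. Moving the put below the lock release pairs it with the single get. A simplified sketch of the drm_modeset_lock retry shape this restores (not the full ioctl body):

static int example_flip(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	/* one-shot resources (e.g. the vblank reference) are acquired here */

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret)
		goto out;
	/* ... locked work, which may also fail with -EDEADLK ... */
out:
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	/* ... and released only here, after the loop has finished */
	return ret;
}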
@@ -759,20 +759,23 @@ void psb_intel_lvds_init(struct drm_device *dev,
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 			mode_dev->panel_fixed_mode =
 			    drm_mode_duplicate(dev, scan);
+			DRM_DEBUG_KMS("Using mode from DDC\n");
 			goto out;	/* FIXME: check for quirks */
 		}
 	}
 
 	/* Failed to get EDID, what about VBT? do we need this? */
-	if (mode_dev->vbt_mode)
-		mode_dev->panel_fixed_mode =
-		    drm_mode_duplicate(dev, mode_dev->vbt_mode);
+	if (dev_priv->lfp_lvds_vbt_mode) {
+		mode_dev->panel_fixed_mode =
+			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 
-	if (!mode_dev->panel_fixed_mode)
-		if (dev_priv->lfp_lvds_vbt_mode)
-			mode_dev->panel_fixed_mode =
-			    drm_mode_duplicate(dev,
-					       dev_priv->lfp_lvds_vbt_mode);
+		if (mode_dev->panel_fixed_mode) {
+			mode_dev->panel_fixed_mode->type |=
+				DRM_MODE_TYPE_PREFERRED;
+			DRM_DEBUG_KMS("Using mode from VBT\n");
+			goto out;
+		}
+	}
 
 	/*
 	 * If we didn't get EDID, try checking if the panel is already turned
@@ -789,6 +792,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
 	if (mode_dev->panel_fixed_mode) {
 		mode_dev->panel_fixed_mode->type |=
 			DRM_MODE_TYPE_PREFERRED;
+		DRM_DEBUG_KMS("Using pre-programmed mode\n");
 		goto out;	/* FIXME: check for quirks */
 	}
 }
......
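Previously the VBT mode was only duplicated as a second-choice fallback, never marked DRM_MODE_TYPE_PREFERRED, and the function fell straight through to the probe-the-hardware path below, so a valid VBT mode was effectively ignored, hence "Actually use VBT mode when it is found". The rewritten block commits to the VBT mode and exits through out exactly like the DDC path, and all three sources now log which mode won.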
@@ -575,8 +575,6 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 	if (ret)
 		return;
 
-	cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
-
 	if (fb != old_state->fb) {
 		obj = to_qxl_framebuffer(fb)->obj;
 		user_bo = gem_to_qxl_bo(obj);
@@ -614,6 +612,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		qxl_bo_kunmap(cursor_bo);
 		qxl_bo_kunmap(user_bo);
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->u.set.visible = 1;
 		cmd->u.set.shape = qxl_bo_physical_address(qdev,
 							   cursor_bo, 0);
@@ -624,6 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
 		if (ret)
 			goto out_free_release;
 
+		cmd = (struct qxl_cursor_cmd *) qxl_release_map(qdev, release);
 		cmd->type = QXL_CURSOR_MOVE;
 	}
......
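qxl_release_map enters an atomic (non-preemptible) mapping, while the buffer pinning and qxl_bo_kmap calls in the middle of the cursor update can sleep; mapping the command as early as the old code did therefore risked sleeping inside an atomic section. Re-mapping the release immediately before each burst of cmd writes, in both the cursor-set and cursor-move branches, keeps the atomic window minimal.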
@@ -776,6 +776,12 @@ bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
 	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
 	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
 
+	/* disable mclk switching if the refresh is >120Hz, even if the
+	 * blanking period would allow it
+	 */
+	if (r600_dpm_get_vrefresh(rdev) > 120)
+		return true;
+
 	if (vblank_time < switch_limit)
 		return true;
 	else
......
@@ -7401,7 +7401,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -7431,7 +7431,7 @@ static inline void cik_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
......
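Every HPD6 hunk in cik.c, evergreen.c, r600.c, and si.c fixes the same copy-paste slip: the ack path read DC_HPD5_INT_CONTROL, or'ed in the ack bit, and wrote the result into DC_HPD6_INT_CONTROL, so HPD6's control register was clobbered with HPD5's settings and the HPD6 interrupt was never properly acknowledged, breaking hotplug detection; that is what the "Unbreak HPD handling for r600+" subject refers to.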
@@ -4927,7 +4927,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -4958,7 +4958,7 @@ static void evergreen_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
......
@@ -3988,7 +3988,7 @@ static void r600_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
......
@@ -116,7 +116,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if ((radeon_runtime_pm != 0) &&
 	    radeon_has_atpx() &&
 	    ((flags & RADEON_IS_IGP) == 0) &&
-	    !pci_is_thunderbolt_attached(rdev->pdev))
+	    !pci_is_thunderbolt_attached(dev->pdev))
 		flags |= RADEON_IS_PX;
 
 	/* radeon_device_init should report only fatal error
......
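At this point in radeon_driver_load_kms the radeon_device has only just been allocated; rdev->pdev is not filled in until radeon_device_init runs later, so the Thunderbolt test dereferenced a NULL pci_dev and oopsed during driver load on hybrid-graphics (PowerXpress) machines that reach this path. dev->pdev is already valid here, and the two point at the same PCI device once initialization completes.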
@@ -6317,7 +6317,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
@@ -6348,7 +6348,7 @@ static inline void si_irq_ack(struct radeon_device *rdev)
 		WREG32(DC_HPD5_INT_CONTROL, tmp);
 	}
 	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
-		tmp = RREG32(DC_HPD5_INT_CONTROL);
+		tmp = RREG32(DC_HPD6_INT_CONTROL);
 		tmp |= DC_HPDx_RX_INT_ACK;
 		WREG32(DC_HPD6_INT_CONTROL, tmp);
 	}
......