Commit 4fb68f97 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.9-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "AMD, radeon, i915, imx, msm and udl fixes:

   - amdgpu/radeon have a number of power management regressions and
     fixes along with some better error checking

   - imx has a single regression fix

   - udl has a single kmalloc instead of stack for usb control msg fix

   - msm has some fixes for modesetting bugs and regressions

   - i915 has one fix for a Sandybridge regression along with some
     others for DP audio.

  They all seem pretty okay at this stage, we've got one MST fix I know
  going through process for i915, but I expect it'll be next week"

* tag 'drm-fixes-for-v4.9-rc5' of git://people.freedesktop.org/~airlied/linux: (30 commits)
  drm/udl: make control msg static const. (v2)
  drm/amd/powerplay: implement get_clock_by_type for iceland.
  drm/amd/powerplay/smu7: fix checks in smu7_get_evv_voltages (v2)
  drm/amd/powerplay: update phm_get_voltage_evv_on_sclk for iceland
  drm/amd/powerplay: propagate errors in phm_get_voltage_evv_on_sclk
  drm/imx: disable planes before DC
  drm/amd/powerplay: return false instead of -EINVAL
  drm/amdgpu/powerplay/smu7: fix unintialized data usage
  drm/amdgpu: fix crash in acp_hw_fini
  drm/i915: Limit Valleyview and earlier to only using mappable scanout
  drm/i915: Round tile chunks up for constructing partial VMAs
  drm/i915/dp: Extend BDW DP audio workaround to GEN9 platforms
  drm/i915/dp: BDW cdclk fix for DP audio
  drm/i915/vlv: Prevent enabling hpd polling in late suspend
  drm/i915: Respect alternate_ddc_pin for all DDI ports
  drm/msm: Fix error handling crashes seen when VRAM allocation fails
  drm/msm/mdp5: 8x16 actually has 8 mixer stages
  drm/msm/mdp5: no scaling support on RGBn pipes for 8x16
  drm/msm/mdp5: handle non-fullscreen base plane case
  drm/msm: Set CLK_IGNORE_UNUSED flag for PLL clocks
  ...
...
@@ -395,9 +395,12 @@ static int acp_hw_fini(void *handle)
 {
 	int i, ret;
 	struct device *dev;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+	/* return early if no ACP */
+	if (!adev->acp.acp_genpd)
+		return 0;
+
 	for (i = 0; i < ACP_DEVS ; i++) {
 		dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
 		ret = pm_genpd_remove_device(&adev->acp.acp_genpd->gpd, dev);
...
...
@@ -795,10 +795,19 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		if (!adev->pm.fw) {
 			switch (adev->asic_type) {
 			case CHIP_TOPAZ:
-				strcpy(fw_name, "amdgpu/topaz_smc.bin");
+				if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
+				    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
+				    ((adev->pdev->device == 0x6907) && (adev->pdev->revision == 0x87)))
+					strcpy(fw_name, "amdgpu/topaz_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/topaz_smc.bin");
 				break;
 			case CHIP_TONGA:
-				strcpy(fw_name, "amdgpu/tonga_smc.bin");
+				if (((adev->pdev->device == 0x6939) && (adev->pdev->revision == 0xf1)) ||
+				    ((adev->pdev->device == 0x6938) && (adev->pdev->revision == 0xf1)))
+					strcpy(fw_name, "amdgpu/tonga_k_smc.bin");
+				else
+					strcpy(fw_name, "amdgpu/tonga_smc.bin");
 				break;
 			case CHIP_FIJI:
 				strcpy(fw_name, "amdgpu/fiji_smc.bin");
...
...
@@ -769,7 +769,7 @@ static void amdgpu_connector_unregister(struct drm_connector *connector)
 {
 	struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
 
-	if (amdgpu_connector->ddc_bus->has_aux) {
+	if (amdgpu_connector->ddc_bus && amdgpu_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&amdgpu_connector->ddc_bus->aux);
 		amdgpu_connector->ddc_bus->has_aux = false;
 	}
...
...
@@ -735,8 +735,20 @@ static struct pci_driver amdgpu_kms_pci_driver = {
 
 static int __init amdgpu_init(void)
 {
-	amdgpu_sync_init();
-	amdgpu_fence_slab_init();
+	int r;
+
+	r = amdgpu_sync_init();
+	if (r)
+		goto error_sync;
+
+	r = amdgpu_fence_slab_init();
+	if (r)
+		goto error_fence;
+
+	r = amd_sched_fence_slab_init();
+	if (r)
+		goto error_sched;
+
 	if (vgacon_text_force()) {
 		DRM_ERROR("VGACON disables amdgpu kernel modesetting.\n");
 		return -EINVAL;
@@ -748,6 +760,15 @@ static int __init amdgpu_init(void)
 	amdgpu_register_atpx_handler();
 	/* let modprobe override vga console setting */
 	return drm_pci_init(driver, pdriver);
+
+error_sched:
+	amdgpu_fence_slab_fini();
+
+error_fence:
+	amdgpu_sync_fini();
+
+error_sync:
+	return r;
 }
 
 static void __exit amdgpu_exit(void)
@@ -756,6 +777,7 @@ static void __exit amdgpu_exit(void)
 	drm_pci_exit(driver, pdriver);
 	amdgpu_unregister_atpx_handler();
 	amdgpu_sync_fini();
+	amd_sched_fence_slab_fini();
 	amdgpu_fence_slab_fini();
 }
...
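The amdgpu_init() hunk above replaces fire-and-forget calls with the kernel's usual goto-based unwind: each init step that can fail jumps to a label that tears down only the steps that already succeeded, in reverse order. A standalone, runnable sketch of the idiom (the init/fini pairs are illustrative stand-ins, not the amdgpu functions):

#include <stdio.h>

/* Illustrative init/fini pairs standing in for amdgpu_sync_init() etc. */
static int  init_a(void) { puts("init A"); return 0; }
static void fini_a(void) { puts("fini A"); }
static int  init_b(void) { puts("init B"); return 0; }
static void fini_b(void) { puts("fini B"); }
static int  init_c(void) { puts("init C fails"); return -1; }

static int demo_init(void)
{
	int r;

	r = init_a();
	if (r)
		goto error_a;

	r = init_b();
	if (r)
		goto error_b;

	r = init_c();
	if (r)
		goto error_c;

	return 0;

error_c:	/* labels run in reverse order of initialization */
	fini_b();
error_b:
	fini_a();
error_a:
	return r;
}

int main(void)
{
	return demo_init() ? 1 : 0;
}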
...
@@ -99,6 +99,8 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	if ((amdgpu_runtime_pm != 0) &&
 	    amdgpu_has_atpx() &&
+	    (amdgpu_is_atpx_hybrid() ||
+	     amdgpu_has_atpx_dgpu_power_cntl()) &&
 	    ((flags & AMD_IS_APU) == 0))
 		flags |= AMD_IS_PX;
...
...
@@ -80,7 +80,9 @@
 #include "dce_virtual.h"
 
 MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
+MODULE_FIRMWARE("amdgpu/topaz_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/tonga_smc.bin");
+MODULE_FIRMWARE("amdgpu/tonga_k_smc.bin");
 MODULE_FIRMWARE("amdgpu/fiji_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
...
...
@@ -272,7 +272,7 @@ bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hw
 	PHM_FUNC_CHECK(hwmgr);
 
 	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
-		return -EINVAL;
+		return false;
 
 	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
 }
...
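This one-line fix is subtler than it looks: the function returns bool, and in C any nonzero value converts to true, so the old return -EINVAL reported "SMC update required" on the error path instead of failing. A runnable demonstration of the conversion (standalone code, not part of the driver):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool broken(void) { return -EINVAL; /* nonzero converts to true */ }
static bool fixed(void)  { return false; }

int main(void)
{
	printf("broken() = %d\n", broken());	/* prints 1 */
	printf("fixed()  = %d\n", fixed());	/* prints 0 */
	return 0;
}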
...
@@ -710,8 +710,10 @@ int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
 	uint32_t vol;
 	int ret = 0;
 
-	if (hwmgr->chip_id < CHIP_POLARIS10) {
-		atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
+	if (hwmgr->chip_id < CHIP_TONGA) {
+		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
+	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
+		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
 		if (*voltage >= 2000 || *voltage == 0)
 			*voltage = 1150;
 	} else {
...
...
@@ -1460,19 +1460,19 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = NULL;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	sclk_table = table_info->vdd_dep_on_sclk;
-
 	for (i = 0; i < SMU7_MAX_LEAKAGE_COUNT; i++) {
 		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
 
 		if (data->vdd_gfx_control == SMU7_VOLTAGE_CONTROL_BY_SVID2) {
-			if (0 == phm_get_sclk_for_voltage_evv(hwmgr,
+			if ((hwmgr->pp_table_version == PP_TABLE_V1)
+			    && !phm_get_sclk_for_voltage_evv(hwmgr,
 					table_info->vddgfx_lookup_table, vv_id, &sclk)) {
 				if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 						PHM_PlatformCaps_ClockStretcher)) {
+					if (table_info == NULL)
+						return -EINVAL;
+
+					sclk_table = table_info->vdd_dep_on_sclk;
+
 					for (j = 1; j < sclk_table->count; j++) {
 						if (sclk_table->entries[j].clk == sclk &&
 						    sclk_table->entries[j].cks_enable == 0) {
@@ -1498,12 +1498,15 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr)
 			}
 		}
 	} else {
 		if ((hwmgr->pp_table_version == PP_TABLE_V0)
 			|| !phm_get_sclk_for_voltage_evv(hwmgr,
 					table_info->vddc_lookup_table, vv_id, &sclk)) {
 			if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
 					PHM_PlatformCaps_ClockStretcher)) {
+				if (table_info == NULL)
+					return -EINVAL;
+
+				sclk_table = table_info->vdd_dep_on_sclk;
+
 				for (j = 1; j < sclk_table->count; j++) {
 					if (sclk_table->entries[j].clk == sclk &&
 					    sclk_table->entries[j].cks_enable == 0) {
@@ -2133,9 +2136,11 @@ static int smu7_patch_limits_vddc(struct pp_hwmgr *hwmgr,
 	struct smu7_hwmgr *data = (struct smu7_hwmgr *)(hwmgr->backend);
 
 	if (tab) {
+		vddc = tab->vddc;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddc,
 					&data->vddc_leakage);
 		tab->vddc = vddc;
+		vddci = tab->vddci;
 		smu7_patch_ppt_v0_with_vdd_leakage(hwmgr, &vddci,
 					&data->vddci_leakage);
 		tab->vddci = vddci;
@@ -4228,18 +4233,26 @@ static int smu7_get_sclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 {
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
-	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table;
+	struct phm_ppt_v1_clock_voltage_dependency_table *dep_sclk_table = NULL;
+	struct phm_clock_voltage_dependency_table *sclk_table;
 	int i;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_sclk_table = table_info->vdd_dep_on_sclk;
-
-	for (i = 0; i < dep_sclk_table->count; i++) {
-		clocks->clock[i] = dep_sclk_table->entries[i].clk;
-		clocks->count++;
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL || table_info->vdd_dep_on_sclk == NULL)
+			return -EINVAL;
+		dep_sclk_table = table_info->vdd_dep_on_sclk;
+		for (i = 0; i < dep_sclk_table->count; i++) {
+			clocks->clock[i] = dep_sclk_table->entries[i].clk;
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		sclk_table = hwmgr->dyn_state.vddc_dependency_on_sclk;
+		for (i = 0; i < sclk_table->count; i++) {
+			clocks->clock[i] = sclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
 	return 0;
 }
@@ -4261,17 +4274,24 @@ static int smu7_get_mclks(struct pp_hwmgr *hwmgr, struct amd_pp_clocks *clocks)
 			(struct phm_ppt_v1_information *)hwmgr->pptable;
 	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table;
 	int i;
+	struct phm_clock_voltage_dependency_table *mclk_table;
 
-	if (table_info == NULL)
-		return -EINVAL;
-
-	dep_mclk_table = table_info->vdd_dep_on_mclk;
-
-	for (i = 0; i < dep_mclk_table->count; i++) {
-		clocks->clock[i] = dep_mclk_table->entries[i].clk;
-		clocks->latency[i] = smu7_get_mem_latency(hwmgr,
-					dep_mclk_table->entries[i].clk);
-		clocks->count++;
+	if (hwmgr->pp_table_version == PP_TABLE_V1) {
+		if (table_info == NULL)
+			return -EINVAL;
+		dep_mclk_table = table_info->vdd_dep_on_mclk;
+		for (i = 0; i < dep_mclk_table->count; i++) {
+			clocks->clock[i] = dep_mclk_table->entries[i].clk;
+			clocks->latency[i] = smu7_get_mem_latency(hwmgr,
+						dep_mclk_table->entries[i].clk);
+			clocks->count++;
+		}
+	} else if (hwmgr->pp_table_version == PP_TABLE_V0) {
+		mclk_table = hwmgr->dyn_state.vddc_dependency_on_mclk;
+		for (i = 0; i < mclk_table->count; i++) {
+			clocks->clock[i] = mclk_table->entries[i].clk;
+			clocks->count++;
+		}
 	}
 	return 0;
 }
...
...
@@ -30,7 +30,7 @@ int smu7_fan_ctrl_get_fan_speed_info(struct pp_hwmgr *hwmgr,
 		struct phm_fan_speed_info *fan_speed_info)
 {
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;
 
 	fan_speed_info->supports_percent_read = true;
 	fan_speed_info->supports_percent_write = true;
@@ -60,7 +60,7 @@ int smu7_fan_ctrl_get_fan_speed_percent(struct pp_hwmgr *hwmgr,
 	uint64_t tmp64;
 
 	if (hwmgr->thermal_controller.fanInfo.bNoFan)
-		return 0;
+		return -ENODEV;
 
 	duty100 = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_FDO_CTRL1, FMAX_DUTY100);
@@ -89,7 +89,7 @@ int smu7_fan_ctrl_get_fan_speed_rpm(struct pp_hwmgr *hwmgr, uint32_t *speed)
 	if (hwmgr->thermal_controller.fanInfo.bNoFan ||
 	    (hwmgr->thermal_controller.fanInfo.
 		ucTachometerPulsesPerRevolution == 0))
-		return 0;
+		return -ENODEV;
 
 	tach_period = PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
 			CG_TACH_STATUS, TACH_PERIOD);
...
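These thermal hunks matter because returning 0 on fanless boards made the reads look successful while the output parameters were never written. With -ENODEV a caller can tell "no fan fitted" apart from a real failure. A hedged caller-side sketch (smu7_fan_ctrl_get_fan_speed_rpm() is from the hunk above; the caller and its messages are illustrative):

	uint32_t rpm;
	int ret = smu7_fan_ctrl_get_fan_speed_rpm(hwmgr, &rpm);

	if (ret == -ENODEV)
		pr_debug("no fan on this board\n");	/* expected on fanless designs */
	else if (ret)
		pr_err("fan speed read failed: %d\n", ret);
	else
		pr_info("fan speed: %u rpm\n", rpm);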
...
@@ -34,9 +34,6 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
 
-struct kmem_cache *sched_fence_slab;
-atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
-
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -618,13 +615,6 @@ int amd_sched_init(struct amd_gpu_scheduler *sched,
 	INIT_LIST_HEAD(&sched->ring_mirror_list);
 	spin_lock_init(&sched->job_list_lock);
 	atomic_set(&sched->hw_rq_count, 0);
-	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
-		sched_fence_slab = kmem_cache_create(
-			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
-			SLAB_HWCACHE_ALIGN, NULL);
-		if (!sched_fence_slab)
-			return -ENOMEM;
-	}
 
 	/* Each scheduler will run on a seperate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -645,7 +635,4 @@ void amd_sched_fini(struct amd_gpu_scheduler *sched)
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
-	rcu_barrier();
-	if (atomic_dec_and_test(&sched_fence_slab_ref))
-		kmem_cache_destroy(sched_fence_slab);
 }
...
...
@@ -30,9 +30,6 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
-extern struct kmem_cache *sched_fence_slab;
-extern atomic_t sched_fence_slab_ref;
-
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their
@@ -145,6 +142,9 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
 void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
+int amd_sched_fence_slab_init(void);
+void amd_sched_fence_slab_fini(void);
+
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
 void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
...
...
@@ -27,6 +27,25 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
+static struct kmem_cache *sched_fence_slab;
+
+int amd_sched_fence_slab_init(void)
+{
+	sched_fence_slab = kmem_cache_create(
+		"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+		SLAB_HWCACHE_ALIGN, NULL);
+	if (!sched_fence_slab)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void amd_sched_fence_slab_fini(void)
+{
+	rcu_barrier();
+	kmem_cache_destroy(sched_fence_slab);
+}
+
 struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
 					       void *owner)
 {
...
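The three scheduler hunks move the fence slab from a refcounted global, created by the first amd_sched_init() and destroyed by the last amd_sched_fini(), to a plain module-lifetime init/fini pair wired into amdgpu_init()/amdgpu_exit() above. That removes the atomic bookkeeping and keeps the cache alive until all RCU-deferred fence frees have drained. A minimal sketch of that kmem_cache lifecycle in an arbitrary module (all names here are illustrative):

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

struct my_fence { int seqno; };	/* illustrative object */

static struct kmem_cache *my_fence_slab;

static int __init my_fence_slab_init(void)
{
	my_fence_slab = kmem_cache_create("my_fence",
			sizeof(struct my_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
	return my_fence_slab ? 0 : -ENOMEM;
}

static void __exit my_fence_slab_fini(void)
{
	rcu_barrier();	/* flush RCU-deferred frees into the cache first */
	kmem_cache_destroy(my_fence_slab);
}

module_init(my_fence_slab_init);
module_exit(my_fence_slab_fini);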
...
@@ -1806,7 +1806,7 @@ int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
 	/* Use a partial view if it is bigger than available space */
 	chunk_size = MIN_CHUNK_PAGES;
 	if (i915_gem_object_is_tiled(obj))
-		chunk_size = max(chunk_size, tile_row_pages(obj));
+		chunk_size = roundup(chunk_size, tile_row_pages(obj));
 
 	memset(&view, 0, sizeof(view));
 	view.type = I915_GGTT_VIEW_PARTIAL;
@@ -3543,8 +3543,22 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	if (view->type == I915_GGTT_VIEW_NORMAL)
 		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 					       PIN_MAPPABLE | PIN_NONBLOCK);
-	if (IS_ERR(vma))
-		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
+	if (IS_ERR(vma)) {
+		struct drm_i915_private *i915 = to_i915(obj->base.dev);
+		unsigned int flags;
+
+		/* Valleyview is definitely limited to scanning out the first
+		 * 512MiB. Lets presume this behaviour was inherited from the
+		 * g4x display engine and that all earlier gen are similarly
+		 * limited. Testing suggests that it is a little more
+		 * complicated than this. For example, Cherryview appears quite
+		 * happy to scanout from anywhere within its global aperture.
+		 */
+		flags = 0;
+		if (HAS_GMCH_DISPLAY(i915))
+			flags = PIN_MAPPABLE;
+		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
+	}
 	if (IS_ERR(vma))
 		goto err_unpin_display;
...
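The chunk_size change is an alignment fix, not a size fix: a partial VMA has to cover whole tile rows, so the chunk must be a multiple of tile_row_pages(obj), which max() does not guarantee. A standalone worked example (the constants are hypothetical, and roundup() is simplified from the kernel macro):

#include <stdio.h>

/* roundup() to a multiple of y, simplified from the kernel's macro */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long chunk = 256;	/* hypothetical MIN_CHUNK_PAGES */
	unsigned long row = 96;		/* hypothetical tile_row_pages() */

	/* max() keeps 256, which is NOT a multiple of 96: misaligned view */
	printf("max:     %lu\n", max(chunk, row));
	/* roundup() yields 288 = 3 * 96: whole tile rows */
	printf("roundup: %lu\n", roundup(chunk, row));
	return 0;
}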
...
@@ -10243,6 +10243,29 @@ static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
 	bxt_set_cdclk(to_i915(dev), req_cdclk);
 }
 
+static int bdw_adjust_min_pipe_pixel_rate(struct intel_crtc_state *crtc_state,
+					  int pixel_rate)
+{
+	struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
+
+	/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+	if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
+		pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+
+	/* BSpec says "Do not use DisplayPort with CDCLK less than
+	 * 432 MHz, audio enabled, port width x4, and link rate
+	 * HBR2 (5.4 GHz), or else there may be audio corruption or
+	 * screen corruption."
+	 */
+	if (intel_crtc_has_dp_encoder(crtc_state) &&
+	    crtc_state->has_audio &&
+	    crtc_state->port_clock >= 540000 &&
+	    crtc_state->lane_count == 4)
+		pixel_rate = max(432000, pixel_rate);
+
+	return pixel_rate;
+}
+
 /* compute the max rate for new configuration */
 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 {
@@ -10268,9 +10291,9 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
 
 		pixel_rate = ilk_pipe_pixel_rate(crtc_state);
 
-		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
-		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
-			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
+		if (IS_BROADWELL(dev_priv) || IS_GEN9(dev_priv))
+			pixel_rate = bdw_adjust_min_pipe_pixel_rate(crtc_state,
+								    pixel_rate);
 
 		intel_state->min_pixclk[i] = pixel_rate;
 	}
...
...
@@ -1799,6 +1799,50 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
 	intel_hdmi->aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
 }
 
+static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
+			     enum port port)
+{
+	const struct ddi_vbt_port_info *info =
+		&dev_priv->vbt.ddi_port_info[port];
+	u8 ddc_pin;
+
+	if (info->alternate_ddc_pin) {
+		DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
+			      info->alternate_ddc_pin, port_name(port));
+		return info->alternate_ddc_pin;
+	}
+
+	switch (port) {
+	case PORT_B:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_1_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPB;
+		break;
+	case PORT_C:
+		if (IS_BROXTON(dev_priv))
+			ddc_pin = GMBUS_PIN_2_BXT;
+		else
+			ddc_pin = GMBUS_PIN_DPC;
+		break;
+	case PORT_D:
+		if (IS_CHERRYVIEW(dev_priv))
+			ddc_pin = GMBUS_PIN_DPD_CHV;
+		else
+			ddc_pin = GMBUS_PIN_DPD;
+		break;
+	default:
+		MISSING_CASE(port);
+		ddc_pin = GMBUS_PIN_DPB;
+		break;
+	}
+
+	DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (platform default)\n",
+		      ddc_pin, port_name(port));
+
+	return ddc_pin;
+}
+
 void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			       struct intel_connector *intel_connector)
 {
@@ -1808,7 +1852,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	struct drm_device *dev = intel_encoder->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	enum port port = intel_dig_port->port;
-	uint8_t alternate_ddc_pin;
 
 	DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
 		      port_name(port));
@@ -1826,12 +1869,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 	connector->doublescan_allowed = 0;
 	connector->stereo_allowed = 1;
 
+	intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+
 	switch (port) {
 	case PORT_B:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_1_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
 		/*
 		 * On BXT A0/A1, sw needs to activate DDIA HPD logic and
 		 * interrupts to check the external panel connection.
@@ -1842,46 +1883,17 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
 			intel_encoder->hpd_pin = HPD_PORT_B;
 		break;
 	case PORT_C:
-		if (IS_BROXTON(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_2_BXT;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
 		intel_encoder->hpd_pin = HPD_PORT_C;
 		break;
 	case PORT_D:
-		if (WARN_ON(IS_BROXTON(dev_priv)))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DISABLED;
-		else if (IS_CHERRYVIEW(dev_priv))
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD_CHV;
-		else
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
 		intel_encoder->hpd_pin = HPD_PORT_D;
 		break;
 	case PORT_E:
-		/* On SKL PORT E doesn't have seperate GMBUS pin
-		 * We rely on VBT to set a proper alternate GMBUS pin. */
-		alternate_ddc_pin =
-			dev_priv->vbt.ddi_port_info[PORT_E].alternate_ddc_pin;
-		switch (alternate_ddc_pin) {
-		case DDC_PIN_B:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPB;
-			break;
-		case DDC_PIN_C:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPC;
-			break;
-		case DDC_PIN_D:
-			intel_hdmi->ddc_bus = GMBUS_PIN_DPD;
-			break;
-		default:
-			MISSING_CASE(alternate_ddc_pin);
-		}
 		intel_encoder->hpd_pin = HPD_PORT_E;
 		break;
-	case PORT_A:
-		intel_encoder->hpd_pin = HPD_PORT_A;
-		/* Internal port only for eDP. */
 	default:
-		BUG();
+		MISSING_CASE(port);
+		return;
 	}
 
 	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
...
...
@@ -1139,7 +1139,9 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
 
 	intel_power_sequencer_reset(dev_priv);
 
-	intel_hpd_poll_init(dev_priv);
+	/* Prevent us from re-enabling polling on accident in late suspend */
+	if (!dev_priv->drm.dev->power.is_suspended)
+		intel_hpd_poll_init(dev_priv);
 }
 
 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
...
...
@@ -68,6 +68,12 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 	ipu_dc_disable_channel(ipu_crtc->dc);
 	ipu_di_disable(ipu_crtc->di);
+	/*
+	 * Planes must be disabled before DC clock is removed, as otherwise the
+	 * attached IDMACs will be left in undefined state, possibly hanging
+	 * the IPU or even the system.
+	 */
+	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false);
 	ipu_dc_disable(ipu);
 
 	spin_lock_irq(&crtc->dev->event_lock);
@@ -77,9 +83,6 @@ static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
 	}
 	spin_unlock_irq(&crtc->dev->event_lock);
 
-	/* always disable planes on the CRTC */
-	drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true);
-
 	drm_crtc_vblank_off(crtc);
 }
...
...
@@ -139,6 +139,7 @@ struct msm_dsi_host {
 	u32 err_work_state;
 	struct work_struct err_work;
+	struct work_struct hpd_work;
 	struct workqueue_struct *workqueue;
 
 	/* DSI 6G TX buffer*/
@@ -1294,6 +1295,14 @@ static void dsi_sw_reset_restore(struct msm_dsi_host *msm_host)
 	wmb();	/* make sure dsi controller enabled again */
 }
 
+static void dsi_hpd_worker(struct work_struct *work)
+{
+	struct msm_dsi_host *msm_host =
+		container_of(work, struct msm_dsi_host, hpd_work);
+
+	drm_helper_hpd_irq_event(msm_host->dev);
+}
+
 static void dsi_err_worker(struct work_struct *work)
 {
 	struct msm_dsi_host *msm_host =
@@ -1480,7 +1489,7 @@ static int dsi_host_attach(struct mipi_dsi_host *host,
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 	return 0;
 }
@@ -1494,7 +1503,7 @@ static int dsi_host_detach(struct mipi_dsi_host *host,
 	DBG("id=%d", msm_host->id);
 	if (msm_host->dev)
-		drm_helper_hpd_irq_event(msm_host->dev);
+		queue_work(msm_host->workqueue, &msm_host->hpd_work);
 
 	return 0;
 }
@@ -1748,6 +1757,7 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 	/* setup workqueue */
 	msm_host->workqueue = alloc_ordered_workqueue("dsi_drm_work", 0);
 	INIT_WORK(&msm_host->err_work, dsi_err_worker);
+	INIT_WORK(&msm_host->hpd_work, dsi_hpd_worker);
 
 	msm_dsi->host = &msm_host->base;
 	msm_dsi->id = msm_host->id;
...
...
@@ -521,6 +521,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 		.parent_names = (const char *[]){ "xo" },
 		.num_parents = 1,
 		.name = vco_name,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;
...
...
@@ -412,6 +412,7 @@ static int pll_28nm_register(struct dsi_pll_28nm *pll_28nm)
 	struct clk_init_data vco_init = {
 		.parent_names = (const char *[]){ "pxo" },
 		.num_parents = 1,
+		.flags = CLK_IGNORE_UNUSED,
 		.ops = &clk_ops_dsi_pll_28nm_vco,
 	};
 	struct device *dev = &pll_28nm->pdev->dev;
...
...
@@ -702,6 +702,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_8996_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };
 
 int msm_hdmi_pll_8996_init(struct platform_device *pdev)
...
...
@@ -424,6 +424,7 @@ static struct clk_init_data pll_init = {
 	.ops = &hdmi_pll_ops,
 	.parent_names = hdmi_pll_parents,
 	.num_parents = ARRAY_SIZE(hdmi_pll_parents),
+	.flags = CLK_IGNORE_UNUSED,
 };
 
 int msm_hdmi_pll_8960_init(struct platform_device *pdev)
...
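All four msm PLL hunks apply the same flag for the same reason: at late init the common clock framework gates every clock it considers unused, but these DSI/HDMI PLLs are enabled by the display hardware outside the clk framework's bookkeeping, so the sweep could switch off a live display. CLK_IGNORE_UNUSED exempts a clock from that sweep. A minimal registration sketch (clock name, parent, and ops are illustrative):

#include <linux/clk-provider.h>

static const struct clk_ops my_vco_ops;	/* assume real ops are defined elsewhere */

static struct clk_init_data my_vco_init = {
	.name		= "my_vco",
	.parent_names	= (const char *[]){ "xo" },
	.num_parents	= 1,
	/* keep clk_disable_unused() from gating this PLL at late init */
	.flags		= CLK_IGNORE_UNUSED,
	.ops		= &my_vco_ops,
};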
...
@@ -272,7 +272,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 		.count = 2,
 		.base = { 0x14000, 0x16000 },
 		.caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
-				MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
+				MDP_PIPE_CAP_DECIMATION,
 	},
 	.pipe_dma = {
 		.count = 1,
@@ -282,7 +282,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
 	.lm = {
 		.count = 2, /* LM0 and LM3 */
 		.base = { 0x44000, 0x47000 },
-		.nb_stages = 5,
+		.nb_stages = 8,
 		.max_width = 2048,
 		.max_height = 0xFFFF,
 	},
...
...
@@ -223,12 +223,7 @@ static void blend_setup(struct drm_crtc *crtc)
 		plane_cnt++;
 	}
 
-	/*
-	 * If there is no base layer, enable border color.
-	 * Although it's not possbile in current blend logic,
-	 * put it here as a reminder.
-	 */
-	if (!pstates[STAGE_BASE] && plane_cnt) {
+	if (!pstates[STAGE_BASE]) {
 		ctl_blend_flags |= MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT;
 		DBG("Border Color is enabled");
 	}
@@ -365,6 +360,15 @@ static int pstate_cmp(const void *a, const void *b)
 	return pa->state->zpos - pb->state->zpos;
 }
 
+/* is there a helper for this? */
+static bool is_fullscreen(struct drm_crtc_state *cstate,
+		struct drm_plane_state *pstate)
+{
+	return (pstate->crtc_x <= 0) && (pstate->crtc_y <= 0) &&
+		((pstate->crtc_x + pstate->crtc_w) >= cstate->mode.hdisplay) &&
+		((pstate->crtc_y + pstate->crtc_h) >= cstate->mode.vdisplay);
+}
+
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
@@ -375,21 +379,11 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
-	int cnt = 0, i;
+	int cnt = 0, base = 0, i;
 
 	DBG("%s: check", mdp5_crtc->name);
 
-	/* verify that there are not too many planes attached to crtc
-	 * and that we don't have conflicting mixer stages:
-	 */
-	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
-		if (cnt >= (hw_cfg->lm.nb_stages)) {
-			dev_err(dev->dev, "too many planes!\n");
-			return -EINVAL;
-		}
-
 		pstates[cnt].plane = plane;
 		pstates[cnt].state = to_mdp5_plane_state(pstate);
 
@@ -399,8 +393,24 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	/* assign a stage based on sorted zpos property */
 	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
 
+	/* if the bottom-most layer is not fullscreen, we need to use
+	 * it for solid-color:
+	 */
+	if ((cnt > 0) && !is_fullscreen(state, &pstates[0].state->base))
+		base++;
+
+	/* verify that there are not too many planes attached to crtc
+	 * and that we don't have conflicting mixer stages:
+	 */
+	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
+
+	if ((cnt + base) >= hw_cfg->lm.nb_stages) {
+		dev_err(dev->dev, "too many planes!\n");
+		return -EINVAL;
+	}
+
 	for (i = 0; i < cnt; i++) {
-		pstates[i].state->stage = STAGE_BASE + i;
+		pstates[i].state->stage = STAGE_BASE + i + base;
 		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
 				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
 				pstates[i].state->stage);
...
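The stage arithmetic above is easiest to follow with numbers: when the bottom-most plane does not cover the whole mode, one mixer stage is reserved so the hardware can fill the uncovered area with border color, every plane shifts up by one stage, and the too-many-planes check must count the reserved stage too. A standalone sketch of that accounting (stage count and plane geometry are made up):

#include <stdbool.h>
#include <stdio.h>

#define NB_STAGES 8	/* mixer stages on a hypothetical config */

static bool is_fullscreen(int x, int y, int w, int h, int hdisp, int vdisp)
{
	return x <= 0 && y <= 0 && (x + w) >= hdisp && (y + h) >= vdisp;
}

int main(void)
{
	int cnt = 3;	/* planes attached to the CRTC */
	/* a 100x100 plane at (10,10) on a 1920x1080 mode: not fullscreen */
	int base = is_fullscreen(10, 10, 100, 100, 1920, 1080) ? 0 : 1;

	if (cnt + base >= NB_STAGES) {	/* same >= check as the driver */
		puts("too many planes!");
		return 1;
	}
	for (int i = 0; i < cnt; i++)
		printf("plane %d -> stage %d\n", i, i + base);
	return 0;
}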
...
@@ -292,8 +292,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		format = to_mdp_format(msm_framebuffer_format(state->fb));
 		if (MDP_FORMAT_IS_YUV(format) &&
 			!pipe_supports_yuv(mdp5_plane->caps)) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support YUV\n");
+			DBG("Pipe doesn't support YUV\n");
 
 			return -EINVAL;
 		}
@@ -301,8 +300,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		if (!(mdp5_plane->caps & MDP_PIPE_CAP_SCALE) &&
 			(((state->src_w >> 16) != state->crtc_w) ||
 			((state->src_h >> 16) != state->crtc_h))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
+			DBG("Pipe doesn't support scaling (%dx%d -> %dx%d)\n",
 				state->src_w >> 16, state->src_h >> 16,
 				state->crtc_w, state->crtc_h);
 
@@ -313,8 +311,7 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
 		vflip = !!(state->rotation & DRM_REFLECT_Y);
 		if ((vflip && !(mdp5_plane->caps & MDP_PIPE_CAP_VFLIP)) ||
 			(hflip && !(mdp5_plane->caps & MDP_PIPE_CAP_HFLIP))) {
-			dev_err(plane->dev->dev,
-				"Pipe doesn't support flip\n");
+			DBG("Pipe doesn't support flip\n");
 
 			return -EINVAL;
 		}
...
...
@@ -228,7 +228,7 @@ static int msm_drm_uninit(struct device *dev)
 	flush_workqueue(priv->atomic_wq);
 	destroy_workqueue(priv->atomic_wq);
 
-	if (kms)
+	if (kms && kms->funcs)
 		kms->funcs->destroy(kms);
 
 	if (gpu) {
...
...
@@ -163,6 +163,9 @@ void msm_gem_shrinker_init(struct drm_device *dev)
 void msm_gem_shrinker_cleanup(struct drm_device *dev)
 {
 	struct msm_drm_private *priv = dev->dev_private;
 
-	WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
-	unregister_shrinker(&priv->shrinker);
+	if (priv->shrinker.nr_deferred) {
+		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
+		unregister_shrinker(&priv->shrinker);
+	}
 }
...
...
@@ -931,7 +931,7 @@ static void radeon_connector_unregister(struct drm_connector *connector)
 {
 	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
-	if (radeon_connector->ddc_bus->has_aux) {
+	if (radeon_connector->ddc_bus && radeon_connector->ddc_bus->has_aux) {
 		drm_dp_aux_unregister(&radeon_connector->ddc_bus->aux);
 		radeon_connector->ddc_bus->has_aux = false;
 	}
...
...
@@ -104,6 +104,14 @@ static const char radeon_family_name[][16] = {
 	"LAST",
 };
 
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_has_atpx_dgpu_power_cntl(void);
+bool radeon_is_atpx_hybrid(void);
+#else
+static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool radeon_is_atpx_hybrid(void) { return false; }
+#endif
+
 #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
 #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
@@ -160,6 +168,11 @@ static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
 	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
 		rdev->flags &= ~RADEON_IS_PX;
+
+	/* disable PX is the system doesn't support dGPU power control or hybrid gfx */
+	if (!radeon_is_atpx_hybrid() &&
+	    !radeon_has_atpx_dgpu_power_cntl())
+		rdev->flags &= ~RADEON_IS_PX;
 }
...
...
@@ -98,17 +98,23 @@ static int udl_parse_vendor_descriptor(struct drm_device *dev,
 static int udl_select_std_channel(struct udl_device *udl)
 {
 	int ret;
-	u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
-			    0x1C, 0x88, 0x5E, 0x15,
-			    0x60, 0xFE, 0xC6, 0x97,
-			    0x16, 0x3D, 0x47, 0xF2};
+	static const u8 set_def_chn[] = {0x57, 0xCD, 0xDC, 0xA7,
+					 0x1C, 0x88, 0x5E, 0x15,
+					 0x60, 0xFE, 0xC6, 0x97,
+					 0x16, 0x3D, 0x47, 0xF2};
+	void *sendbuf;
+
+	sendbuf = kmemdup(set_def_chn, sizeof(set_def_chn), GFP_KERNEL);
+	if (!sendbuf)
+		return -ENOMEM;
 
 	ret = usb_control_msg(udl->udev,
 			      usb_sndctrlpipe(udl->udev, 0),
 			      NR_USB_REQUEST_CHANNEL,
 			      (USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
-			      set_def_chn, sizeof(set_def_chn),
+			      sendbuf, sizeof(set_def_chn),
 			      USB_CTRL_SET_TIMEOUT);
+
+	kfree(sendbuf);
 	return ret < 0 ? ret : 0;
 }
...
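The udl fix addresses a DMA-safety rule, not just style: usb_control_msg() hands its buffer to the host controller for DMA, so the buffer must come from the heap. The old on-stack array, and equally a static const template living in .rodata, cannot be passed directly, hence the kmemdup() bounce buffer. The pattern in isolation (a shortened template; error handling as in the hunk above):

static const u8 tmpl[] = { 0x57, 0xCD, 0xDC, 0xA7 };	/* shortened template */
void *buf;
int ret;

buf = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);	/* DMA-able heap copy */
if (!buf)
	return -ENOMEM;

ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
		      USB_DIR_OUT | USB_TYPE_VENDOR, 0, 0,
		      buf, sizeof(tmpl), USB_CTRL_SET_TIMEOUT);
kfree(buf);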