Commit dc7d19d2 authored by Dave Airlie

Merge tag 'amd-drm-fixes-5.18-2022-04-06' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-5.18-2022-04-06:

amdgpu:
- VCN 3.0 fixes
- DCN 3.1.5 fix
- Misc display fixes
- GC 10.3 golden register fix
- Suspend fix
- SMU 10 fix

amdkfd:
- Event fix
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220406170441.5779-1-alexander.deucher@amd.com
@@ -300,8 +300,8 @@ void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned int ring_size, struct amdgpu_irq_src *irq_src,
unsigned int irq_type, unsigned int prio,
unsigned int max_dw, struct amdgpu_irq_src *irq_src,
unsigned int irq_type, unsigned int hw_prio,
atomic_t *sched_score);
void amdgpu_ring_fini(struct amdgpu_ring *ring);
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
@@ -159,6 +159,7 @@
#define AMDGPU_VCN_MULTI_QUEUE_FLAG (1 << 8)
#define AMDGPU_VCN_SW_RING_FLAG (1 << 9)
#define AMDGPU_VCN_FW_LOGGING_FLAG (1 << 10)
#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1 << 11)
#define AMDGPU_VCN_IB_FLAG_DECODE_BUFFER 0x00000001
#define AMDGPU_VCN_CMD_FLAG_MSG_BUFFER 0x00000001
@@ -279,6 +280,11 @@ struct amdgpu_fw_shared_fw_logging {
uint32_t size;
};
struct amdgpu_fw_shared_smu_interface_info {
uint8_t smu_interface_type;
uint8_t padding[3];
};
struct amdgpu_fw_shared {
uint32_t present_flag_0;
uint8_t pad[44];
@@ -287,6 +293,7 @@ struct amdgpu_fw_shared {
struct amdgpu_fw_shared_multi_queue multi_queue;
struct amdgpu_fw_shared_sw_ring sw_ring;
struct amdgpu_fw_shared_fw_logging fw_log;
struct amdgpu_fw_shared_smu_interface_info smu_interface_info;
};
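For readers unfamiliar with the shared-memory handshake, here is a minimal userspace sketch (not driver code) of how the new present flag gates the interface-type field added above; the bit position and field name come from the hunks, the sample values are illustrative only:

```c
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_VCN_SMU_VERSION_INFO_FLAG (1u << 11)

/* Trimmed-down model of the shared-memory layout shown above. */
struct toy_fw_shared {
	uint32_t present_flag_0;
	uint8_t  smu_interface_type;   /* valid only when the flag bit is set */
};

int main(void)
{
	struct toy_fw_shared fw = {
		.present_flag_0 = AMDGPU_VCN_SMU_VERSION_INFO_FLAG,
		.smu_interface_type = 2,   /* e.g. what a VCN 3.1.2 part would advertise */
	};

	if (fw.present_flag_0 & AMDGPU_VCN_SMU_VERSION_INFO_FLAG)
		printf("SMU interface type: %u\n", fw.smu_interface_type);
	else
		printf("SMU interface info not present\n");
	return 0;
}
```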
struct amdgpu_vcn_fwlog {
@@ -3293,7 +3293,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_3[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000242),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3429,7 +3429,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_6[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000042),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x00000044),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -3454,7 +3454,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_7[] = {
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000041),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210),
SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210),
@@ -219,6 +219,11 @@ static int vcn_v3_0_sw_init(void *handle)
cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
fw_shared->present_flag_0 |= AMDGPU_VCN_SMU_VERSION_INFO_FLAG;
if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 2))
fw_shared->smu_interface_info.smu_interface_type = 2;
else if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 1, 1))
fw_shared->smu_interface_info.smu_interface_type = 1;
if (amdgpu_vcnfw_log)
amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
@@ -1483,7 +1488,7 @@ static int vcn_v3_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE};
uint32_t tmp;
vcn_v3_0_pause_dpg_mode(adev, 0, &state);
vcn_v3_0_pause_dpg_mode(adev, inst_idx, &state);
/* Wait for power status to be 1 */
SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
@@ -247,15 +247,6 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
return ret;
}
ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
O_RDWR);
if (ret < 0) {
kfifo_free(&client->fifo);
kfree(client);
return ret;
}
*fd = ret;
init_waitqueue_head(&client->wait_queue);
spin_lock_init(&client->lock);
client->events = 0;
@@ -265,5 +256,20 @@ int kfd_smi_event_open(struct kfd_dev *dev, uint32_t *fd)
list_add_rcu(&client->list, &dev->smi_clients);
spin_unlock(&dev->smi_lock);
ret = anon_inode_getfd(kfd_smi_name, &kfd_smi_ev_fops, (void *)client,
O_RDWR);
if (ret < 0) {
spin_lock(&dev->smi_lock);
list_del_rcu(&client->list);
spin_unlock(&dev->smi_lock);
synchronize_rcu();
kfifo_free(&client->fifo);
kfree(client);
return ret;
}
*fd = ret;
return 0;
}
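The reordering above appears intended to make fd creation the final step, so a client that becomes visible to userspace is already fully initialized, and a late failure can still be unwound (including removal from the client list). A userspace toy model of that "initialize fully, publish last, unwind on failure" pattern, with made-up names and no kernel APIs:

```c
#include <stdio.h>
#include <stdlib.h>

struct client {
	char *fifo;        /* stands in for the client kfifo */
	int   registered;  /* stands in for membership on dev->smi_clients */
};

static struct client *registry[4];

/* Stand-in for anon_inode_getfd(); forced to fail here to show the unwind. */
static int publish_handle(struct client *c)
{
	(void)c;
	return -1;
}

static int client_open(void)
{
	struct client *c = calloc(1, sizeof(*c));
	int handle;

	if (!c)
		return -1;

	c->fifo = malloc(1024);          /* 1: allocate per-client resources */
	if (!c->fifo) {
		free(c);
		return -1;
	}

	registry[0] = c;                 /* 2: make the client visible */
	c->registered = 1;

	handle = publish_handle(c);      /* 3: hand out the handle last */
	if (handle < 0) {                /* unwind in reverse order */
		registry[0] = NULL;
		free(c->fifo);
		free(c);
		return handle;
	}
	return handle;
}

int main(void)
{
	printf("client_open() -> %d\n", client_open());
	return 0;
}
```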
@@ -2714,7 +2714,8 @@ static int dm_resume(void *handle)
* this is the case when traversing through already created
* MST connectors, should be skipped
*/
if (aconnector->mst_port)
if (aconnector->dc_link &&
aconnector->dc_link->type == dc_connection_mst_branch)
continue;
mutex_lock(&aconnector->hpd_lock);
@@ -3972,7 +3973,7 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap
max - min);
}
static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
int bl_idx,
u32 user_brightness)
{
@@ -4003,7 +4004,8 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
return rc ? 0 : 1;
if (rc)
dm->actual_brightness[bl_idx] = user_brightness;
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
@@ -9947,7 +9949,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
/* restore the backlight level */
for (i = 0; i < dm->num_of_edps; i++) {
if (dm->backlight_dev[i] &&
(amdgpu_dm_backlight_get_level(dm, i) != dm->brightness[i]))
(dm->actual_brightness[i] != dm->brightness[i]))
amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
}
#endif
@@ -540,6 +540,12 @@ struct amdgpu_display_manager {
* cached backlight values.
*/
u32 brightness[AMDGPU_DM_MAX_NUM_EDP];
/**
* @actual_brightness:
*
* last successfully applied backlight values.
*/
u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP];
};
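A minimal standalone sketch of how the two arrays appear to interact: `brightness[]` caches the requested level, `actual_brightness[]` records the last level that was applied successfully, and the commit path re-applies only when they differ. Array size and values below are illustrative only:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define NUM_EDP 2

static uint32_t brightness[NUM_EDP];        /* cached user requests */
static uint32_t actual_brightness[NUM_EDP]; /* last successfully applied */

/* Pretend hardware write; returns true on success. */
static bool hw_set_backlight(int idx, uint32_t level)
{
	printf("eDP[%d]: applying %u\n", idx, level);
	return true;
}

static void set_level(int idx, uint32_t user_level)
{
	if (hw_set_backlight(idx, user_level))
		actual_brightness[idx] = user_level;   /* update only on success */
}

static void commit_tail_restore(void)
{
	for (int i = 0; i < NUM_EDP; i++)
		if (actual_brightness[i] != brightness[i])
			set_level(i, brightness[i]);
}

int main(void)
{
	brightness[0] = 200;          /* user asked for 200 ... */
	actual_brightness[0] = 0;     /* ... but nothing has been applied yet */
	commit_tail_restore();        /* re-applies 200 on eDP[0] */
	return 0;
}
```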
enum dsc_clock_force_state {
@@ -436,57 +436,84 @@ static void dcn315_clk_mgr_helper_populate_bw_params(
struct integrated_info *bios_info,
const DpmClocks_315_t *clock_table)
{
int i, j;
int i;
struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
uint32_t max_dispclk = 0, max_dppclk = 0;
j = -1;
ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);
/* Find lowest DPM, FCLK is filled in reverse order*/
for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
if (clock_table->DfPstateTable[i].FClk != 0) {
j = i;
break;
uint32_t max_dispclk, max_dppclk, max_pstate, max_socclk, max_fclk = 0, min_pstate = 0;
struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
max_socclk = find_max_clk_value(clock_table->SocClocks, clock_table->NumSocClkLevelsEnabled);
/* Find highest fclk pstate */
for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
if (clock_table->DfPstateTable[i].FClk > max_fclk) {
max_fclk = clock_table->DfPstateTable[i].FClk;
max_pstate = i;
}
}
if (j == -1) {
/* clock table is all 0s, just use our own hardcode */
ASSERT(0);
return;
}
bw_params->clk_table.num_entries = j + 1;
/* dispclk and dppclk can be max at any voltage, same number of levels for both */
if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
} else {
ASSERT(0);
}
/* For 315 we want to base clock table on dcfclk, need at least one entry regardless of pmfw table */
for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
int j;
uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
int temp;
for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
if (clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]
&& clock_table->DfPstateTable[j].FClk < min_fclk) {
min_fclk = clock_table->DfPstateTable[j].FClk;
min_pstate = j;
}
}
bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = 1;
temp = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
if (temp)
bw_params->clk_table.entries[i].dcfclk_mhz = temp;
temp = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);
if (temp)
bw_params->clk_table.entries[i].socclk_mhz = temp;
};
/* Make sure to include at least one entry and highest pstate */
if (max_pstate != min_pstate) {
bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(
clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(
clock_table, clock_table->SocClocks, clock_table->DfPstateTable[max_pstate].Voltage);
bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
bw_params->clk_table.entries[i].wck_ratio = 1;
i++;
}
bw_params->clk_table.num_entries = i;
/* Include highest socclk */
if (bw_params->clk_table.entries[i-1].socclk_mhz < max_socclk)
bw_params->clk_table.entries[i-1].socclk_mhz = max_socclk;
/* Set any 0 clocks to max default setting. Not an issue for
* power since we aren't doing switching in such case anyway
*/
for (i = 0; i < bw_params->clk_table.num_entries; i++) {
if (!bw_params->clk_table.entries[i].fclk_mhz) {
bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
bw_params->clk_table.entries[i].voltage = def_max.voltage;
}
if (!bw_params->clk_table.entries[i].dcfclk_mhz)
bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
if (!bw_params->clk_table.entries[i].socclk_mhz)
bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
if (!bw_params->clk_table.entries[i].dispclk_mhz)
bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
if (!bw_params->clk_table.entries[i].dppclk_mhz)
bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
}
bw_params->vram_type = bios_info->memory_type;
bw_params->num_channels = bios_info->ma_channel_number;
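A simplified standalone model of the per-DCFCLK selection loop above: for each DCFCLK level, take the lowest FCLK among the DF pstates whose voltage fits under that level's SOC voltage. The tables below are invented for illustration and do not come from any real PMFW dump:

```c
#include <stdint.h>
#include <stdio.h>

struct df_pstate { uint32_t fclk, voltage; };

/* Invented sample tables, in the spirit of DfPstateTable / SocVoltage. */
static const struct df_pstate pstates[] = {
	{ 1200, 900 }, { 800, 800 }, { 400, 700 },
};
static const uint32_t soc_voltage[] = { 700, 800, 900 };

/* Lowest FCLK whose pstate voltage fits under the given SOC voltage. */
static uint32_t min_fclk_for_voltage(uint32_t volt)
{
	uint32_t best = pstates[0].fclk;   /* mirror the driver: start from entry 0 */

	for (size_t j = 1; j < sizeof(pstates) / sizeof(pstates[0]); j++)
		if (pstates[j].voltage <= volt && pstates[j].fclk < best)
			best = pstates[j].fclk;
	return best;
}

int main(void)
{
	for (size_t i = 0; i < sizeof(soc_voltage) / sizeof(soc_voltage[0]); i++)
		printf("dcfclk level %zu (voltage %u): fclk %u MHz\n",
		       i, soc_voltage[i], min_fclk_for_voltage(soc_voltage[i]));
	return 0;
}
```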
@@ -1496,10 +1496,6 @@ bool dc_validate_boot_timing(const struct dc *dc,
if (!link->link_enc->funcs->is_dig_enabled(link->link_enc))
return false;
/* Check for FEC status*/
if (link->link_enc->funcs->fec_is_active(link->link_enc))
return false;
enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc);
if (enc_inst == ENGINE_ID_UNKNOWN)
@@ -5216,6 +5216,62 @@ static void retrieve_cable_id(struct dc_link *link)
&link->dpcd_caps.cable_id, &usbc_cable_id);
}
/* DPRX may take some time to respond to AUX messages after HPD asserted.
* If AUX read unsuccessful, try to wake unresponsive DPRX by toggling DPCD SET_POWER (0x600).
*/
static enum dc_status wa_try_to_wake_dprx(struct dc_link *link, uint64_t timeout_ms)
{
enum dc_status status = DC_ERROR_UNEXPECTED;
uint8_t dpcd_data = 0;
uint64_t start_ts = 0;
uint64_t current_ts = 0;
uint64_t time_taken_ms = 0;
enum dc_connection_type type = dc_connection_none;
status = core_link_read_dpcd(
link,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
&dpcd_data,
sizeof(dpcd_data));
if (status != DC_OK) {
DC_LOG_WARNING("%s: Read DPCD LTTPR_CAP failed - try to toggle DPCD SET_POWER for %lld ms.",
__func__,
timeout_ms);
start_ts = dm_get_timestamp(link->ctx);
do {
if (!dc_link_detect_sink(link, &type) || type == dc_connection_none)
break;
dpcd_data = DP_SET_POWER_D3;
status = core_link_write_dpcd(
link,
DP_SET_POWER,
&dpcd_data,
sizeof(dpcd_data));
dpcd_data = DP_SET_POWER_D0;
status = core_link_write_dpcd(
link,
DP_SET_POWER,
&dpcd_data,
sizeof(dpcd_data));
current_ts = dm_get_timestamp(link->ctx);
time_taken_ms = div_u64(dm_get_elapse_time_in_ns(link->ctx, current_ts, start_ts), 1000000);
} while (status != DC_OK && time_taken_ms < timeout_ms);
DC_LOG_WARNING("%s: DPCD SET_POWER %s after %lld ms%s",
__func__,
(status == DC_OK) ? "succeeded" : "failed",
time_taken_ms,
(type == dc_connection_none) ? ". Unplugged." : ".");
}
return status;
}
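The workaround above is a bounded retry loop: keep toggling SET_POWER until the receiver responds or the elapsed time exceeds the budget. A small userspace sketch of the same timing pattern, with clock_gettime standing in for dm_get_timestamp and a stubbed "try once" step (everything here is illustrative, not DC code):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000u + ts.tv_nsec / 1000000u;
}

/* Stand-in for one D3/D0 SET_POWER toggle followed by a retry of the read. */
static bool try_wake_once(int attempt)
{
	return attempt >= 3;   /* pretend the sink wakes up on the 4th try */
}

static bool wake_with_timeout(uint64_t timeout_ms)
{
	uint64_t start = now_ms();
	int attempt = 0;
	bool ok;

	do {
		ok = try_wake_once(attempt++);
	} while (!ok && now_ms() - start < timeout_ms);

	printf("%s after %d attempt(s)\n", ok ? "woke" : "gave up", attempt);
	return ok;
}

int main(void)
{
	wake_with_timeout(1500);   /* mirrors LINK_AUX_WAKE_TIMEOUT_MS */
	return 0;
}
```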
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -5251,6 +5307,15 @@ static bool retrieve_link_cap(struct dc_link *link)
dc_link_aux_try_to_configure_timeout(link->ddc,
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
/* Try to ensure AUX channel active before proceeding. */
if (link->dc->debug.aux_wake_wa.bits.enable_wa) {
uint64_t timeout_ms = link->dc->debug.aux_wake_wa.bits.timeout_ms;
if (link->dc->debug.aux_wake_wa.bits.use_default_timeout)
timeout_ms = LINK_AUX_WAKE_TIMEOUT_MS;
status = wa_try_to_wake_dprx(link, timeout_ms);
}
is_lttpr_present = dp_retrieve_lttpr_cap(link);
/* Read DP tunneling information. */
status = dpcd_get_tunneling_device_data(link);
@@ -526,6 +526,22 @@ union dpia_debug_options {
uint32_t raw;
};
/* AUX wake work around options
* 0: enable/disable work around
* 1: use default timeout LINK_AUX_WAKE_TIMEOUT_MS
* 15-2: reserved
* 31-16: timeout in ms
*/
union aux_wake_wa_options {
struct {
uint32_t enable_wa : 1;
uint32_t use_default_timeout : 1;
uint32_t rsvd: 14;
uint32_t timeout_ms : 16;
} bits;
uint32_t raw;
};
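A minimal standalone sketch of the bit layout documented above, decoding a raw debug value with portable shifts and masks; the sample value is made up:

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors the documented layout of union aux_wake_wa_options:
 *   bit  0     : enable_wa
 *   bit  1     : use_default_timeout
 *   bits 15-2  : reserved
 *   bits 31-16 : timeout in ms
 */
static void decode_aux_wake_wa(uint32_t raw)
{
	uint32_t enable_wa           = raw & 0x1;
	uint32_t use_default_timeout = (raw >> 1) & 0x1;
	uint32_t timeout_ms          = (raw >> 16) & 0xffff;

	printf("enable_wa=%u use_default_timeout=%u timeout_ms=%u\n",
	       enable_wa, use_default_timeout, timeout_ms);
}

int main(void)
{
	/* Hypothetical value: workaround enabled, explicit 500 ms timeout. */
	decode_aux_wake_wa((500u << 16) | 0x1);
	return 0;
}
```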
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
@@ -712,6 +728,7 @@ struct dc_debug_options {
#endif
bool apply_vendor_specific_lttpr_wa;
bool extended_blank_optimization;
union aux_wake_wa_options aux_wake_wa;
bool ignore_dpref_ss;
uint8_t psr_power_use_phy_fsm;
};
@@ -1497,16 +1497,12 @@ void dcn10_init_hw(struct dc *dc)
link->link_status.link_active = true;
}
/* Power gate DSCs */
if (!is_optimized_init_done) {
for (i = 0; i < res_pool->res_cap->num_dsc; i++)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
}
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -1559,8 +1555,6 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1976,7 +1976,6 @@ int dcn20_validate_apply_pipe_split_flags(
/*If need split for odm but 4 way split already*/
if (split[i] == 2 && ((pipe->prev_odm_pipe && !pipe->prev_odm_pipe->prev_odm_pipe)
|| !pipe->next_odm_pipe)) {
ASSERT(0); /* NOT expected yet */
merge[i] = true; /* 4 -> 2 ODM */
} else if (split[i] == 0 && pipe->prev_odm_pipe) {
ASSERT(0); /* NOT expected yet */
@@ -644,7 +644,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.clock_trace = true,
.disable_pplib_clock_request = true,
.min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_DYNAMIC,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
@@ -547,6 +547,9 @@ void dcn30_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -624,8 +627,6 @@ void dcn30_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -199,6 +199,9 @@ void dcn31_init_hw(struct dc *dc)
/* we want to turn off all dp displays before doing detection */
dc_link_blank_all_dp_displays(dc);
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
@@ -248,8 +251,6 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
if (hws->funcs.enable_power_gating_plane)
hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (!dcb->funcs->is_accelerated_mode(dcb) && dc->res_pool->hubbub->funcs->init_watermarks)
dc->res_pool->hubbub->funcs->init_watermarks(dc->res_pool->hubbub);
@@ -338,20 +339,20 @@ void dcn31_enable_power_gating_plane(
bool enable)
{
bool force_on = true; /* disable power gating */
uint32_t org_ip_request_cntl = 0;
if (enable && !hws->ctx->dc->debug.disable_hubp_power_gate)
force_on = false;
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
/* DCHUBP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN0_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN2_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
/* DPP0/1/2/3/4/5 */
REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN1_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN3_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
force_on = true; /* disable power gating */
if (enable && !hws->ctx->dc->debug.disable_dsc_power_gate)
@@ -359,11 +360,11 @@ void dcn31_enable_power_gating_plane(
/* DCS0/1/2/3/4/5 */
REG_UPDATE(DOMAIN16_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN16_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN17_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN17_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
REG_UPDATE(DOMAIN18_PG_CONFIG, DOMAIN_POWER_FORCEON, force_on);
REG_WAIT(DOMAIN18_PG_STATUS, DOMAIN_PGFSM_PWR_STATUS, force_on, 1, 1000);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
}
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx)
@@ -124,7 +124,6 @@ static bool optc31_enable_crtc(struct timing_generator *optc)
static bool optc31_disable_crtc(struct timing_generator *optc)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
/* disable otg request until end of the first line
* in the vertical blank region
*/
@@ -138,6 +137,7 @@ static bool optc31_disable_crtc(struct timing_generator *optc)
REG_WAIT(OTG_CLOCK_CONTROL,
OTG_BUSY, 0,
1, 100000);
optc1_clear_optc_underflow(optc);
return true;
}
......@@ -158,6 +158,9 @@ static bool optc31_immediate_disable_crtc(struct timing_generator *optc)
OTG_BUSY, 0,
1, 100000);
/* clear the false state */
optc1_clear_optc_underflow(optc);
return true;
}
@@ -2032,7 +2032,9 @@ bool dcn31_validate_bandwidth(struct dc *dc,
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
// Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
if (pipe_cnt == 0)
@@ -864,11 +864,11 @@ static bool setup_dsc_config(
min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h);
}
is_dsc_possible = (min_slices_h <= max_slices_h);
if (pic_width % min_slices_h != 0)
min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first?
is_dsc_possible = (min_slices_h <= max_slices_h);
if (min_slices_h == 0 && max_slices_h == 0)
is_dsc_possible = false;
@@ -33,6 +33,7 @@
#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
#define LINK_AUX_WAKE_TIMEOUT_MS 1500 // Timeout when trying to wake unresponsive DPRX.
struct dc_link;
struct dc_stream_state;
@@ -173,6 +173,17 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
/* Don't use baco for reset in S3.
* This is a workaround for some platforms
* where entering BACO during suspend
* seems to cause reboots or hangs.
* This might be related to the fact that BACO controls
* power to the whole GPU including devices like audio and USB.
* Powering down/up everything may adversely affect these other
* devices. Needs more investigation.
*/
if (adev->in_s3)
return false;
mutex_lock(&adev->pm.mutex);
@@ -773,13 +773,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinFclkByFreq,
hwmgr->display_config->num_display > 3 ?
data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk :
(data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk / 100) :
min_mclk,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk,
data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetHardMinVcn,
@@ -792,11 +792,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxFclkByFreq,
data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk,
data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxSocclkByFreq,
data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk,
data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk / 100,
NULL);
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetSoftMaxVcn,
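For context on the smu10 hunk above: the division by 100 reads as a unit conversion, on the assumption that the vdd_dep table entries are stored in 10 kHz units while the PPSMC frequency messages expect MHz; that is an inference from the diff, not something stated in it. A trivial sanity check:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed conversion: table value in 10 kHz units -> MHz for the SMC message. */
static uint32_t clk_10khz_to_mhz(uint32_t clk_10khz)
{
	return clk_10khz / 100;   /* 100 * 10 kHz = 1 MHz */
}

int main(void)
{
	/* Hypothetical table entry: 120000 * 10 kHz = 1200 MHz. */
	printf("%u MHz\n", clk_10khz_to_mhz(120000));
	return 0;
}
```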