diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 9925d0e01c04b5c0857db28563bc4cef9561ee1e..c874dfd099d398394d4f3ddb76adc654d8bbb70b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -850,6 +850,8 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
 	if (!dcb->funcs->is_accelerated_mode(dcb))
 		dc->hwss.enable_accelerated_mode(dc);
 
+
+
 	for (i = 0; i < context->stream_count; i++) {
 		const struct dc_sink *sink = context->streams[i]->sink;
 
@@ -928,7 +930,6 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
 
 	return (result == DC_OK);
 }
-
 bool dc_post_update_surfaces_to_stream(struct dc *dc)
 {
 	int i;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 3891e91184851959e1f063fb3117c88f6aa542bd..f7434ae4aec2e79a02afc760db01e024f9c7976f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -617,32 +617,6 @@ static void plane_atomic_disable(struct dc *dc,
 		verify_allow_pstate_change_high(dc->hwseq);
 }
 
-/*
- * kill power to plane hw
- * note: cannot power down until plane is disable
- */
-static void plane_atomic_power_down(struct dc *dc, int fe_idx)
-{
-	struct dce_hwseq *hws = dc->hwseq;
-	struct dpp *dpp = dc->res_pool->dpps[fe_idx];
-
-	if (REG(DC_IP_REQUEST_CNTL)) {
-		REG_SET(DC_IP_REQUEST_CNTL, 0,
-				IP_REQUEST_EN, 1);
-		dpp_pg_control(hws, fe_idx, false);
-		hubp_pg_control(hws, fe_idx, false);
-		dpp->funcs->dpp_reset(dpp);
-		REG_SET(DC_IP_REQUEST_CNTL, 0,
-				IP_REQUEST_EN, 0);
-		dm_logger_write(dc->ctx->logger, LOG_DEBUG,
-				"Power gated front end %d\n", fe_idx);
-
-		if (dc->debug.sanity_checks)
-			verify_allow_pstate_change_high(dc->hwseq);
-	}
-}
-
-
 static void reset_front_end(
 		struct dc *dc,
 		int fe_idx)
@@ -792,56 +766,6 @@ static void reset_hw_ctx_wrap(
 {
 	int i;
 
-	/* Reset Front End*/
-	/* Lock*/
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-		struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
-
-		if (cur_pipe_ctx->stream)
-			tg->funcs->lock(tg);
-	}
-	/* Disconnect*/
-	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
-		struct pipe_ctx *pipe_ctx_old =
-			&dc->current_state->res_ctx.pipe_ctx[i];
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->stream ||
-				!pipe_ctx->plane_state ||
-				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
-
-			plane_atomic_disconnect(dc, i);
-		}
-	}
-	/* Unlock*/
-	for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) {
-		struct pipe_ctx *cur_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-		struct timing_generator *tg = cur_pipe_ctx->stream_res.tg;
-
-		if (cur_pipe_ctx->stream)
-			tg->funcs->unlock(tg);
-	}
-
-	/* Disable and Powerdown*/
-	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
-		struct pipe_ctx *pipe_ctx_old =
-			&dc->current_state->res_ctx.pipe_ctx[i];
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		/*if (!pipe_ctx_old->stream)
-			continue;*/
-
-		if (pipe_ctx->stream && pipe_ctx->plane_state
-				&& !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
-			continue;
-
-		plane_atomic_disable(dc, i);
-
-		if (!pipe_ctx->stream || !pipe_ctx->plane_state)
-			plane_atomic_power_down(dc, i);
-	}
-
 	/* Reset Back End*/
 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
 		struct pipe_ctx *pipe_ctx_old =