/*
 * Copyright 2012-16 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#include "dce_clocks.h"
#include "dm_services.h"
#include "reg_helper.h"
29
#include "fixed31_32.h"
30 31
#include "bios_parser_interface.h"
#include "dc.h"
32
#include "dmcu.h"
33 34 35
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
#endif
36
#include "core_types.h"
37
#include "dc_types.h"
38
#include "dal_asic_id.h"
39

40
/* Recover the containing struct dce_dccg from its embedded struct dccg. */
#define TO_DCE_CLOCKS(clocks)\
	container_of(clocks, struct dce_dccg, base)

/* Register-access helpers: the REG_GET/REG_SET family below expects a
 * local `clk_dce` pointer (struct dce_dccg *) to be in scope. */
#define REG(reg) \
	(clk_dce->regs->reg)

#undef FN
#define FN(reg_name, field_name) \
	clk_dce->clk_shift->field_name, clk_dce->clk_mask->field_name

#define CTX \
	clk_dce->base.ctx

/* Logging helper: expects a local `clk` pointer (struct dccg *) in scope. */
#define DC_LOGGER \
	clk->ctx->logger
54

55
/* Max clock values for each state indexed by "enum clocks_state":
 * (all values in kHz) */
static const struct state_dependent_clocks dce80_max_clks_by_state[] = {
/* ClocksStateInvalid - should not be used */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateUltraLow - not expected to be used for DCE 8.0 */
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/* ClocksStateLow */
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000},
/* ClocksStateNominal */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 },
/* ClocksStatePerformance */
{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } };

68
/* DCE 11.0 max clock values per state, indexed by enum clocks_state (kHz). */
static const struct state_dependent_clocks dce110_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateLow*/
{ .display_clk_khz = 352000, .pixel_clk_khz = 330000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 467000, .pixel_clk_khz = 400000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 643000, .pixel_clk_khz = 400000 } };

80
/* DCE 11.2 max clock values per state, indexed by enum clocks_state (kHz). */
static const struct state_dependent_clocks dce112_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 389189, .pixel_clk_khz = 346672 },
/*ClocksStateLow*/
{ .display_clk_khz = 459000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 667000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1132000, .pixel_clk_khz = 600000 } };

92
/* DCE 12.0 max clock values per state, indexed by enum clocks_state (kHz). */
static const struct state_dependent_clocks dce120_max_clks_by_state[] = {
/*ClocksStateInvalid - should not be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateUltraLow - currently by HW design team not supposed to be used*/
{ .display_clk_khz = 0, .pixel_clk_khz = 0 },
/*ClocksStateLow*/
{ .display_clk_khz = 460000, .pixel_clk_khz = 400000 },
/*ClocksStateNominal*/
{ .display_clk_khz = 670000, .pixel_clk_khz = 600000 },
/*ClocksStatePerformance*/
{ .display_clk_khz = 1133000, .pixel_clk_khz = 600000 } };

104 105
/* Starting DID for each range */
enum dentist_base_divider_id {
	DENTIST_BASE_DID_1 = 0x08,
	DENTIST_BASE_DID_2 = 0x40,
	DENTIST_BASE_DID_3 = 0x60,
	DENTIST_MAX_DID    = 0x80
};

/* Starting point and step size for each divider range.*/
enum dentist_divider_range {
	DENTIST_DIVIDER_RANGE_1_START = 8,   /* 2.00  */
	DENTIST_DIVIDER_RANGE_1_STEP  = 1,   /* 0.25  */
	DENTIST_DIVIDER_RANGE_2_START = 64,  /* 16.00 */
	DENTIST_DIVIDER_RANGE_2_STEP  = 2,   /* 0.50  */
	DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
	DENTIST_DIVIDER_RANGE_3_STEP  = 4,   /* 1.00  */
	DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};

/* Translate a DENTIST divider ID (DID) into the scaled divider value
 * (real divider times DENTIST_DIVIDER_RANGE_SCALE_FACTOR).  Out-of-range
 * DIDs are clamped to [DENTIST_BASE_DID_1, DENTIST_MAX_DID]. */
static int dentist_get_divider_from_did(int did)
{
	int clamped_did = did;

	if (clamped_did < DENTIST_BASE_DID_1)
		clamped_did = DENTIST_BASE_DID_1;
	else if (clamped_did > DENTIST_MAX_DID)
		clamped_did = DENTIST_MAX_DID;

	/* Each range has its own base divider and per-DID step size. */
	if (clamped_did < DENTIST_BASE_DID_2)
		return DENTIST_DIVIDER_RANGE_1_START + DENTIST_DIVIDER_RANGE_1_STEP
						* (clamped_did - DENTIST_BASE_DID_1);

	if (clamped_did < DENTIST_BASE_DID_3)
		return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
						* (clamped_did - DENTIST_BASE_DID_2);

	return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
					* (clamped_did - DENTIST_BASE_DID_3);
}

142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169
/* SW will adjust DP REF Clock average value for all purposes
 * (DP DTO / DP Audio DTO and DP GTC)
 if clock is spread for all cases:
 -if SS enabled on DP Ref clock and HW de-spreading enabled with SW
 calculations for DS_INCR/DS_MODULO (this is planned to be default case)
 -if SS enabled on DP Ref clock and HW de-spreading enabled with HW
 calculations (not planned to be used, but average clock should still
 be valid)
 -if SS enabled on DP Ref clock and HW de-spreading disabled
 (should not be case with CIK) then SW should program all rates
 generated according to average value (case as with previous ASICs)
  */
static int dccg_adjust_dp_ref_freq_for_ss(struct dce_dccg *clk_dce, int dp_ref_clk_khz)
{
	if (clk_dce->ss_on_dprefclk && clk_dce->dprefclk_ss_divider != 0) {
		struct fixed31_32 ss_percentage = dc_fixpt_div_int(
				dc_fixpt_from_fraction(clk_dce->dprefclk_ss_percentage,
							clk_dce->dprefclk_ss_divider), 200);
		struct fixed31_32 adj_dp_ref_clk_khz;

		ss_percentage = dc_fixpt_sub(dc_fixpt_one, ss_percentage);
		adj_dp_ref_clk_khz = dc_fixpt_mul_int(ss_percentage, dp_ref_clk_khz);
		dp_ref_clk_khz = dc_fixpt_floor(adj_dp_ref_clk_khz);
	}
	return dp_ref_clk_khz;
}

/* Compute the current DP reference clock in kHz from the DENTIST DFS
 * divider programmed in hardware, then apply the spread-spectrum
 * adjustment (if any).
 */
static int dce_get_dp_ref_freq_khz(struct dccg *clk)
{
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
	int dprefclk_wdivider;
	int dprefclk_src_sel;
	int dp_ref_clk_khz = 600000;
	int target_div;

	/* ASSERT DP Reference Clock source is from DFS*/
	REG_GET(DPREFCLK_CNTL, DPREFCLK_SRC_SEL, &dprefclk_src_sel);
	ASSERT(dprefclk_src_sel == 0);

	/* Read the mmDENTIST_DISPCLK_CNTL to get the currently
	 * programmed DID DENTIST_DPREFCLK_WDIVIDER*/
	REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider);

	/* Convert DENTIST_DPREFCLK_WDIVIDER to an actual (scaled) divider */
	target_div = dentist_get_divider_from_did(dprefclk_wdivider);

	/* Calculate the current DFS clock, in kHz; the scale factor undoes
	 * the scaling in dentist_get_divider_from_did(). */
	dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR
		* clk_dce->dentist_vco_freq_khz) / target_div;

	return dccg_adjust_dp_ref_freq_for_ss(clk_dce, dp_ref_clk_khz);
}

195
/* DCE12: the DP reference clock is a fixed 600 MHz; only the
 * spread-spectrum adjustment applies. */
static int dce12_get_dp_ref_freq_khz(struct dccg *clk)
{
	return dccg_adjust_dp_ref_freq_for_ss(TO_DCE_CLOCKS(clk), 600000);
}
201

202
static enum dm_pp_clocks_state dce_get_required_clocks_state(
203
	struct dccg *clk,
204
	struct dc_clocks *req_clocks)
205
{
206
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
207 208 209 210 211 212 213 214
	int i;
	enum dm_pp_clocks_state low_req_clk;

	/* Iterate from highest supported to lowest valid state, and update
	 * lowest RequiredState with the lowest state that satisfies
	 * all required clocks
	 */
	for (i = clk->max_clks_state; i >= DM_PP_CLOCKS_STATE_ULTRA_LOW; i--)
215
		if (req_clocks->dispclk_khz >
216
				clk_dce->max_clks_by_state[i].display_clk_khz
217
			|| req_clocks->phyclk_khz >
218 219 220 221 222
				clk_dce->max_clks_by_state[i].pixel_clk_khz)
			break;

	low_req_clk = i + 1;
	if (low_req_clk > clk->max_clks_state) {
223 224 225 226 227 228
		/* set max clock state for high phyclock, invalid on exceeding display clock */
		if (clk_dce->max_clks_by_state[clk->max_clks_state].display_clk_khz
				< req_clocks->dispclk_khz)
			low_req_clk = DM_PP_CLOCKS_STATE_INVALID;
		else
			low_req_clk = clk->max_clks_state;
229 230 231 232 233
	}

	return low_req_clk;
}

234
static int dce_set_clock(
235
	struct dccg *clk,
236
	int requested_clk_khz)
237
{
238
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
239 240
	struct bp_pixel_clock_parameters pxl_clk_params = { 0 };
	struct dc_bios *bp = clk->ctx->dc_bios;
241
	int actual_clock = requested_clk_khz;
242 243 244

	/* Make sure requested clock isn't lower than minimum threshold*/
	if (requested_clk_khz > 0)
245
		requested_clk_khz = max(requested_clk_khz,
246 247 248 249 250 251 252 253 254 255 256 257 258
				clk_dce->dentist_vco_freq_khz / 64);

	/* Prepare to program display clock*/
	pxl_clk_params.target_pixel_clock = requested_clk_khz;
	pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;

	bp->funcs->program_display_engine_pll(bp, &pxl_clk_params);

	if (clk_dce->dfs_bypass_enabled) {

		/* Cache the fixed display clock*/
		clk_dce->dfs_bypass_disp_clk =
			pxl_clk_params.dfs_bypass_display_clock;
259
		actual_clock = pxl_clk_params.dfs_bypass_display_clock;
260 261 262 263 264 265
	}

	/* from power down, we need mark the clock state as ClocksStateNominal
	 * from HWReset, so when resume we will call pplib voltage regulator.*/
	if (requested_clk_khz == 0)
		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
266
	return actual_clock;
267 268
}

269
static int dce_psr_set_clock(
270
	struct dccg *clk,
271
	int requested_clk_khz)
272
{
273
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
274
	struct dc_context *ctx = clk_dce->base.ctx;
275
	struct dc *core_dc = ctx->dc;
276
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
277
	int actual_clk_khz = requested_clk_khz;
278

279
	actual_clk_khz = dce_set_clock(clk, requested_clk_khz);
280

281 282
	dmcu->funcs->set_psr_wait_loop(dmcu, actual_clk_khz / 1000 / 7);
	return actual_clk_khz;
283 284
}

285
static int dce112_set_clock(
286
	struct dccg *clk,
287
	int requested_clk_khz)
288
{
289
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(clk);
290 291
	struct bp_set_dce_clock_parameters dce_clk_params;
	struct dc_bios *bp = clk->ctx->dc_bios;
292
	struct dc *core_dc = clk->ctx->dc;
293
	struct dmcu *dmcu = core_dc->res_pool->dmcu;
294
	int actual_clock = requested_clk_khz;
295 296 297 298 299
	/* Prepare to program display clock*/
	memset(&dce_clk_params, 0, sizeof(dce_clk_params));

	/* Make sure requested clock isn't lower than minimum threshold*/
	if (requested_clk_khz > 0)
300
		requested_clk_khz = max(requested_clk_khz,
301
				clk_dce->dentist_vco_freq_khz / 62);
302 303 304 305 306 307

	dce_clk_params.target_clock_frequency = requested_clk_khz;
	dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
308
	actual_clock = dce_clk_params.target_clock_frequency;
309 310 311 312 313 314 315 316 317 318

	/* from power down, we need mark the clock state as ClocksStateNominal
	 * from HWReset, so when resume we will call pplib voltage regulator.*/
	if (requested_clk_khz == 0)
		clk->cur_min_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;

	/*Program DP ref Clock*/
	/*VBIOS will determine DPREFCLK frequency, so we don't set it*/
	dce_clk_params.target_clock_frequency = 0;
	dce_clk_params.clock_type = DCECLOCK_TYPE_DPREFCLK;
319 320 321 322 323 324
	if (!ASICREV_IS_VEGA20_P(clk->ctx->asic_id.hw_internal_rev))
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK =
			(dce_clk_params.pll_id ==
					CLOCK_SOURCE_COMBO_DISPLAY_PLL0);
	else
		dce_clk_params.flags.USE_GENLOCK_AS_SOURCE_FOR_DPREFCLK = false;
325 326

	bp->funcs->set_dce_clock(bp, &dce_clk_params);
327

328 329 330 331 332 333
	if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) {
		if (clk_dce->dfs_bypass_disp_clk != actual_clock)
			dmcu->funcs->set_psr_wait_loop(dmcu,
					actual_clock / 1000 / 7);
	}

334
	clk_dce->dfs_bypass_disp_clk = actual_clock;
335
	return actual_clock;
336 337
}

338
/* Read the DENTIST VCO frequency and per-state display clock limits
 * from the VBIOS integrated info table, falling back to firmware info
 * and finally a 3.6 GHz default for the VCO, and latch whether DFS
 * bypass may be used.
 */
static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
	struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
	struct integrated_info info = { { { 0 } } };
	struct dc_firmware_info fw_info = { { 0 } };
	int i;

	if (bp->integrated_info)
		info = *bp->integrated_info;

	/* VCO frequency (kHz): integrated info, then firmware info, then
	 * the hard-coded 3.6 GHz fallback. */
	clk_dce->dentist_vco_freq_khz = info.dentist_vco_freq;
	if (clk_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_dce->dentist_vco_freq_khz =
			fw_info.smu_gpu_pll_output_freq;
		if (clk_dce->dentist_vco_freq_khz == 0)
			clk_dce->dentist_vco_freq_khz = 3600000;
	}

	/*update the maximum display clock for each power state*/
	for (i = 0; i < NUMBER_OF_DISP_CLK_VOLTAGE; ++i) {
		enum dm_pp_clocks_state clk_state = DM_PP_CLOCKS_STATE_INVALID;

		/* Map the voltage-table row index to a pplib clock state. */
		switch (i) {
		case 0:
			clk_state = DM_PP_CLOCKS_STATE_ULTRA_LOW;
			break;

		case 1:
			clk_state = DM_PP_CLOCKS_STATE_LOW;
			break;

		case 2:
			clk_state = DM_PP_CLOCKS_STATE_NOMINAL;
			break;

		case 3:
			clk_state = DM_PP_CLOCKS_STATE_PERFORMANCE;
			break;

		default:
			clk_state = DM_PP_CLOCKS_STATE_INVALID;
			break;
		}

		/*Do not allow bad VBIOS/SBIOS to override with invalid values,
		 * check for > 100MHz*/
		/* NOTE(review): for rows beyond 3, clk_state stays
		 * DM_PP_CLOCKS_STATE_INVALID yet is still used as a table
		 * index below — confirm NUMBER_OF_DISP_CLK_VOLTAGE <= 4 or
		 * that overwriting the invalid entry is intended. */
		if (info.disp_clk_voltage[i].max_supported_clk >= 100000)
			clk_dce->max_clks_by_state[clk_state].display_clk_khz =
				info.disp_clk_voltage[i].max_supported_clk;
	}

	/* DFS bypass is honoured only when the VBIOS advertises it and it
	 * is not disabled via debug options. */
	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_dce->dfs_bypass_enabled = true;
}

396
static void dce_clock_read_ss_info(struct dce_dccg *clk_dce)
397 398 399 400 401 402
{
	struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
	int ss_info_num = bp->funcs->get_ss_entry_number(
			bp, AS_SIGNAL_TYPE_GPU_PLL);

	if (ss_info_num) {
403
		struct spread_spectrum_info info = { { 0 } };
404 405 406 407 408 409 410 411 412 413
		enum bp_result result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_GPU_PLL, 0, &info);

		/* Based on VBIOS, VBIOS will keep entry for GPU PLL SS
		 * even if SS not enabled and in that case
		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
		 * that SS is enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
414 415
			clk_dce->ss_on_dprefclk = true;
			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;
416 417

			if (info.type.CENTER_MODE == 0) {
418
				/* TODO: Currently for DP Reference clock we
419 420
				 * need only SS percentage for
				 * downspread */
421
				clk_dce->dprefclk_ss_percentage =
422 423
						info.spread_spectrum_percentage;
			}
424 425

			return;
426 427
		}

428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448
		result = bp->funcs->get_spread_spectrum_info(
				bp, AS_SIGNAL_TYPE_DISPLAY_PORT, 0, &info);

		/* Based on VBIOS, VBIOS will keep entry for DPREFCLK SS
		 * even if SS not enabled and in that case
		 * SSInfo.spreadSpectrumPercentage !=0 would be sign
		 * that SS is enabled
		 */
		if (result == BP_RESULT_OK &&
				info.spread_spectrum_percentage != 0) {
			clk_dce->ss_on_dprefclk = true;
			clk_dce->dprefclk_ss_divider = info.spread_percentage_divider;

			if (info.type.CENTER_MODE == 0) {
				/* Currently for DP Reference clock we
				 * need only SS percentage for
				 * downspread */
				clk_dce->dprefclk_ss_percentage =
						info.spread_spectrum_percentage;
			}
		}
449 450 451
	}
}

452 453 454 455 456
/* Decide whether a newly computed clock should be programmed: raising
 * the clock is always allowed; lowering it only when safe_to_lower. */
static inline bool should_set_clock(bool safe_to_lower, int calc_clk, int cur_clk)
{
	if (calc_clk > cur_clk)
		return true;
	return safe_to_lower && calc_clk < cur_clk;
}

457
/* DCE12 clock update: program dispclk and raise pplib voltage requests
 * for display and PHY clocks that changed. */
static void dce12_update_clocks(struct dccg *dccg,
			struct dc_clocks *new_clocks,
			bool safe_to_lower)
{
	struct dm_pp_clock_for_voltage_req voltage_req = {0};

	/* Display clock: reprogram hardware, cache, and notify pplib. */
	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
		voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
		voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
		dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;

		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &voltage_req);
	}

	/* PHY clock: no direct programming here, only a voltage request. */
	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
		voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAYPHYCLK;
		voltage_req.clocks_in_khz = new_clocks->phyclk_khz;
		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;

		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &voltage_req);
	}
}

481
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
/* Pick the intermediate dispclk value to use when ramping dispclk (on
 * DCN1, dppclk is derived from dispclk with an optional /2 divider).
 * Returns new_clocks->dispclk_khz when the transition can be done in a
 * single step, otherwise max_supported_dppclk_khz as the intermediate
 * threshold for a two-step ramp.
 */
static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
{
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
	bool dispclk_increase = new_clocks->dispclk_khz > dccg->clks.dispclk_khz;
	int disp_clk_threshold = new_clocks->max_supported_dppclk_khz;
	bool cur_dpp_div = dccg->clks.dispclk_khz > dccg->clks.dppclk_khz;

	/* increase clock, looking for div is 0 for current, request div is 1*/
	if (dispclk_increase) {
		/* already divided by 2, no need to reach target clk with 2 steps*/
		if (cur_dpp_div)
			return new_clocks->dispclk_khz;

		/* request disp clk is lower than maximum supported dpp clk,
		 * no need to reach target clk with two steps.
		 */
		if (new_clocks->dispclk_khz <= disp_clk_threshold)
			return new_clocks->dispclk_khz;

		/* target dpp clk not request divided by 2, still within threshold */
		if (!request_dpp_div)
			return new_clocks->dispclk_khz;

	} else {
		/* decrease clock, looking for current dppclk divided by 2,
		 * request dppclk not divided by 2.
		 */

		/* current dpp clk not divided by 2, no need to ramp*/
		if (!cur_dpp_div)
			return new_clocks->dispclk_khz;

		/* current disp clk is lower than current maximum dpp clk,
		 * no need to ramp
		 */
		if (dccg->clks.dispclk_khz <= disp_clk_threshold)
			return new_clocks->dispclk_khz;

		/* request dpp clk need to be divided by 2 */
		if (request_dpp_div)
			return new_clocks->dispclk_khz;
	}

	/* Two-step ramp required: pause at the dppclk threshold first. */
	return disp_clk_threshold;
}

/* Ramp dispclk to its target in at most two steps while switching the
 * per-pipe dppclk divider, so dppclk stays within limits throughout the
 * transition.
 */
static void dcn1_ramp_up_dispclk_with_dpp(struct dccg *dccg, struct dc_clocks *new_clocks)
{
	struct dc *dc = dccg->ctx->dc;
	int dispclk_to_dpp_threshold = dcn1_determine_dppclk_threshold(dccg, new_clocks);
	bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
	int i;

	/* set disp clk to dpp clk threshold */
	dccg->funcs->set_dispclk(dccg, dispclk_to_dpp_threshold);

	/* update request dpp clk division option */
	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];

		/* skip pipes with nothing to display */
		if (!pipe_ctx->plane_state)
			continue;

		pipe_ctx->plane_res.dpp->funcs->dpp_dppclk_control(
				pipe_ctx->plane_res.dpp,
				request_dpp_div,
				true);
	}

	/* If target clk not same as dppclk threshold, set to target clock */
	if (dispclk_to_dpp_threshold != new_clocks->dispclk_khz)
		dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);

	/* cache the newly applied values */
	dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
	dccg->clks.dppclk_khz = new_clocks->dppclk_khz;
	dccg->clks.max_supported_dppclk_khz = new_clocks->max_supported_dppclk_khz;
}

/* DCN1 composite clock update: caches new clock values, forwards them
 * to pplib/SMU, and ramps dispclk/dppclk.  The DCFCLK-based voltage
 * request is sent before a clock increase and after a decrease so the
 * voltage is always sufficient for the active clocks.
 */
static void dcn1_update_clocks(struct dccg *dccg,
			struct dc_clocks *new_clocks,
			bool safe_to_lower)
{
	struct dc *dc = dccg->ctx->dc;
	struct pp_smu_display_requirement_rv *smu_req_cur =
			&dc->res_pool->pp_smu_req;
	struct pp_smu_display_requirement_rv smu_req = *smu_req_cur;
	struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
	struct dm_pp_clock_for_voltage_req clock_voltage_req = {0};
	bool send_request_to_increase = false;
	bool send_request_to_lower = false;

	/* a non-zero phyclk is used as "at least one display active" */
	if (new_clocks->phyclk_khz)
		smu_req.display_count = 1;
	else
		smu_req.display_count = 0;

	/* any clock going up means voltage must be raised first */
	if (new_clocks->dispclk_khz > dccg->clks.dispclk_khz
			|| new_clocks->phyclk_khz > dccg->clks.phyclk_khz
			|| new_clocks->fclk_khz > dccg->clks.fclk_khz
			|| new_clocks->dcfclk_khz > dccg->clks.dcfclk_khz)
		send_request_to_increase = true;

	if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, dccg->clks.phyclk_khz)) {
		dccg->clks.phyclk_khz = new_clocks->phyclk_khz;

		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->fclk_khz, dccg->clks.fclk_khz)) {
		dccg->clks.fclk_khz = new_clocks->fclk_khz;
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_FCLK;
		clock_voltage_req.clocks_in_khz = new_clocks->fclk_khz;
		smu_req.hard_min_fclk_khz = new_clocks->fclk_khz;

		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, dccg->clks.dcfclk_khz)) {
		dccg->clks.dcfclk_khz = new_clocks->dcfclk_khz;
		smu_req.hard_min_dcefclk_khz = new_clocks->dcfclk_khz;

		send_request_to_lower = true;
	}

	if (should_set_clock(safe_to_lower,
			new_clocks->dcfclk_deep_sleep_khz, dccg->clks.dcfclk_deep_sleep_khz)) {
		dccg->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
		/* NOTE(review): a _khz value is assigned to a _mhz field here
		 * — confirm whether a /1000 conversion is missing or the field
		 * name is stale. */
		smu_req.min_deep_sleep_dcefclk_mhz = new_clocks->dcfclk_deep_sleep_khz;

		send_request_to_lower = true;
	}

	/* make sure dcf clk is before dpp clk to
	 * make sure we have enough voltage to run dpp clk
	 */
	if (send_request_to_increase) {
		/*use dcfclk to request voltage*/
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
		/* NOTE(review): pp_smu itself is not NULL-checked before this
		 * dereference — confirm res_pool->pp_smu is always present on
		 * DCN1. */
		if (pp_smu->set_display_requirement)
			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
	}

	/* dcn1 dppclk is tied to dispclk */
	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
		dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;

		send_request_to_lower = true;
	}

	/* lowering: send the voltage request only after clocks dropped */
	if (!send_request_to_increase && send_request_to_lower) {
		/*use dcfclk to request voltage*/
		clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DCFCLK;
		clock_voltage_req.clocks_in_khz = dcn_find_dcfclk_suits_all(dc, new_clocks);
		dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
		if (pp_smu->set_display_requirement)
			pp_smu->set_display_requirement(&pp_smu->pp_smu, &smu_req);
	}


	*smu_req_cur = smu_req;
}
#endif
648

649
/* Legacy DCE clock update: request the power level that satisfies the
 * new clocks from pplib, then program dispclk if it changed. */
static void dce_update_clocks(struct dccg *dccg,
			struct dc_clocks *new_clocks,
			bool safe_to_lower)
{
	struct dm_pp_power_level_change_request level_change_req;
	enum dm_pp_clocks_state required_state =
			dce_get_required_clocks_state(dccg, new_clocks);

	level_change_req.power_level = required_state;
	/* get max clock state from PPLIB */
	if ((required_state < dccg->cur_min_clks_state && safe_to_lower)
			|| required_state > dccg->cur_min_clks_state) {
		if (dm_pp_apply_power_level_change_request(dccg->ctx, &level_change_req))
			dccg->cur_min_clks_state = required_state;
	}

	if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
		dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
		dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
	}
}

669
/* Per-ASIC-family vtables: DP reference clock query, dispclk
 * programming, and the composite update_clocks entry point. */
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
static const struct display_clock_funcs dcn1_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.set_dispclk = dce112_set_clock,
	.update_clocks = dcn1_update_clocks
};
#endif

static const struct display_clock_funcs dce120_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.set_dispclk = dce112_set_clock,
	.update_clocks = dce12_update_clocks
};

static const struct display_clock_funcs dce112_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.set_dispclk = dce112_set_clock,
	.update_clocks = dce_update_clocks
};

static const struct display_clock_funcs dce110_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	/* DCE11 routes dispclk changes through the PSR-aware setter. */
	.set_dispclk = dce_psr_set_clock,
	.update_clocks = dce_update_clocks
};

/* default (DCE8) vtable */
static const struct display_clock_funcs dce_funcs = {
	.get_dp_ref_clk_frequency = dce_get_dp_ref_freq_khz,
	.set_dispclk = dce_set_clock,
	.update_clocks = dce_update_clocks
};

701 702
/* Common constructor: wires up register maps, seeds spread-spectrum and
 * clock-state defaults, then reads VBIOS integrated/SS info.  Callers
 * override base->funcs afterwards for non-default ASIC families.
 */
static void dce_dccg_construct(
	struct dce_dccg *clk_dce,
	struct dc_context *ctx,
	const struct dccg_registers *regs,
	const struct dccg_shift *clk_shift,
	const struct dccg_mask *clk_mask)
{
	struct dccg *base = &clk_dce->base;

	base->ctx = ctx;
	base->funcs = &dce_funcs;

	clk_dce->regs = regs;
	clk_dce->clk_shift = clk_shift;
	clk_dce->clk_mask = clk_mask;

	clk_dce->dfs_bypass_disp_clk = 0;

	/* spread-spectrum defaults: disabled (divider default matches the
	 * scaling used in dccg_adjust_dp_ref_freq_for_ss) */
	clk_dce->dprefclk_ss_percentage = 0;
	clk_dce->dprefclk_ss_divider = 1000;
	clk_dce->ss_on_dprefclk = false;

	base->max_clks_state = DM_PP_CLOCKS_STATE_NOMINAL;
	base->cur_min_clks_state = DM_PP_CLOCKS_STATE_INVALID;

	dce_clock_read_integrated_info(clk_dce);
	dce_clock_read_ss_info(clk_dce);
}

730
struct dccg *dce_dccg_create(
731
	struct dc_context *ctx,
732 733 734
	const struct dccg_registers *regs,
	const struct dccg_shift *clk_shift,
	const struct dccg_mask *clk_mask)
735
{
736
	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
737 738 739 740 741 742

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

743 744 745 746
	memcpy(clk_dce->max_clks_by_state,
		dce80_max_clks_by_state,
		sizeof(dce80_max_clks_by_state));

747
	dce_dccg_construct(
748 749 750 751 752
		clk_dce, ctx, regs, clk_shift, clk_mask);

	return &clk_dce->base;
}

753
struct dccg *dce110_dccg_create(
754
	struct dc_context *ctx,
755 756 757
	const struct dccg_registers *regs,
	const struct dccg_shift *clk_shift,
	const struct dccg_mask *clk_mask)
758
{
759
	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
760 761 762 763 764 765

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

766 767 768 769
	memcpy(clk_dce->max_clks_by_state,
		dce110_max_clks_by_state,
		sizeof(dce110_max_clks_by_state));

770
	dce_dccg_construct(
771 772 773 774 775 776 777
		clk_dce, ctx, regs, clk_shift, clk_mask);

	clk_dce->base.funcs = &dce110_funcs;

	return &clk_dce->base;
}

778
struct dccg *dce112_dccg_create(
779
	struct dc_context *ctx,
780 781 782
	const struct dccg_registers *regs,
	const struct dccg_shift *clk_shift,
	const struct dccg_mask *clk_mask)
783
{
784
	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
785 786 787 788 789 790

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

791 792 793 794
	memcpy(clk_dce->max_clks_by_state,
		dce112_max_clks_by_state,
		sizeof(dce112_max_clks_by_state));

795
	dce_dccg_construct(
796 797 798 799 800 801 802
		clk_dce, ctx, regs, clk_shift, clk_mask);

	clk_dce->base.funcs = &dce112_funcs;

	return &clk_dce->base;
}

803
struct dccg *dce120_dccg_create(struct dc_context *ctx)
804
{
805
	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
806 807 808 809 810 811 812 813 814 815

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

	memcpy(clk_dce->max_clks_by_state,
		dce120_max_clks_by_state,
		sizeof(dce120_max_clks_by_state));

816
	dce_dccg_construct(
817
		clk_dce, ctx, NULL, NULL, NULL);
818 819 820

	clk_dce->base.funcs = &dce120_funcs;

821 822 823
	return &clk_dce->base;
}

824
#ifdef CONFIG_DRM_AMD_DC_DCN1_0
825
struct dccg *dcn1_dccg_create(struct dc_context *ctx)
826
{
827 828 829
	struct dc_debug *debug = &ctx->dc->debug;
	struct dc_bios *bp = ctx->dc_bios;
	struct dc_firmware_info fw_info = { { 0 } };
830
	struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
831 832 833 834 835 836

	if (clk_dce == NULL) {
		BREAK_TO_DEBUGGER();
		return NULL;
	}

837
	clk_dce->base.ctx = ctx;
838
	clk_dce->base.funcs = &dcn1_funcs;
839

840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860
	clk_dce->dfs_bypass_disp_clk = 0;

	clk_dce->dprefclk_ss_percentage = 0;
	clk_dce->dprefclk_ss_divider = 1000;
	clk_dce->ss_on_dprefclk = false;

	if (bp->integrated_info)
		clk_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq;
	if (clk_dce->dentist_vco_freq_khz == 0) {
		bp->funcs->get_firmware_info(bp, &fw_info);
		clk_dce->dentist_vco_freq_khz = fw_info.smu_gpu_pll_output_freq;
		if (clk_dce->dentist_vco_freq_khz == 0)
			clk_dce->dentist_vco_freq_khz = 3600000;
	}

	if (!debug->disable_dfs_bypass && bp->integrated_info)
		if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
			clk_dce->dfs_bypass_enabled = true;

	dce_clock_read_ss_info(clk_dce);

861 862
	return &clk_dce->base;
}
863
#endif
864

865
void dce_dccg_destroy(struct dccg **dccg)
866
{
867
	struct dce_dccg *clk_dce = TO_DCE_CLOCKS(*dccg);
868

869
	kfree(clk_dce);
870
	*dccg = NULL;
871
}