/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"

#include "atom.h"
#include "atom-bits.h"
#include <drm/drm_dp_helper.h>

/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE

static char *voltage_names[] = {
        "0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
        "0dB", "3.5dB", "6dB", "9.5dB"
};

/***** radeon AUX functions *****/

/* Atom needs data in little endian format
 * so swap as appropriate when copying data to
 * or from atom. Note that atom operates on
 * dw units.
 */
void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
	u32 *dst32, *src32;
	int i;

	memcpy(src_tmp, src, num_bytes);
	src32 = (u32 *)src_tmp;
	dst32 = (u32 *)dst_tmp;
	if (to_le) {
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = cpu_to_le32(src32[i]);
		memcpy(dst, dst_tmp, num_bytes);
	} else {
		u8 dws = num_bytes & ~3;
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = le32_to_cpu(src32[i]);
		memcpy(dst, dst_tmp, dws);
		if (num_bytes % 4) {
			for (i = 0; i < (num_bytes % 4); i++)
				dst[dws+i] = dst_tmp[dws+i];
		}
	}
#else
	memcpy(dst, src, num_bytes);
#endif
}

union aux_channel_transaction {
	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
	PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
};

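/* Execute a single transaction on the DP AUX channel via the
 * ProcessAuxChannelTransaction atom command table: the request is copied
 * into the atom scratch buffer, the table is executed, and the reply
 * status plus any returned data are copied back out.  Returns the number
 * of bytes received or a negative error code on timeout/busy/error.
 */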
static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
				 u8 *send, int send_bytes,
				 u8 *recv, int recv_size,
				 u8 delay, u8 *ack)
{
	struct drm_device *dev = chan->dev;
	struct radeon_device *rdev = dev->dev_private;
	union aux_channel_transaction args;
	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
	unsigned char *base;
	int recv_bytes;

	memset(&args, 0, sizeof(args));

	base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);

	radeon_atom_copy_swap(base, send, send_bytes, true);

	args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
	args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
	args.v1.ucDataOutLen = 0;
	args.v1.ucChannelID = chan->rec.i2c_id;
	args.v1.ucDelay = delay / 10;
	if (ASIC_IS_DCE4(rdev))
		args.v2.ucHPD_ID = chan->rec.hpd;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);

	*ack = args.v1.ucReplyStatus;

	/* timeout */
	if (args.v1.ucReplyStatus == 1) {
		DRM_DEBUG_KMS("dp_aux_ch timeout\n");
		return -ETIMEDOUT;
	}

	/* flags not zero */
	if (args.v1.ucReplyStatus == 2) {
		DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
		return -EBUSY;
	}

	/* error */
	if (args.v1.ucReplyStatus == 3) {
		DRM_DEBUG_KMS("dp_aux_ch error\n");
		return -EIO;
	}

	recv_bytes = args.v1.ucDataOutLen;
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	if (recv && recv_size)
		radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);

	return recv_bytes;
}

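/* Perform a native AUX write of up to 16 bytes to the sink's DPCD,
 * retrying on busy or deferred replies.  Returns the number of bytes
 * written on success or a negative value on failure.
 */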
static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector,
				      u16 address, u8 *send, u8 send_bytes, u8 delay)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	int ret;
	u8 msg[20];
	int msg_bytes = send_bytes + 4;
	u8 ack;
	unsigned retry;

	if (send_bytes > 16)
		return -1;

	msg[0] = address;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_WRITE << 4;
	msg[3] = (msg_bytes << 4) | (send_bytes - 1);
	memcpy(&msg[4], send, send_bytes);

	for (retry = 0; retry < 4; retry++) {
		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
					    msg, msg_bytes, NULL, 0, delay, &ack);
		if (ret == -EBUSY)
			continue;
		else if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			return send_bytes;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(400);
		else
			return -EIO;
	}

	return -EIO;
}

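/* Perform a native AUX read from the sink's DPCD, retrying on busy or
 * deferred replies.  Returns the number of bytes read on success or a
 * negative error code on failure.
 */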
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
				     u16 address, u8 *recv, int recv_bytes, u8 delay)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 msg[4];
	int msg_bytes = 4;
	u8 ack;
	int ret;
	unsigned retry;

	msg[0] = address;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_READ << 4;
	msg[3] = (msg_bytes << 4) | (recv_bytes - 1);

	for (retry = 0; retry < 4; retry++) {
		ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
					    msg, msg_bytes, recv, recv_bytes, delay, &ack);
		if (ret == -EBUSY)
			continue;
		else if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			return ret;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(400);
		else if (ret == 0)
			return -EPROTO;
		else
			return -EIO;
	}

	return -EIO;
}

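/* Convenience wrappers for single-byte DPCD register accesses. */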
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
				 u16 reg, u8 val)
{
	radeon_dp_aux_native_write(radeon_connector, reg, &val, 1, 0);
}

static u8 radeon_read_dpcd_reg(struct radeon_connector *radeon_connector,
			       u16 reg)
{
	u8 val = 0;

	radeon_dp_aux_native_read(radeon_connector, reg, &val, 1, 0);

	return val;
}

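/* Single-byte I2C-over-AUX transaction used by the DP i2c algorithm to
 * reach devices (e.g. EDID EEPROMs) behind the AUX channel.  Native and
 * I2C reply fields are checked separately; defers are retried, nacks and
 * invalid replies abort the transfer.
 */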
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
			 u8 write_byte, u8 *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
	u16 address = algo_data->address;
	u8 msg[5];
	u8 reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes = 1;
	int ret;
	u8 ack;

	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[2] = AUX_I2C_READ << 4;
	else
		msg[2] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[2] |= AUX_I2C_MOT << 4;

	msg[0] = address;
	msg[1] = address >> 8;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg_bytes = 5;
		msg[3] = msg_bytes << 4;
		msg[4] = write_byte;
		break;
	case MODE_I2C_READ:
		msg_bytes = 4;
		msg[3] = msg_bytes << 4;
		break;
	default:
		msg_bytes = 4;
		msg[3] = 3 << 4;
		break;
	}

	for (retry = 0; retry < 4; retry++) {
		ret = radeon_process_aux_ch(auxch,
					    msg, msg_bytes, reply, reply_bytes, 0, &ack);
		if (ret == -EBUSY)
			continue;
		else if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (ack & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_ch native defer\n");
			udelay(400);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}

		switch (ack & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ)
				*read_byte = reply[0];
			return ret;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(400);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", ack);
			return -EREMOTEIO;
		}
	}

	DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
	return -EREMOTEIO;
}

/***** general DP utility functions *****/

#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
#define DP_PRE_EMPHASIS_MAX    DP_TRAIN_PRE_EMPHASIS_9_5

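/* Build the training set requested by the sink: use the highest voltage
 * swing and pre-emphasis level requested across all active lanes and
 * flag when the maximum supported setting has been reached.
 */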
static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
				int lane_count,
				u8 train_set[4])
{
	u8 v = 0;
	u8 p = 0;
	int lane;

	for (lane = 0; lane < lane_count; lane++) {
		u8 this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
		u8 this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);

		DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
			  lane,
			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	if (v >= DP_VOLTAGE_MAX)
		v |= DP_TRAIN_MAX_SWING_REACHED;

	if (p >= DP_PRE_EMPHASIS_MAX)
		p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);

	for (lane = 0; lane < 4; lane++)
		train_set[lane] = v | p;
}

/* convert bits per color to bits per pixel */
/* get bpc from the EDID */
static int convert_bpc_to_bpp(int bpc)
{
	if (bpc == 0)
		return 24;
	else
		return bpc * 3;
}

/* get the max pix clock supported by the link rate and lane num */
static int dp_get_max_dp_pix_clock(int link_rate,
				   int lane_num,
				   int bpp)
{
	return (link_rate * lane_num * 8) / bpp;
}

/***** radeon specific DP functions *****/

/* First get the min lane# when low rate is used according to pixel clock
 * (prefer low rate), second check max lane# supported by DP panel,
 * if the max lane# < low rate lane# then use max lane# instead.
 */
static int radeon_dp_get_dp_lane_number(struct drm_connector *connector,
					u8 dpcd[DP_DPCD_SIZE],
					int pix_clock)
{
	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
	int max_link_rate = drm_dp_max_link_rate(dpcd);
	int max_lane_num = drm_dp_max_lane_count(dpcd);
	int lane_num;
	int max_dp_pix_clock;

	for (lane_num = 1; lane_num < max_lane_num; lane_num <<= 1) {
		max_dp_pix_clock = dp_get_max_dp_pix_clock(max_link_rate, lane_num, bpp);
		if (pix_clock <= max_dp_pix_clock)
			break;
	}

	return lane_num;
}

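/* Pick the lowest standard link rate that can carry the requested pixel
 * clock at the selected lane count; a NUTMEG bridge chip is always
 * driven at 2.70 GHz.
 */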
static int radeon_dp_get_dp_link_clock(struct drm_connector *connector,
				       u8 dpcd[DP_DPCD_SIZE],
				       int pix_clock)
{
	int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
	int lane_num, max_pix_clock;

	if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
	    ENCODER_OBJECT_ID_NUTMEG)
		return 270000;

	lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
	max_pix_clock = dp_get_max_dp_pix_clock(162000, lane_num, bpp);
	if (pix_clock <= max_pix_clock)
		return 162000;
	max_pix_clock = dp_get_max_dp_pix_clock(270000, lane_num, bpp);
	if (pix_clock <= max_pix_clock)
		return 270000;
	if (radeon_connector_is_dp12_capable(connector)) {
		max_pix_clock = dp_get_max_dp_pix_clock(540000, lane_num, bpp);
		if (pix_clock <= max_pix_clock)
			return 540000;
	}

	return drm_dp_max_link_rate(dpcd);
}

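/* Issue a command to the DPEncoderService atom command table and return
 * the status byte it reports.
 */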
static u8 radeon_dp_encoder_service(struct radeon_device *rdev,
				    int action, int dp_clock,
				    u8 ucconfig, u8 lane_num)
{
	DP_ENCODER_SERVICE_PARAMETERS args;
	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);

	memset(&args, 0, sizeof(args));
	args.ucLinkClock = dp_clock / 10;
	args.ucConfig = ucconfig;
	args.ucAction = action;
	args.ucLaneNum = lane_num;
	args.ucStatus = 0;

	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
	return args.ucStatus;
}

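/* Query the type of sink attached to this DP connector via the
 * DPEncoderService table.
 */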
u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	struct drm_device *dev = radeon_connector->base.dev;
	struct radeon_device *rdev = dev->dev_private;

	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}

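/* If the sink advertises OUI support, read and log the sink and branch
 * device OUIs for debugging.
 */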
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 buf[3];

	if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);
}

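/* Read the sink's DPCD receiver capability block over AUX and cache it
 * in the connector.  Returns true on success.
 */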
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
	u8 msg[DP_DPCD_SIZE];
	int ret, i;

	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, msg,
					DP_DPCD_SIZE, 0);
	if (ret > 0) {
		memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
		DRM_DEBUG_KMS("DPCD: ");
		for (i = 0; i < DP_DPCD_SIZE; i++)
			DRM_DEBUG_KMS("%02x ", msg[i]);
		DRM_DEBUG_KMS("\n");

		radeon_dp_probe_oui(radeon_connector);

		return true;
	}
	dig_connector->dpcd[0] = 0;
	return false;
}

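/* Determine which panel mode (internal DP1/DP2 or external DP) the
 * encoder should use, based on any DP bridge chip present and the eDP
 * configuration capability reported in the DPCD.
 */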
int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
			     struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
	u8 tmp;

	if (!ASIC_IS_DCE4(rdev))
		return panel_mode;

	if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
		/* DP bridge chips */
		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
		if (tmp & 1)
			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
		else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
			 (dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
			panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
		else
			panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
	} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
		/* eDP */
		tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
		if (tmp & 1)
			panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
	}

	return panel_mode;
}

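/* Cache the link rate and lane count needed to drive the given mode so
 * the encoder setup code can program the link accordingly.
 */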
void radeon_dp_set_link_config(struct drm_connector *connector,
			       const struct drm_display_mode *mode)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_connector_atom_dig *dig_connector;

	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
	    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
		dig_connector->dp_clock =
			radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);
		dig_connector->dp_lane_count =
			radeon_dp_get_dp_lane_number(connector, dig_connector->dpcd, mode->clock);
	}
}

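/* Reject modes that would require a 5.4 GHz link rate on connectors or
 * hardware that are not DP 1.2 capable.
 */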
int radeon_dp_mode_valid_helper(struct drm_connector *connector,
				struct drm_display_mode *mode)
{
	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
	struct radeon_connector_atom_dig *dig_connector;
	int dp_clock;

	if (!radeon_connector->con_priv)
		return MODE_CLOCK_HIGH;
	dig_connector = radeon_connector->con_priv;

	dp_clock =
		radeon_dp_get_dp_link_clock(connector, dig_connector->dpcd, mode->clock);

	if ((dp_clock == 540000) &&
	    (!radeon_connector_is_dp12_capable(connector)))
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
				      u8 link_status[DP_LINK_STATUS_SIZE])
{
	int ret;
	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
					link_status, DP_LINK_STATUS_SIZE, 100);
	if (ret <= 0) {
		return false;
	}

	DRM_DEBUG_KMS("link status %6ph\n", link_status);
	return true;
}

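/* Check the current link status and report whether channel equalization
 * has been lost, i.e. whether the link needs retraining.
 */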
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;

	if (!radeon_dp_get_link_status(radeon_connector, link_status))
		return false;
	if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count))
		return false;
	return true;
}

struct radeon_dp_link_train_info {
	struct radeon_device *rdev;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	struct radeon_connector *radeon_connector;
	int enc_id;
	int dp_clock;
	int dp_lane_count;
	bool tp3_supported;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
	u8 train_set[4];
	u8 link_status[DP_LINK_STATUS_SIZE];
	u8 tries;
	bool use_dpencoder;
};

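/* Program the current voltage swing / pre-emphasis settings on both the
 * source transmitter and the sink.
 */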
static void radeon_dp_update_vs_emph(struct radeon_dp_link_train_info *dp_info)
{
	/* set the initial vs/emph on the source */
	atombios_dig_transmitter_setup(dp_info->encoder,
				       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
				       0, dp_info->train_set[0]); /* sets all lanes at once */

	/* set the vs/emph on the sink */
	radeon_dp_aux_native_write(dp_info->radeon_connector, DP_TRAINING_LANE0_SET,
				   dp_info->train_set, dp_info->dp_lane_count, 0);
}

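/* Select the given training pattern on the source encoder and enable it
 * on the sink.
 */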
static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
{
	int rtp = 0;

	/* set training pattern on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder) {
		switch (tp) {
		case DP_TRAINING_PATTERN_1:
			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1;
			break;
		case DP_TRAINING_PATTERN_2:
			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2;
			break;
		case DP_TRAINING_PATTERN_3:
			rtp = ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN3;
			break;
		}
		atombios_dig_encoder_setup(dp_info->encoder, rtp, 0);
	} else {
		switch (tp) {
		case DP_TRAINING_PATTERN_1:
			rtp = 0;
			break;
		case DP_TRAINING_PATTERN_2:
			rtp = 1;
			break;
		}
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
					  dp_info->dp_clock, dp_info->enc_id, rtp);
	}

	/* enable training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_TRAINING_PATTERN_SET, tp);
}

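/* Prepare source and sink for link training: power up the sink, set
 * downspread, panel mode, lane count and link rate, start training on
 * the source and make sure no training pattern is active on the sink.
 */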
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
	u8 tmp;

	/* power up the sink */
	if (dp_info->dpcd[0] >= 0x11)
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_SET_POWER, DP_SET_POWER_D0);

	/* possibly enable downspread on the sink */
	if (dp_info->dpcd[3] & 0x1)
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, DP_SPREAD_AMP_0_5);
	else
		radeon_write_dpcd_reg(dp_info->radeon_connector,
				      DP_DOWNSPREAD_CTRL, 0);

	if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
	    (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
		radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
	}

	/* set the lane count on the sink */
	tmp = dp_info->dp_lane_count;
	if (drm_dp_enhanced_frame_cap(dp_info->dpcd))
		tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);

	/* set the link rate on the sink */
	tmp = drm_dp_link_rate_to_bw_code(dp_info->dp_clock);
	radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LINK_BW_SET, tmp);

	/* start training on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_START, 0);
	else
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_START,
					  dp_info->dp_clock, dp_info->enc_id, 0);

	/* disable the training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector,
			      DP_TRAINING_PATTERN_SET,
			      DP_TRAINING_PATTERN_DISABLE);

	return 0;
}

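/* End link training: disable the training pattern on both the sink and
 * the source.
 */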
static int radeon_dp_link_train_finish(struct radeon_dp_link_train_info *dp_info)
{
	udelay(400);

	/* disable the training pattern on the sink */
	radeon_write_dpcd_reg(dp_info->radeon_connector,
			      DP_TRAINING_PATTERN_SET,
			      DP_TRAINING_PATTERN_DISABLE);

	/* disable the training pattern on the source */
	if (ASIC_IS_DCE4(dp_info->rdev) || !dp_info->use_dpencoder)
		atombios_dig_encoder_setup(dp_info->encoder,
					   ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE, 0);
	else
		radeon_dp_encoder_service(dp_info->rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
					  dp_info->dp_clock, dp_info->enc_id, 0);

	return 0;
}

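/* Clock recovery phase: drive training pattern 1 and adjust voltage
 * swing/pre-emphasis as requested by the sink until every lane reports
 * clock recovery, the maximum voltage is hit, or the same voltage has
 * been tried five times.
 */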
static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
{
	bool clock_recovery;
	u8 voltage;
	int i;

	radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_1);
	memset(dp_info->train_set, 0, 4);
	radeon_dp_update_vs_emph(dp_info);

	udelay(400);

	/* clock recovery loop */
	clock_recovery = false;
	dp_info->tries = 0;
	voltage = 0xff;
	while (1) {
		drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);

		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			clock_recovery = true;
			break;
		}

		for (i = 0; i < dp_info->dp_lane_count; i++) {
			if ((dp_info->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		}
		if (i == dp_info->dp_lane_count) {
			DRM_ERROR("clock recovery reached max voltage\n");
			break;
		}

		if ((dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++dp_info->tries;
			if (dp_info->tries == 5) {
				DRM_ERROR("clock recovery tried 5 times\n");
				break;
			}
		} else
			dp_info->tries = 0;

		voltage = dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new train_set as requested by sink */
		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);

		radeon_dp_update_vs_emph(dp_info);
	}
	if (!clock_recovery) {
		DRM_ERROR("clock recovery failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}

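/* Channel equalization phase: drive training pattern 2 (or 3 when
 * supported) and keep adjusting the drive settings until the sink
 * reports channel equalization on all lanes, giving up after five tries.
 */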
static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
{
	bool channel_eq;

	if (dp_info->tp3_supported)
		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_3);
	else
		radeon_dp_set_tp(dp_info, DP_TRAINING_PATTERN_2);

	/* channel equalization loop */
	dp_info->tries = 0;
	channel_eq = false;
	while (1) {
		drm_dp_link_train_channel_eq_delay(dp_info->dpcd);

		if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
			DRM_ERROR("displayport link status failed\n");
			break;
		}

		if (drm_dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times */
		if (dp_info->tries > 5) {
			DRM_ERROR("channel eq failed: 5 tries\n");
			break;
		}

		/* Compute new train_set as requested by sink */
835
		dp_get_adjust_train(dp_info->link_status, dp_info->dp_lane_count, dp_info->train_set);
836

837 838
		radeon_dp_update_vs_emph(dp_info);
		dp_info->tries++;
	}

	if (!channel_eq) {
		DRM_ERROR("channel eq failed\n");
		return -1;
	} else {
		DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
			  dp_info->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
			  (dp_info->train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
		return 0;
	}
}

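/* Full link training sequence: initialize source and sink, run the clock
 * recovery and channel equalization phases, then finish by disabling the
 * training pattern.
 */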
void radeon_dp_link_train(struct drm_encoder *encoder,
			  struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
	struct radeon_encoder_atom_dig *dig;
	struct radeon_connector *radeon_connector;
	struct radeon_connector_atom_dig *dig_connector;
	struct radeon_dp_link_train_info dp_info;
	int index;
	u8 tmp, frev, crev;

	if (!radeon_encoder->enc_priv)
		return;
	dig = radeon_encoder->enc_priv;

	radeon_connector = to_radeon_connector(connector);
	if (!radeon_connector->con_priv)
		return;
	dig_connector = radeon_connector->con_priv;

	if ((dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT) &&
	    (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_eDP))
		return;

	/* DPEncoderService newer than 1.1 can't program properly the
	 * training pattern. When facing such version use the
	 * DIGXEncoderControl (X== 1 | 2)
	 */
	dp_info.use_dpencoder = true;
	index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
	if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) {
		if (crev > 1) {
			dp_info.use_dpencoder = false;
		}
	}

	dp_info.enc_id = 0;
	if (dig->dig_encoder)
		dp_info.enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
	else
		dp_info.enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
	if (dig->linkb)
		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_B;
	else
		dp_info.enc_id |= ATOM_DP_CONFIG_LINK_A;

	tmp = radeon_read_dpcd_reg(radeon_connector, DP_MAX_LANE_COUNT);
	if (ASIC_IS_DCE5(rdev) && (tmp & DP_TPS3_SUPPORTED))
		dp_info.tp3_supported = true;
	else
		dp_info.tp3_supported = false;

	memcpy(dp_info.dpcd, dig_connector->dpcd, DP_RECEIVER_CAP_SIZE);
	dp_info.rdev = rdev;
	dp_info.encoder = encoder;
	dp_info.connector = connector;
	dp_info.radeon_connector = radeon_connector;
	dp_info.dp_lane_count = dig_connector->dp_lane_count;
	dp_info.dp_clock = dig_connector->dp_clock;

	if (radeon_dp_link_train_init(&dp_info))
		goto done;
	if (radeon_dp_link_train_cr(&dp_info))
		goto done;
	if (radeon_dp_link_train_ce(&dp_info))
		goto done;
done:
	if (radeon_dp_link_train_finish(&dp_info))
		return;
}