/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"

int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32_MC(R_000090_MC_SYSTEM_STATUS);
		if (G_000090_MC_SYSTEM_IDLE(tmp))
			return 0;
		udelay(1);
	}
	return -1;
}

static void rs690_gpu_init(struct radeon_device *rdev)
{
	/* FIXME: is this correct ? */
	r420_pipes_init(rdev);
	if (rs690_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
};

void rs690_pm_info(struct radeon_device *rdev)
{
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *info;
	uint16_t data_offset;
	uint8_t frev, crev;
	fixed20_12 tmp;

	if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);

		/* Get various system information from the BIOS */
		switch (crev) {
		case 1:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (le16_to_cpu(info->info.usK8MemoryClock))
				rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
			else if (rdev->clock.default_mclk) {
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
				rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			} else
				rdev->pm.igp_system_mclk.full = dfixed_const(400);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(le16_to_cpu(info->info.usFSBClock));
			rdev->pm.igp_ht_link_width.full = dfixed_const(info->info.ucHTLinkWidth);
			break;
		case 2:
			tmp.full = dfixed_const(100);
			rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
			rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
			if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
				rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
			else if (rdev->clock.default_mclk)
				rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
			else
				rdev->pm.igp_system_mclk.full = dfixed_const(66700);
			rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
			rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
			rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
			break;
		default:
			/* We assume the slowest possible clock, i.e. worst case */
			rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
			rdev->pm.igp_system_mclk.full = dfixed_const(200);
			rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
			rdev->pm.igp_ht_link_width.full = dfixed_const(8);
			DRM_ERROR("No integrated system info for your GPU, using safe default\n");
			break;
		}
	} else {
		/* We assume the slowest possible clock, i.e. worst case */
		rdev->pm.igp_sideport_mclk.full = dfixed_const(200);
		rdev->pm.igp_system_mclk.full = dfixed_const(200);
		rdev->pm.igp_ht_link_clk.full = dfixed_const(1000);
		rdev->pm.igp_ht_link_width.full = dfixed_const(8);
		DRM_ERROR("No integrated system info for your GPU, using safe default\n");
	}
	/* Compute various bandwidths */
	/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4  */
	tmp.full = dfixed_const(4);
	rdev->pm.k8_bandwidth.full = dfixed_mul(rdev->pm.igp_system_mclk, tmp);
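	/* Illustrative example (not from the BIOS tables, just plugging the
	 * safe-default system_mclk of 200 used above into the formula):
	 * k8_bandwidth = 200 * 4 = 800.
	 */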
	/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8
	 *              = ht_clk * ht_width / 5
	 */
	tmp.full = dfixed_const(5);
	rdev->pm.ht_bandwidth.full = dfixed_mul(rdev->pm.igp_ht_link_clk,
						rdev->pm.igp_ht_link_width);
	rdev->pm.ht_bandwidth.full = dfixed_div(rdev->pm.ht_bandwidth, tmp);
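	/* Illustrative example, using the safe-default ht_link_clk = 1000 and
	 * ht_link_width = 8 from above: ht_bandwidth = 1000 * 8 / 5 = 1600.
	 */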
	if (tmp.full < rdev->pm.max_bandwidth.full) {
		/* HT link is a limiting factor */
		rdev->pm.max_bandwidth.full = tmp.full;
	}
	/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7
	 *                    = (sideport_clk * 14) / 10
	 */
	tmp.full = dfixed_const(14);
	rdev->pm.sideport_bandwidth.full = dfixed_mul(rdev->pm.igp_sideport_mclk, tmp);
	tmp.full = dfixed_const(10);
	rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
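	/* Illustrative example, using the safe-default sideport_mclk = 200 from
	 * above: sideport_bandwidth = 200 * 14 / 10 = 280.
	 */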
}

static void rs690_mc_init(struct radeon_device *rdev)
{
	u64 base;
	uint32_t h_addr, l_addr;
	unsigned long long k8_addr;

	rs400_gart_adjust_size(rdev);
	rdev->mc.vram_is_ddr = true;
	rdev->mc.vram_width = 128;
	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
	base = G_000100_MC_FB_START(base) << 16;
	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	/* Some boards seem to be configured for 128MB of sideport memory,
	 * but really only have 64MB.  Just skip the sideport and use
	 * UMA memory.
	 */
	if (rdev->mc.igp_sideport_enabled &&
	    (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
		base += 128 * 1024 * 1024;
		rdev->mc.real_vram_size -= 128 * 1024 * 1024;
		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
	}

	/* Use K8 direct mapping for fast fb access. */ 
	rdev->fastfb_working = false;
	h_addr = G_00005F_K8_ADDR_EXT(RREG32_MC(R_00005F_MC_MISC_UMA_CNTL));
	l_addr = RREG32_MC(R_00001E_K8_FB_LOCATION);
	k8_addr = ((unsigned long long)h_addr) << 32 | l_addr;
#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
	if (k8_addr + rdev->mc.visible_vram_size < 0x100000000ULL)	
#endif
	{
		/* FastFB shall be used with UMA memory. Here it is simply disabled when sideport 
		 * memory is present.
		 */
		if (rdev->mc.igp_sideport_enabled == false && radeon_fastfb == 1) {
			DRM_INFO("Direct mapping: aper base at 0x%llx, replaced by direct mapping base 0x%llx.\n", 
					(unsigned long long)rdev->mc.aper_base, k8_addr);
			rdev->mc.aper_base = (resource_size_t)k8_addr;
			rdev->fastfb_working = true;
		}
	}  

	rs690_pm_info(rdev);
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
	radeon_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);
}

void rs690_line_buffer_adjust(struct radeon_device *rdev,
			      struct drm_display_mode *mode1,
			      struct drm_display_mode *mode2)
{
	u32 tmp;

	/*
	 * Line Buffer Setup
	 * There is a single line buffer shared by both display controllers.
	 * R_006520_DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
	 * the display controllers.  The partitioning can either be done
	 * manually or via one of four preset allocations specified in bits 1:0:
	 *  0 - line buffer is divided in half and shared between each display controller
	 *  1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
	 *  2 - D1 gets the whole buffer
	 *  3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
	 * Setting bit 2 of R_006520_DC_LB_MEMORY_SPLIT switches to manual
	 * allocation mode. In manual allocation mode, D1 always starts at 0,
	 * D1 end/2 is specified in bits 14:4; D2 allocation follows D1.
	 */
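	/* For example (illustrative only): with two 1920x1080 displays neither
	 * hdisplay exceeds 2560, so the auto logic below picks the half/half
	 * split; with only D1 enabled, D1 gets the whole line buffer.
	 */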
	tmp = RREG32(R_006520_DC_LB_MEMORY_SPLIT) & C_006520_DC_LB_MEMORY_SPLIT;
	tmp &= ~C_006520_DC_LB_MEMORY_SPLIT_MODE;
	/* auto */
	if (mode1 && mode2) {
		if (mode1->hdisplay > mode2->hdisplay) {
			if (mode1->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else if (mode2->hdisplay > mode1->hdisplay) {
			if (mode2->hdisplay > 2560)
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
			else
				tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
		} else
			tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
	} else if (mode1) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_ONLY;
	} else if (mode2) {
		tmp |= V_006520_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
	}
	WREG32(R_006520_DC_LB_MEMORY_SPLIT, tmp);
}

struct rs690_watermark {
	u32        lb_request_fifo_depth;
	fixed20_12 num_line_pair;
	fixed20_12 estimated_width;
	fixed20_12 worst_case_latency;
	fixed20_12 consumption_rate;
	fixed20_12 active_time;
	fixed20_12 dbpp;
	fixed20_12 priority_mark_max;
	fixed20_12 priority_mark;
	fixed20_12 sclk;
};

static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
					 struct radeon_crtc *crtc,
					 struct rs690_watermark *wm,
					 bool low)
{
	struct drm_display_mode *mode = &crtc->base.mode;
	fixed20_12 a, b, c;
	fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width;
	fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency;
	fixed20_12 sclk, core_bandwidth, max_bandwidth;
	u32 selected_sclk;

	if (!crtc->base.enabled) {
		/* FIXME: wouldn't it be better to set the priority mark to maximum? */
		wm->lb_request_fifo_depth = 4;
		return;
	}

	if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		selected_sclk = radeon_dpm_get_sclk(rdev, low);
	else
		selected_sclk = rdev->pm.current_sclk;

	/* sclk in Mhz */
	a.full = dfixed_const(100);
	sclk.full = dfixed_const(selected_sclk);
	sclk.full = dfixed_div(sclk, a);

	/* core_bandwidth = sclk(Mhz) * 16 */
	a.full = dfixed_const(16);
	core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);

	if (crtc->vsc.full > dfixed_const(2))
		wm->num_line_pair.full = dfixed_const(2);
	else
		wm->num_line_pair.full = dfixed_const(1);

	b.full = dfixed_const(mode->crtc_hdisplay);
	c.full = dfixed_const(256);
	a.full = dfixed_div(b, c);
	request_fifo_depth.full = dfixed_mul(a, wm->num_line_pair);
	request_fifo_depth.full = dfixed_ceil(request_fifo_depth);
	if (a.full < dfixed_const(4)) {
		wm->lb_request_fifo_depth = 4;
	} else {
		wm->lb_request_fifo_depth = dfixed_trunc(request_fifo_depth);
	}

	/* Determine consumption rate
	 *  pclk = pixel clock period (ns) = 1000 / (mode.clock / 1000)
	 *  vtaps = number of vertical taps,
	 *  vsc = vertical scaling ratio, defined as source/destination
	 *  hsc = horizontal scaling ratio, defined as source/destination
	 */
	a.full = dfixed_const(mode->clock);
	b.full = dfixed_const(1000);
	a.full = dfixed_div(a, b);
	pclk.full = dfixed_div(b, a);
	if (crtc->rmx_type != RMX_OFF) {
		b.full = dfixed_const(2);
		if (crtc->vsc.full > b.full)
			b.full = crtc->vsc.full;
		b.full = dfixed_mul(b, crtc->hsc);
		c.full = dfixed_const(2);
		b.full = dfixed_div(b, c);
		consumption_time.full = dfixed_div(pclk, b);
	} else {
		consumption_time.full = pclk.full;
	}
	a.full = dfixed_const(1);
	wm->consumption_rate.full = dfixed_div(a, consumption_time);
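	/* Illustrative example: a 1080p mode with mode->clock = 148500 (kHz)
	 * and no scaling gives pclk = 1000 / 148.5 ~= 6.73 ns, so the
	 * consumption rate is roughly 1 / 6.73 ~= 0.15 pixels per ns.
	 */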


	/* Determine line time
	 *  LineTime = total time for one line of display
	 *  htotal = total number of horizontal pixels
	 *  pclk = pixel clock period (ns)
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	line_time.full = dfixed_mul(a, pclk);

	/* Determine active time
	 *  ActiveTime = time of active region of display within one line,
	 *  hactive = total number of horizontal active pixels
	 *  htotal = total number of horizontal pixels
	 */
	a.full = dfixed_const(crtc->base.mode.crtc_htotal);
	b.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->active_time.full = dfixed_mul(line_time, b);
	wm->active_time.full = dfixed_div(wm->active_time, a);
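	/* Illustrative example, continuing the 1080p numbers above: with
	 * crtc_htotal = 2200 and pclk ~= 6.73 ns, line_time ~= 14800 ns and
	 * active_time ~= 14800 * 1920 / 2200 ~= 12900 ns.
	 */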

	/* Maximum bandwidth is the minimum bandwidth of all components */
	max_bandwidth = core_bandwidth;
	if (rdev->mc.igp_sideport_enabled) {
		if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
			rdev->pm.sideport_bandwidth.full)
			max_bandwidth = rdev->pm.sideport_bandwidth;
		read_delay_latency.full = dfixed_const(370 * 800);
		a.full = dfixed_const(1000);
		b.full = dfixed_div(rdev->pm.igp_sideport_mclk, a);
		read_delay_latency.full = dfixed_div(read_delay_latency, b);
		read_delay_latency.full = dfixed_mul(read_delay_latency, a);
	} else {
		if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
			rdev->pm.k8_bandwidth.full)
			max_bandwidth = rdev->pm.k8_bandwidth;
		if (max_bandwidth.full > rdev->pm.ht_bandwidth.full &&
			rdev->pm.ht_bandwidth.full)
			max_bandwidth = rdev->pm.ht_bandwidth;
		read_delay_latency.full = dfixed_const(5000);
	}

	/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */
	a.full = dfixed_const(16);
	sclk.full = dfixed_mul(max_bandwidth, a);
	a.full = dfixed_const(1000);
	sclk.full = dfixed_div(a, sclk);
	/* Determine chunk time
	 * ChunkTime = the time it takes the DCP to send one chunk of data
	 * to the LB which consists of pipeline delay and inter chunk gap
	 * sclk = system clock(ns)
	 */
	a.full = dfixed_const(256 * 13);
	chunk_time.full = dfixed_mul(sclk, a);
	a.full = dfixed_const(10);
	chunk_time.full = dfixed_div(chunk_time, a);
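	/* Illustrative example (same units as used above): with max_bandwidth =
	 * 1600 from the HT defaults, sclk = 1000 / (1600 * 16) ~= 0.039 and
	 * chunk_time ~= 0.039 * 256 * 13 / 10 ~= 13.
	 */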

	/* Determine the worst case latency
	 * NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines)
	 * WorstCaseLatency = worst case time from urgent to when the MC starts
	 *                    to return data
	 * READ_DELAY_IDLE_MAX = constant of 1us
	 * ChunkTime = time it takes the DCP to send one chunk of data to the LB
	 *             which consists of pipeline delay and inter chunk gap
	 */
	if (dfixed_trunc(wm->num_line_pair) > 1) {
		a.full = dfixed_const(3);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	} else {
		a.full = dfixed_const(2);
		wm->worst_case_latency.full = dfixed_mul(a, chunk_time);
		wm->worst_case_latency.full += read_delay_latency.full;
	}
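	/* Illustrative example: with a single line pair, chunk_time ~= 13 and
	 * the UMA read_delay_latency of 5000 from above, worst_case_latency
	 * ~= 2 * 13 + 5000 = 5026.
	 */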

	/* Determine the tolerable latency
	 * TolerableLatency = Any given request has only 1 line time
	 *                    for the data to be returned
	 * LBRequestFifoDepth = Number of chunk requests the LB can
	 *                      put into the request FIFO for a display
	 *  LineTime = total time for one line of display
	 *  ChunkTime = the time it takes the DCP to send one chunk
	 *              of data to the LB which consists of
	 *  pipeline delay and inter chunk gap
	 */
	if ((2+wm->lb_request_fifo_depth) >= dfixed_trunc(request_fifo_depth)) {
		tolerable_latency.full = line_time.full;
	} else {
		tolerable_latency.full = dfixed_const(wm->lb_request_fifo_depth - 2);
		tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full;
		tolerable_latency.full = dfixed_mul(tolerable_latency, chunk_time);
		tolerable_latency.full = line_time.full - tolerable_latency.full;
	}
	/* We assume worst case 32bits (4 bytes) */
	wm->dbpp.full = dfixed_const(4 * 8);

	/* Determine the maximum priority mark
	 *  width = viewport width in pixels
	 */
	a.full = dfixed_const(16);
	wm->priority_mark_max.full = dfixed_const(crtc->base.mode.crtc_hdisplay);
	wm->priority_mark_max.full = dfixed_div(wm->priority_mark_max, a);
	wm->priority_mark_max.full = dfixed_ceil(wm->priority_mark_max);
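	/* Illustrative example: for crtc_hdisplay = 1920,
	 * priority_mark_max = ceil(1920 / 16) = 120.
	 */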

	/* Determine estimated width */
	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
	estimated_width.full = dfixed_div(estimated_width, consumption_time);
	if (dfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
		wm->priority_mark.full = dfixed_const(10);
	} else {
		a.full = dfixed_const(16);
		wm->priority_mark.full = dfixed_div(estimated_width, a);
		wm->priority_mark.full = dfixed_ceil(wm->priority_mark);
		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
	}
}

static void rs690_compute_mode_priority(struct radeon_device *rdev,
					struct rs690_watermark *wm0,
					struct rs690_watermark *wm1,
					struct drm_display_mode *mode0,
					struct drm_display_mode *mode1,
					u32 *d1mode_priority_a_cnt,
					u32 *d2mode_priority_a_cnt)
{
	fixed20_12 priority_mark02, priority_mark12, fill_rate;
	fixed20_12 a, b;

	*d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
	*d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);

	if (mode0 && mode1) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		if (dfixed_trunc(wm1->dbpp) > 64)
			b.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
		else
			b.full = wm1->num_line_pair.full;
		a.full += b.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2) {
			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
		}
	} else if (mode0) {
		if (dfixed_trunc(wm0->dbpp) > 64)
			a.full = dfixed_mul(wm0->dbpp, wm0->num_line_pair);
		else
			a.full = wm0->num_line_pair.full;
		fill_rate.full = dfixed_div(wm0->sclk, a);
		if (wm0->consumption_rate.full > fill_rate.full) {
			b.full = wm0->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm0->active_time);
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm0->worst_case_latency,
						wm0->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark02.full = dfixed_div(a, b);
		}
		if (wm0->priority_mark.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark.full;
		if (wm0->priority_mark_max.full > priority_mark02.full)
			priority_mark02.full = wm0->priority_mark_max.full;
		*d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
		if (rdev->disp_priority == 2)
			*d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
	} else if (mode1) {
		if (dfixed_trunc(wm1->dbpp) > 64)
			a.full = dfixed_mul(wm1->dbpp, wm1->num_line_pair);
		else
			a.full = wm1->num_line_pair.full;
		fill_rate.full = dfixed_div(wm1->sclk, a);
		if (wm1->consumption_rate.full > fill_rate.full) {
			b.full = wm1->consumption_rate.full - fill_rate.full;
			b.full = dfixed_mul(b, wm1->active_time);
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			a.full = a.full + b.full;
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		} else {
			a.full = dfixed_mul(wm1->worst_case_latency,
						wm1->consumption_rate);
			b.full = dfixed_const(16 * 1000);
			priority_mark12.full = dfixed_div(a, b);
		}
		if (wm1->priority_mark.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark.full;
		if (wm1->priority_mark_max.full > priority_mark12.full)
			priority_mark12.full = wm1->priority_mark_max.full;
		*d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
		if (rdev->disp_priority == 2)
			*d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
	}
}

void rs690_bandwidth_update(struct radeon_device *rdev)
{
	struct drm_display_mode *mode0 = NULL;
	struct drm_display_mode *mode1 = NULL;
	struct rs690_watermark wm0_high, wm0_low;
	struct rs690_watermark wm1_high, wm1_low;
	u32 tmp;
	u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
	u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;

	radeon_update_display_priority(rdev);

	if (rdev->mode_info.crtcs[0]->base.enabled)
		mode0 = &rdev->mode_info.crtcs[0]->base.mode;
	if (rdev->mode_info.crtcs[1]->base.enabled)
		mode1 = &rdev->mode_info.crtcs[1]->base.mode;
	/*
	 * Set display0/1 priority up in the memory controller for
	 * modes if the user specifies HIGH for displaypriority
	 * option.
	 */
	if ((rdev->disp_priority == 2) &&
	    ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
		tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
		tmp &= C_000104_MC_DISP0R_INIT_LAT;
		tmp &= C_000104_MC_DISP1R_INIT_LAT;
		if (mode0)
			tmp |= S_000104_MC_DISP0R_INIT_LAT(1);
		if (mode1)
			tmp |= S_000104_MC_DISP1R_INIT_LAT(1);
		WREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER, tmp);
	}
	rs690_line_buffer_adjust(rdev, mode0, mode1);

	if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))
		WREG32(R_006C9C_DCP_CONTROL, 0);
	if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
		WREG32(R_006C9C_DCP_CONTROL, 2);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_high, false);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_high, false);

	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0_low, true);
	rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1_low, true);

	tmp = (wm0_high.lb_request_fifo_depth - 1);
	tmp |= (wm1_high.lb_request_fifo_depth - 1) << 16;
	WREG32(R_006D58_LB_MAX_REQ_OUTSTANDING, tmp);

	rs690_compute_mode_priority(rdev,
				    &wm0_high, &wm1_high,
				    mode0, mode1,
				    &d1mode_priority_a_cnt, &d2mode_priority_a_cnt);
	rs690_compute_mode_priority(rdev,
				    &wm0_low, &wm1_low,
				    mode0, mode1,
				    &d1mode_priority_b_cnt, &d2mode_priority_b_cnt);

	WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
	WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_b_cnt);
	WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
	WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_b_cnt);
}

uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long flags;
	uint32_t r;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg));
	r = RREG32(R_00007C_MC_DATA);
	WREG32(R_000078_MC_INDEX, ~C_000078_MC_IND_ADDR);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
	return r;
}

void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->mc_idx_lock, flags);
	WREG32(R_000078_MC_INDEX, S_000078_MC_IND_ADDR(reg) |
		S_000078_MC_IND_WR_EN(1));
	WREG32(R_00007C_MC_DATA, v);
	WREG32(R_000078_MC_INDEX, 0x7F);
	spin_unlock_irqrestore(&rdev->mc_idx_lock, flags);
}

static void rs690_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;

	/* Stops all mc clients */
	rv515_mc_stop(rdev, &save);

	/* Wait for mc idle */
	if (rs690_mc_wait_for_idle(rdev))
		dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
	/* Program the MC; the address space should be limited to 32 bits */
	WREG32_MC(R_000100_MCCFG_FB_LOCATION,
			S_000100_MC_FB_START(rdev->mc.vram_start >> 16) |
			S_000100_MC_FB_TOP(rdev->mc.vram_end >> 16));
	WREG32(R_000134_HDP_FB_LOCATION,
		S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));

	rv515_mc_resume(rdev, &save);
}

static int rs690_startup(struct radeon_device *rdev)
{
	int r;

	rs690_mc_program(rdev);
	/* Resume clock */
	rv515_clock_startup(rdev);
	/* Initialize GPU configuration (# pipes, ...) */
	rs690_gpu_init(rdev);
	/* Initialize GART (initialize after TTM so we can allocate
	 * memory through TTM but finalize after TTM) */
	r = rs400_gart_enable(rdev);
	if (r)
		return r;

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	/* Enable IRQ */
	if (!rdev->irq.installed) {
		r = radeon_irq_kms_init(rdev);
		if (r)
			return r;
	}

	rs600_irq_set(rdev);
	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
	/* 1M ring buffer */
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
		return r;
	}

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = r600_audio_init(rdev);
	if (r) {
		dev_err(rdev->dev, "failed initializing audio\n");
		return r;
	}

	return 0;
}

int rs690_resume(struct radeon_device *rdev)
{
	int r;

	/* Make sure the GART is not working */
	rs400_gart_disable(rdev);
	/* Resume clock before doing reset */
	rv515_clock_startup(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* post */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Resume clock after posting */
	rv515_clock_startup(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);

	rdev->accel_working = true;
	r = rs690_startup(rdev);
	if (r) {
		rdev->accel_working = false;
	}
	return r;
}

int rs690_suspend(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_disable(rdev);
	radeon_wb_disable(rdev);
	rs600_irq_disable(rdev);
	rs400_gart_disable(rdev);
	return 0;
}

void rs690_fini(struct radeon_device *rdev)
{
	r600_audio_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	rs400_gart_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}

int rs690_init(struct radeon_device *rdev)
{
	int r;

	/* Disable VGA */
	rv515_vga_render_disable(rdev);
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* restore some registers to sane defaults */
	r100_restore_sanity(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS*/
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r)
			return r;
	} else {
		dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
		return -EINVAL;
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* check if cards are posted or not */
	if (radeon_boot_test_post_card(rdev) == false)
		return -EINVAL;

	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* initialize memory controller */
	rs690_mc_init(rdev);
	rv515_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;
	r = rs400_gart_init(rdev);
	if (r)
		return r;
	rs600_set_safe_registers(rdev);

	rdev->accel_working = true;
	r = rs690_startup(rdev);
	if (r) {
		/* Something went wrong with the accel init; stop accel */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r100_cp_fini(rdev);
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		rs400_gart_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}