/*
 * Copyright (C) 2010 Francisco Jerez.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "nv04.h"
#include "fbmem.h"

#include <subdev/bios.h>
#include <subdev/bios/init.h>
#include <subdev/bios/pll.h>
#include <subdev/clk/pll.h>
#include <subdev/vga.h>

static void
nv04_devinit_meminit(struct nvkm_devinit *init)
{
	struct nvkm_subdev *subdev = &init->subdev;
	struct nvkm_device *device = subdev->device;
	u32 patt = 0xdeadbeef;
	struct io_mapping *fb;
	int i;

	/* Map the framebuffer aperture */
	fb = fbmem_init(device);
	if (!fb) {
		nvkm_error(subdev, "failed to map fb\n");
		return;
	}

	/* Sequencer and refresh off */
	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20);
	nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);

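	/*
	 * Probe the RAM configuration: assume the largest supported layout
	 * (16MB, 128-bit, 16Mbit SGRAM), write a test pattern through the
	 * aperture, then narrow the type, width and amount down according
	 * to which offsets alias or fail to read back.
	 */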
	nvkm_mask(device, NV04_PFB_BOOT_0, ~0,
		      NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
		      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
		      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);

	for (i = 0; i < 4; i++)
		fbmem_poke(fb, 4 * i, patt);

	fbmem_poke(fb, 0x400000, patt + 1);

	if (fbmem_peek(fb, 0) == patt + 1) {
		nvkm_mask(device, NV04_PFB_BOOT_0,
			      NV04_PFB_BOOT_0_RAM_TYPE,
			      NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
		nvkm_mask(device, NV04_PFB_DEBUG_0,
			      NV04_PFB_DEBUG_0_REFRESH_OFF, 0);

		for (i = 0; i < 4; i++)
			fbmem_poke(fb, 4 * i, patt);

		if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff))
			nvkm_mask(device, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
	} else
	if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) {
		nvkm_mask(device, NV04_PFB_BOOT_0,
			      NV04_PFB_BOOT_0_RAM_WIDTH_128 |
			      NV04_PFB_BOOT_0_RAM_AMOUNT,
			      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
	} else
	if (fbmem_peek(fb, 0) != patt) {
		if (fbmem_readback(fb, 0x800000, patt))
			nvkm_mask(device, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
		else
			nvkm_mask(device, NV04_PFB_BOOT_0,
				      NV04_PFB_BOOT_0_RAM_AMOUNT,
				      NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);

		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
			      NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
	} else
	if (!fbmem_readback(fb, 0x800000, patt)) {
		nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			      NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
	}

	/* Refresh on, sequencer on */
	nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
	nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20);
	fbmem_fini(fb);
}
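
/*
 * powerctrl_1_shift() - return the bit shift of the nibble in register
 * 0x001584 that the setPLL_*() helpers raise around a write to the given
 * PLL register, or -4 if the chip doesn't need the power-control dance.
 */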

static int
powerctrl_1_shift(int chip_version, int reg)
{
	int shift = -4;

	if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
		return shift;

	switch (reg) {
	case 0x680520:
		shift += 4;
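		/* fall through */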
	case 0x680508:
		shift += 4;
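		/* fall through */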
	case 0x680504:
		shift += 4;
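		/* fall through */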
	case 0x680500:
		shift += 4;
	}

	/*
	 * the shift for vpll regs is only used for nv3x chips with a single
	 * stage pll
	 */
	if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
			  chip_version == 0x36 || chip_version >= 0x40))
		shift = -4;

	return shift;
}
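
/*
 * setPLL_single() - program a single-stage PLL.  The post divider and the
 * N/M values are written as separate halves, post divider first when the
 * clock goes up and N/M first when it goes down, with the 0x001584
 * power-control nibble raised around the update where the chip needs it.
 */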

void
setPLL_single(struct nvkm_devinit *init, u32 reg,
	      struct nvkm_pll_vals *pv)
{
	struct nvkm_device *device = init->subdev.device;
	int chip_version = device->bios->version.chip;
	uint32_t oldpll = nvkm_rd32(device, reg);
	int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
	uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t saved_powerctrl_1 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);

	if (oldpll == pll)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
		nvkm_wr32(device, 0x001584,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
		/* upclock -- write new post divider first */
		nvkm_wr32(device, reg, pv->log2P << 16 | (oldpll & 0xffff));
	else
		/* downclock -- write new NM first */
		nvkm_wr32(device, reg, (oldpll & 0xffff0000) | pv->NM1);

	if ((chip_version < 0x17 || chip_version == 0x1a) &&
	    chip_version != 0x11)
		/* wait a bit on older chips */
		msleep(64);
	nvkm_rd32(device, reg);

	/* then write the other half as well */
	nvkm_wr32(device, reg, pll);

	if (shift_powerctrl_1 >= 0)
		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
}

static uint32_t
new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
{
	bool head_a = (reg1 == 0x680508);

	if (ss)	/* single stage pll mode */
		ramdac580 |= head_a ? 0x00000100 : 0x10000000;
	else
		ramdac580 &= head_a ? 0xfffffeff : 0xefffffff;

	return ramdac580;
}
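
/*
 * setPLL_double_highregs() - program a two-stage PLL through the high
 * (0x680xxx) register pairs.  nv30/nv35 fold the second stage into the
 * first register, and later nv4x chips additionally flag single-stage
 * mode in register 0x680580.
 */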

void
setPLL_double_highregs(struct nvkm_devinit *init, u32 reg1,
		       struct nvkm_pll_vals *pv)
{
	struct nvkm_device *device = init->subdev.device;
	int chip_version = device->bios->version.chip;
	bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
	uint32_t reg2 = reg1 + ((reg1 == 0x680520) ? 0x5c : 0x70);
	uint32_t oldpll1 = nvkm_rd32(device, reg1);
	uint32_t oldpll2 = !nv3035 ? nvkm_rd32(device, reg2) : 0;
	uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
	uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
	uint32_t oldramdac580 = 0, ramdac580 = 0;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;	/* nv41+ only */
	uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
	int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);

	/* model specific additions to generic pll1 and pll2 set up above */
	if (nv3035) {
		pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
		       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
		pll2 = 0;
	}
	if (chip_version > 0x40 && reg1 >= 0x680508) { /* !nv40 */
		oldramdac580 = nvkm_rd32(device, 0x680580);
		ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
		if (oldramdac580 != ramdac580)
			oldpll1 = ~0;	/* force mismatch */
		if (single_stage)
			/* magic value used by nvidia in single stage mode */
			pll2 |= 0x011f;
	}
	if (chip_version > 0x70)
		/* magic bits set by the blob (but not the bios) on g71-73 */
		pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;

	if (oldpll1 == pll1 && oldpll2 == pll2)
		return;	/* already set */

	if (shift_powerctrl_1 >= 0) {
		saved_powerctrl_1 = nvkm_rd32(device, 0x001584);
		nvkm_wr32(device, 0x001584,
			(saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
			1 << shift_powerctrl_1);
	}

	if (chip_version >= 0x40) {
		int shift_c040 = 14;

		switch (reg1) {
		case 0x680504:
			shift_c040 += 2;
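			/* fall through */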
		case 0x680500:
			shift_c040 += 2;
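			/* fall through */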
		case 0x680520:
			shift_c040 += 2;
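			/* fall through */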
		case 0x680508:
			shift_c040 += 2;
		}

		savedc040 = nvkm_rd32(device, 0xc040);
		if (shift_c040 != 14)
			nvkm_wr32(device, 0xc040, savedc040 & ~(3 << shift_c040));
	}

	if (oldramdac580 != ramdac580)
		nvkm_wr32(device, 0x680580, ramdac580);

	if (!nv3035)
		nvkm_wr32(device, reg2, pll2);
	nvkm_wr32(device, reg1, pll1);

	if (shift_powerctrl_1 >= 0)
		nvkm_wr32(device, 0x001584, saved_powerctrl_1);
	if (chip_version >= 0x40)
		nvkm_wr32(device, 0xc040, savedc040);
}

void
setPLL_double_lowregs(struct nvkm_devinit *init, u32 NMNMreg,
		      struct nvkm_pll_vals *pv)
{
	/* When setting PLLs, there is a merry game of disabling and enabling
	 * various bits of hardware during the process. This function is a
	 * synthesis of six nv4x traces, nearly each card doing a subtly
	 * different thing. With luck all the necessary bits for each card are
	 * combined herein. Without luck it deviates from each card's formula
	 * so as to not work on any :)
	 */
	struct nvkm_device *device = init->subdev.device;
	uint32_t Preg = NMNMreg - 4;
	bool mpll = Preg == 0x4020;
	uint32_t oldPval = nvkm_rd32(device, Preg);
	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
			0xc << 28 | pv->log2P << 16;
	uint32_t saved4600 = 0;
	/* some cards have different maskc040s */
	uint32_t maskc040 = ~(3 << 14), savedc040;
	bool single_stage = !pv->NM2 || pv->N2 == pv->M2;

	if (nvkm_rd32(device, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
		return;

	if (Preg == 0x4000)
		maskc040 = ~0x333;
	if (Preg == 0x4058)
		maskc040 = ~(0xc << 24);

	if (mpll) {
		struct nvbios_pll info;
		uint8_t Pval2;

		if (nvbios_pll_parse(device->bios, Preg, &info))
			return;

		Pval2 = pv->log2P + info.bias_p;
		if (Pval2 > info.max_p)
			Pval2 = info.max_p;
		Pval |= 1 << 28 | Pval2 << 20;

		saved4600 = nvkm_rd32(device, 0x4600);
		nvkm_wr32(device, 0x4600, saved4600 | 8 << 28);
	}
	if (single_stage)
		Pval |= mpll ? 1 << 12 : 1 << 8;

	nvkm_wr32(device, Preg, oldPval | 1 << 28);
	nvkm_wr32(device, Preg, Pval & ~(4 << 28));
	if (mpll) {
		Pval |= 8 << 20;
		nvkm_wr32(device, 0x4020, Pval & ~(0xc << 28));
		nvkm_wr32(device, 0x4038, Pval & ~(0xc << 28));
	}

	savedc040 = nvkm_rd32(device, 0xc040);
	nvkm_wr32(device, 0xc040, savedc040 & maskc040);

	nvkm_wr32(device, NMNMreg, NMNM);
	if (NMNMreg == 0x4024)
		nvkm_wr32(device, 0x403c, NMNM);

	nvkm_wr32(device, Preg, Pval);
	if (mpll) {
		Pval &= ~(8 << 20);
		nvkm_wr32(device, 0x4020, Pval);
		nvkm_wr32(device, 0x4038, Pval);
		nvkm_wr32(device, 0x4600, saved4600);
	}

	nvkm_wr32(device, 0xc040, savedc040);

	if (mpll) {
		nvkm_wr32(device, 0x4020, Pval & ~(1 << 28));
		nvkm_wr32(device, 0x4038, Pval & ~(1 << 28));
	}
}

int
nv04_devinit_pll_set(struct nvkm_devinit *devinit, u32 type, u32 freq)
{
	struct nvkm_bios *bios = nvkm_bios(devinit);
	struct nvkm_pll_vals pv;
	struct nvbios_pll info;
	int cv = bios->version.chip;
	int N1, M1, N2, M2, P;
	int ret;

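	/*
	 * Low-register (0x40xx) PLLs are parsed from the VBIOS by their P
	 * register, which sits four bytes below the N/M register passed in
	 * as type.
	 */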
	ret = nvbios_pll_parse(bios, type > 0x405c ? type : type - 4, &info);
	if (ret)
		return ret;

	ret = nv04_pll_calc(nv_subdev(devinit), &info, freq,
			    &N1, &M1, &N2, &M2, &P);
	if (!ret)
		return -EINVAL;

	pv.refclk = info.refclk;
	pv.N1 = N1;
	pv.M1 = M1;
	pv.N2 = N2;
	pv.M2 = M2;
	pv.log2P = P;

	if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
	    cv >= 0x40) {
		if (type > 0x405c)
			setPLL_double_highregs(devinit, type, &pv);
		else
			setPLL_double_lowregs(devinit, type, &pv);
	} else
		setPLL_single(devinit, type, &pv);

	return 0;
}

int
nv04_devinit_fini(struct nvkm_object *object, bool suspend)
{
	struct nv04_devinit *init = (void *)object;
	struct nvkm_device *device = init->base.subdev.device;
	int ret;

	/* make i2c busses accessible */
	nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);

	ret = nvkm_devinit_fini(&init->base, suspend);
	if (ret)
		return ret;

	/* unslave crtcs */
	if (init->owner < 0)
		init->owner = nvkm_rdvgaowner(device);
	nvkm_wrvgaowner(device, 0);
	return 0;
}

int
nv04_devinit_init(struct nvkm_object *object)
{
	struct nv04_devinit *init = (void *)object;
	struct nvkm_subdev *subdev = &init->base.subdev;
	struct nvkm_device *device = subdev->device;

	if (!init->base.post) {
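		/*
		 * An adaptor that has never been POSTed reads back a zero
		 * CRTC horizontal total, so flag the board for POSTing in
		 * that case.
		 */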
		u32 htotal = nvkm_rdvgac(device, 0, 0x06);
		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
		htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
		htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
		if (!htotal) {
			nvkm_debug(subdev, "adaptor not initialised\n");
			init->base.post = true;
		}
	}

	return nvkm_devinit_init(&init->base);
}

void
nv04_devinit_dtor(struct nvkm_object *object)
{
	struct nv04_devinit *init = (void *)object;

	/* restore vga owner saved at first init */
	nvkm_wrvgaowner(init->base.subdev.device, init->owner);

	nvkm_devinit_destroy(&init->base);
}

int
nv04_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nv04_devinit *init;
	int ret;

	ret = nvkm_devinit_create(parent, engine, oclass, &init);
	*pobject = nv_object(init);
	if (ret)
		return ret;

	init->owner = -1;
	return 0;
}

struct nvkm_oclass *
nv04_devinit_oclass = &(struct nvkm_devinit_impl) {
	.base.handle = NV_SUBDEV(DEVINIT, 0x04),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_devinit_ctor,
		.dtor = nv04_devinit_dtor,
		.init = nv04_devinit_init,
		.fini = nv04_devinit_fini,
	},
	.meminit = nv04_devinit_meminit,
	.pll_set = nv04_devinit_pll_set,
	.post = nvbios_init,
}.base;