/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"BARTS",
	"TURKS",
	"CAICOS",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
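
/*
 * A minimal usage sketch for the scratch register helpers above (hypothetical
 * caller, not part of this file): grab a register, use it, hand it back.
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... have the ring write a known value and poll for it ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */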

void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		/* often unreliable on AGP */
		if (rdev->flags & RADEON_IS_AGP) {
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
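
/*
 * Note: the single GTT page pinned by radeon_wb_init() is the writeback page;
 * when writeback is enabled the GPU writes fence/scratch values into it so the
 * driver can read them from memory instead of polling MMIO registers, and on
 * r600+ "event write" fences can be used as well.
 */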

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail; the worst case is limiting VRAM.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the mc
 * to cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 * ones).
 *
 * Note: the IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size, align the new size on a power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
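
/*
 * Worked example with illustrative numbers only: for base = 0 and
 * mc_vram_size = 512M, vram_start = 0x0000000000000000 and
 * vram_end = 0x000000001FFFFFFF.  A hypothetical 8G mc_vram_size would not
 * fit below 4G, so both mc_vram_size and real_vram_size would first be
 * clamped to aper_size.
 */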

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size, align the new size on a power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
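
/*
 * Worked example with illustrative numbers only: with VRAM at
 * 0x00000000 - 0x0FFFFFFF (256M) and gtt_base_align = 0, size_bf = 0 and
 * size_af = 0xF0000000, so the GTT is placed right after VRAM:
 * gtt_start = 0x10000000 and a 512M GTT ends at gtt_end = 0x2FFFFFFF.
 */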

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk (MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
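
/*
 * Note: the dummy page above is a single zeroed DMA32 page used as a safe
 * backing page; unbound GART entries can be pointed at it so stray GPU
 * accesses land in harmless memory rather than faulting.
 */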


/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
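
/*
 * The radeon_vram_limit, radeon_gart_size and radeon_agpmode values checked
 * above come from module parameters; assuming the usual radeon parameter
 * names, a manual override would look like:
 *
 *	modprobe radeon vramlimit=256 gartsize=1024 agpmode=-1
 *
 * and radeon_check_arguments() then sanitizes whatever was passed in.
 */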

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}


int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device);

	/* mutex initialization is all done here so we
	 * can call these functions again without locking issues */
	mutex_init(&rdev->cs_mutex);
	mutex_init(&rdev->ib_pool.mutex);
	mutex_init(&rdev->cp.mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* All of the newer IGP chips have an internal GART.
	 * However, some rs4xx chips report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits (in theory)
	 * AGP - generally dma32 is safest
	 * PCI - only dma32
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if (rdev->flags & RADEON_IS_PCI)
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card, try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if (radeon_testing) {
		radeon_test_moves(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev);
	}
	return 0;
}

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
}


/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = rfb->obj->driver_private;
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	radeon_fence_wait_last(rdev);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
		return 0;
	}
	/* bad news, how do we tell userspace? */
	dev_info(rdev->dev, "GPU reset failed\n");
	return r;
}


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};
static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_NUM_FILES];
static unsigned _radeon_debugfs_count = 0;

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		if (_radeon_debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}
	if ((_radeon_debugfs_count + nfiles) > RADEON_DEBUGFS_MAX_NUM_FILES) {
		DRM_ERROR("Reached maximum number of debugfs files.\n");
		DRM_ERROR("Report so we increase RADEON_DEBUGFS_MAX_NUM_FILES.\n");
		return -EINVAL;
	}
	_radeon_debugfs[_radeon_debugfs_count].files = files;
	_radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
	_radeon_debugfs_count++;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
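
/*
 * A minimal registration sketch (hypothetical file list, not part of this
 * file): a caller declares a drm_info_list array and hands it to
 * radeon_debugfs_add_files(), which registers it on both minors.
 *
 *	static struct drm_info_list radeon_sample_list[] = {
 *		{ "radeon_sample_info", radeon_sample_info_show, 0, NULL },
 *	};
 *
 *	radeon_debugfs_add_files(rdev, radeon_sample_list,
 *				 ARRAY_SIZE(radeon_sample_list));
 */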

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
	unsigned i;

	for (i = 0; i < _radeon_debugfs_count; i++) {
		drm_debugfs_remove_files(_radeon_debugfs[i].files,
					 _radeon_debugfs[i].num_files, minor);
	}
}
#endif