/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __RADEON_H__
#define __RADEON_H__

/* TODO: Here are things that need to be done :
 *	- surface allocator & initializer : (bit like scratch reg) should
 *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *	  related to surfaces
 *	- WB : write back stuff (do it a bit like the scratch reg things)
 *	- Vblank : look at Jesse's rework and what we should do
 *	- r600/r700: gart & cp
 *	- cs : clean cs ioctl to use a bitmap & things like that.
 *	- power management stuff
 *	- Barrier in gart code
 *	- Unmappable vram ?
 *	- TESTING, TESTING, TESTING
 */

/* Initialization path:
 *  We expect that acceleration initialization might fail for various
 *  reasons even though we work hard to make it work on most
 *  configurations. In order to still have a working userspace in such
 *  a situation the init path must succeed up to the memory controller
 *  initialization point. Failures before this point are considered
 *  fatal errors. Here is the init callchain :
 *      radeon_device_init  performs common structure and mutex initialization
 *      asic_init           sets up the GPU memory layout and performs all
 *                          one-time initialization (failures in this
 *                          function are considered fatal)
 *      asic_startup        sets up GPU acceleration; in order to
 *                          follow the guideline the first thing this
 *                          function should do is set up the GPU
 *                          memory controller (only MC setup failures
 *                          are considered fatal)
 */

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>

#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_audio;
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;

/*
 * Copy from radeon_drv.h so we don't have to include both and have conflicting
 * symbols.
 */
#define RADEON_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define RADEON_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE		16
#define RADEON_DEBUGFS_MAX_COMPONENTS	32
#define RADEONFB_CONN_LIMIT		4
#define RADEON_BIOS_NUM_SCRATCH		8

/* max number of rings */
#define RADEON_NUM_RINGS 3

/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX  0

/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2

/* hardcode these limits for now */
#define RADEON_VA_RESERVED_SIZE		(8 << 20)
#define RADEON_IB_VM_MAX_SIZE		(64 << 10)

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG             = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS      = 0x00000002,
	CHIP_ERRATA_PLL_DELAY           = 0x00000004
};


struct radeon_device;


/*
 * BIOS.
 */
#define ATRM_BIOS_PAGE 4096

#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
	return false;
}

static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
{
	return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Mutex which allows recursive locking from the same process.
 */
struct radeon_mutex {
	struct mutex		mutex;
	struct task_struct	*owner;
	int			level;
};

static inline void radeon_mutex_init(struct radeon_mutex *mutex)
{
	mutex_init(&mutex->mutex);
	mutex->owner = NULL;
	mutex->level = 0;
}

static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
{
	if (mutex_trylock(&mutex->mutex)) {
		/* The mutex was unlocked before, so it's ours now */
		mutex->owner = current;
	} else if (mutex->owner != current) {
		/* Another process locked the mutex, take it */
		mutex_lock(&mutex->mutex);
		mutex->owner = current;
	}
	/* Otherwise the mutex was already locked by this process */

	mutex->level++;
}

static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
{
	if (--mutex->level > 0)
		return;

	mutex->owner = NULL;
	mutex_unlock(&mutex->mutex);
}
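
/*
 * Hedged usage sketch (illustration only, not part of the original header):
 * because radeon_mutex tracks its owner, the same process may take the lock
 * again on a nested path without deadlocking.  radeon_do_nested_work() is a
 * hypothetical caller; cs_mutex is the struct radeon_mutex embedded in
 * struct radeon_device further below.
 *
 *	static void radeon_do_nested_work(struct radeon_device *rdev)
 *	{
 *		radeon_mutex_lock(&rdev->cs_mutex);
 *		radeon_mutex_lock(&rdev->cs_mutex);	// nested, same owner
 *		// ... critical section ...
 *		radeon_mutex_unlock(&rdev->cs_mutex);
 *		radeon_mutex_unlock(&rdev->cs_mutex);	// dropped on last unlock
 *	}
 */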


/*
 * Dummy page
 */
struct radeon_dummy_page {
	struct page	*page;
	dma_addr_t	addr;
};
int radeon_dummy_page_init(struct radeon_device *rdev);
void radeon_dummy_page_fini(struct radeon_device *rdev);

/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll dcpll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * Power management
 */
int radeon_pm_init(struct radeon_device *rdev);
void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_pm_suspend(struct radeon_device *rdev);
void radeon_pm_resume(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
				    unsigned *bankh, unsigned *mtaspect,
				    unsigned *tile_split);

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t			scratch_reg;
	uint64_t			gpu_addr;
	volatile uint32_t		*cpu_addr;
	atomic_t			seq;
	uint32_t			last_seq;
	unsigned long			last_jiffies;
	unsigned long			last_timeout;
	wait_queue_head_t		queue;
	struct list_head		created;
	struct list_head		emitted;
	struct list_head		signaled;
	bool				initialized;
};

struct radeon_fence {
	struct radeon_device		*rdev;
	struct kref			kref;
	struct list_head		list;
	/* protected by radeon_fence.lock */
	uint32_t			seq;
	bool				emitted;
	bool				signaled;
	/* RB, DMA, etc. */
	int				ring;
	struct radeon_semaphore		*semaphore;
};

int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
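
/*
 * Hedged usage sketch (illustration only): typical life cycle of a fence on
 * the GFX ring using the declarations above.  Error handling is trimmed and
 * the emit normally happens from the ring code right after the commands the
 * fence is meant to cover.
 *
 *	struct radeon_fence *fence = NULL;
 *
 *	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	if (r)
 *		return r;
 *	r = radeon_fence_emit(rdev, fence);		// write seq to the ring
 *	if (!r)
 *		r = radeon_fence_wait(fence, false);	// block until signaled
 *	radeon_fence_unref(&fence);			// drop our reference
 */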

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_bo *bo;
};

#define RADEON_GEM_MAX_SURFACES 8

/*
 * TTM.
 */
struct radeon_mman {
	struct ttm_bo_global_ref        bo_global_ref;
	struct drm_global_reference	mem_global_ref;
	struct ttm_bo_device		bdev;
	bool				mem_global_referenced;
	bool				initialized;
};

/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* bo list is protected by bo being reserved */
	struct list_head		bo_list;
	/* vm list is protected by vm mutex */
	struct list_head		vm_list;
	/* constant after initialization */
	struct radeon_vm		*vm;
	struct radeon_bo		*bo;
	uint64_t			soffset;
	uint64_t			eoffset;
	uint32_t			flags;
	bool				valid;
};

struct radeon_bo {
	/* Protected by gem.mutex */
	struct list_head		list;
	/* Protected by tbo.reserved */
	u32				placements[3];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	unsigned			pin_count;
	void				*kptr;
	u32				tiling_flags;
	u32				pitch;
	int				surface_reg;
	/* list of all virtual addresses to which this bo
	 * is associated
	 */
	struct list_head		va;
	/* Constant after initialization */
	struct radeon_device		*rdev;
	struct drm_gem_object		gem_base;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)

struct radeon_bo_list {
	struct ttm_validate_buffer tv;
	struct radeon_bo	*bo;
	uint64_t		gpu_offset;
	unsigned		rdomain;
	unsigned		wdomain;
	u32			tiling_flags;
};

/* sub-allocation manager, it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple, we keep a list of sub allocations in offset
 * order (first entry has offset == 0, last entry has the highest
 * offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size, this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects on the same
 * alignment).  A minimal sketch of the end-of-buffer check follows the
 * structures below.
 */
struct radeon_sa_manager {
	struct radeon_bo	*bo;
	struct list_head	sa_bo;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
};

struct radeon_sa_bo;

/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		list;
	struct radeon_sa_manager	*manager;
	unsigned			offset;
	unsigned			size;
};
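
/*
 * Hedged sketch of the end-of-buffer test described in the comment above
 * (illustration only): radeon_sa_fits_at_end() is a hypothetical helper,
 * "last" being the highest-offset entry on manager->sa_bo (or NULL when
 * the list is empty).
 *
 *	static bool radeon_sa_fits_at_end(struct radeon_sa_manager *manager,
 *					  struct radeon_sa_bo *last,
 *					  unsigned alloc_size)
 *	{
 *		unsigned end = last ? last->offset + last->size : 0;
 *
 *		return (manager->size - end) >= alloc_size;
 *	}
 */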

/*
 * GEM objects.
 */
struct radeon_gem {
	struct mutex		mutex;
	struct list_head	objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);

int radeon_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int radeon_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle);

/*
 * Semaphores.
 */
struct radeon_ring;

#define	RADEON_SEMAPHORE_BO_SIZE	256

struct radeon_semaphore_driver {
	rwlock_t			lock;
	struct list_head		bo;
};

struct radeon_semaphore_bo;

/* everything here is constant */
struct radeon_semaphore {
	struct list_head		list;
	uint64_t			gpu_addr;
	uint32_t			*cpu_ptr;
	struct radeon_semaphore_bo	*bo;
};

struct radeon_semaphore_bo {
	struct list_head		list;
	struct radeon_ib		*ib;
	struct list_head		free;
	struct radeon_semaphore		semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
	unsigned			nused;
};

void radeon_semaphore_driver_fini(struct radeon_device *rdev);
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore);
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore *semaphore);

/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)

struct radeon_gart {
	dma_addr_t			table_addr;
	struct radeon_bo		*robj;
	void				*ptr;
	unsigned			num_gpu_pages;
	unsigned			num_cpu_pages;
	unsigned			table_size;
	struct page			**pages;
	dma_addr_t			*pages_addr;
	bool				ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist,
		     dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);


/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t		aper_size;
	resource_size_t		aper_base;
	resource_size_t		agp_base;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	u64			mc_vram_size;
	u64			visible_vram_size;
	u64			gtt_size;
	u64			gtt_start;
	u64			gtt_end;
	u64			vram_start;
	u64			vram_end;
	unsigned		vram_width;
	u64			real_vram_size;
	int			vram_mtrr;
	bool			vram_is_ddr;
	bool			igp_sideport_enabled;
	u64                     gtt_base_align;
};

bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned		num_reg;
	uint32_t                reg_base;
	bool			free[32];
	uint32_t		reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
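
/*
 * Hedged usage sketch (illustration only): a scratch register is borrowed
 * from the pool, written through the regular MMIO accessors defined later
 * in this header and returned when done.
 *
 *	uint32_t scratch;
 *
 *	r = radeon_scratch_get(rdev, &scratch);
 *	if (r)
 *		return r;
 *	WREG32(scratch, 0xCAFEDEAD);	// write a marker value
 *	// ... use it, e.g. wait for the CP to write it back ...
 *	radeon_scratch_free(rdev, scratch);
 */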


/*
 * IRQS.
 */

struct radeon_unpin_work {
	struct work_struct work;
	struct radeon_device *rdev;
	int crtc_id;
	struct radeon_fence *fence;
	struct drm_pending_vblank_event *event;
	struct radeon_bo *old_rbo;
	u64 new_crtc_base;
};

struct r500_irq_stat_regs {
	u32 disp_int;
};

struct r600_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 d1grph_int;
	u32 d2grph_int;
};

struct evergreen_irq_stat_regs {
	u32 disp_int;
	u32 disp_int_cont;
	u32 disp_int_cont2;
	u32 disp_int_cont3;
	u32 disp_int_cont4;
	u32 disp_int_cont5;
	u32 d1grph_int;
	u32 d2grph_int;
	u32 d3grph_int;
	u32 d4grph_int;
	u32 d5grph_int;
	u32 d6grph_int;
};

union radeon_irq_stat_regs {
	struct r500_irq_stat_regs r500;
	struct r600_irq_stat_regs r600;
	struct evergreen_irq_stat_regs evergreen;
};

#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_HDMI_BLOCKS 2

struct radeon_irq {
	bool		installed;
	bool		sw_int[RADEON_NUM_RINGS];
	bool		crtc_vblank_int[RADEON_MAX_CRTCS];
	bool		pflip[RADEON_MAX_CRTCS];
	wait_queue_head_t	vblank_queue;
	bool            hpd[RADEON_MAX_HPD_PINS];
	bool            gui_idle;
	bool            gui_idle_acked;
	wait_queue_head_t	idle_queue;
	bool		hdmi[RADEON_MAX_HDMI_BLOCKS];
	spinlock_t sw_lock;
	int sw_refcount[RADEON_NUM_RINGS];
	union radeon_irq_stat_regs stat_regs;
	spinlock_t pflip_lock[RADEON_MAX_CRTCS];
	int pflip_refcount[RADEON_MAX_CRTCS];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);

/*
 * CP & rings.
 */

struct radeon_ib {
	struct radeon_sa_bo	sa_bo;
	unsigned		idx;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	struct radeon_fence	*fence;
	unsigned		vm_id;
};

/*
 * locking -
 * mutex protects scheduled_ibs, ready, alloc_bm
 */
struct radeon_ib_pool {
	struct radeon_mutex		mutex;
	struct radeon_sa_manager	sa_manager;
	struct radeon_ib		ibs[RADEON_IB_POOL_SIZE];
	bool				ready;
	unsigned			head_id;
};

struct radeon_ring {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		rptr_reg;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		wptr_reg;
	unsigned		ring_size;
	unsigned		ring_free_dw;
	int			count_dw;
	uint64_t		gpu_addr;
	uint32_t		align_mask;
	uint32_t		ptr_mask;
	struct mutex		mutex;
	bool			ready;
	u32			ptr_reg_shift;
	u32			ptr_reg_mask;
	u32			nop;
};

/*
 * VM
 */
struct radeon_vm {
	struct list_head		list;
	struct list_head		va;
	int				id;
	unsigned			last_pfn;
	u64				pt_gpu_addr;
	u64				*pt;
	struct radeon_sa_bo		sa_bo;
	struct mutex			mutex;
	/* last fence for cs using this vm */
	struct radeon_fence		*fence;
};

struct radeon_vm_funcs {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	/* cs mutex must be locked for schedule_ib */
	int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
	void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
	void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
	uint32_t (*page_flags)(struct radeon_device *rdev,
			       struct radeon_vm *vm,
			       uint32_t flags);
	void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
			unsigned pfn, uint64_t addr, uint32_t flags);
};

struct radeon_vm_manager {
	struct list_head		lru_vm;
	uint32_t			use_bitmap;
	struct radeon_sa_manager	sa_manager;
	uint32_t			max_pfn;
	/* fields constant after init */
	const struct radeon_vm_funcs	*funcs;
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry  */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
};

/*
 * file private structure
 */
struct radeon_fpriv {
	struct radeon_vm		vm;
};

/*
 * R6xx+ IH ring
 */
struct r600_ih {
	struct radeon_bo	*ring_obj;
	volatile uint32_t	*ring;
	unsigned		rptr;
	unsigned		rptr_offs;
	unsigned		wptr;
	unsigned		wptr_old;
	unsigned		ring_size;
	uint64_t		gpu_addr;
	uint32_t		ptr_mask;
	spinlock_t              lock;
	bool                    enabled;
};

struct r600_blit_cp_primitives {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};

struct r600_blit {
	struct mutex		mutex;
	struct radeon_bo	*shader_obj;
	struct r600_blit_cp_primitives primitives;
	int max_dim;
	int ring_size_common;
	int ring_size_per_loop;
	u64 shader_gpu_addr;
	u32 vs_offset, ps_offset;
	u32 state_offset;
	u32 state_len;
	u32 vb_used, vb_total;
	struct radeon_ib *vb_ib;
};

void r600_blit_suspend(struct radeon_device *rdev);

int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_pool_start(struct radeon_device *rdev);
int radeon_ib_pool_suspend(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);


/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object		*gobj;
	struct radeon_bo		*robj;
	struct radeon_bo_list		lobj;
	uint32_t			handle;
	uint32_t			flags;
};

struct radeon_cs_chunk {
	uint32_t		chunk_id;
	uint32_t		length_dw;
	int			kpage_idx[2];
	uint32_t		*kpage[2];
	uint32_t		*kdata;
	void __user		*user_ptr;
	int			last_copied_page;
	int			last_page_index;
};

struct radeon_cs_parser {
	struct device		*dev;
	struct radeon_device	*rdev;
	struct drm_file		*filp;
	/* chunks */
	unsigned		nchunks;
	struct radeon_cs_chunk	*chunks;
	uint64_t		*chunks_array;
	/* IB */
	unsigned		idx;
	/* relocations */
	unsigned		nrelocs;
	struct radeon_cs_reloc	*relocs;
	struct radeon_cs_reloc	**relocs_ptr;
	struct list_head	validated;
	/* indices of various chunks */
	int			chunk_ib_idx;
	int			chunk_relocs_idx;
	int			chunk_flags_idx;
	struct radeon_ib	*ib;
	void			*track;
	unsigned		family;
	int			parser_error;
	u32			cs_flags;
	u32			ring;
	s32			priority;
};

extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);

struct radeon_cs_packet {
	unsigned	idx;
	unsigned	type;
	unsigned	reg;
	unsigned	opcode;
	int		count;
	unsigned	one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);


/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	bool                    enabled;
	bool                    use_event;
};

#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
#define R600_WB_IH_WPTR_OFFSET   2048
#define R600_WB_EVENT_OFFSET     3072

/**
 * struct radeon_pm - power management data
 * @max_bandwidth:      maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk:  sideport memory clock MHz (rs690,rs740,rs780,rs880)
 * @igp_system_mclk:    system clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_clk:    ht link clock MHz (rs690,rs740,rs780,rs880)
 * @igp_ht_link_width:  ht link width in bits (rs690,rs740,rs780,rs880)
 * @k8_bandwidth:       k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth:       ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth:     core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk:          	GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth:   current bandwidth needs
 *
 * It keeps track of various data needed to take power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The equation between gpu/memory clock and available bandwidth is hw dependent
 * (type of memory, bus size, efficiency, ...)
 */

enum radeon_pm_method {
	PM_METHOD_PROFILE,
	PM_METHOD_DYNPM,
};

enum radeon_dynpm_state {
	DYNPM_STATE_DISABLED,
	DYNPM_STATE_MINIMUM,
	DYNPM_STATE_PAUSED,
	DYNPM_STATE_ACTIVE,
	DYNPM_STATE_SUSPENDED,
};
enum radeon_dynpm_action {
	DYNPM_ACTION_NONE,
	DYNPM_ACTION_MINIMUM,
	DYNPM_ACTION_DOWNCLOCK,
	DYNPM_ACTION_UPCLOCK,
	DYNPM_ACTION_DEFAULT
};

enum radeon_voltage_type {
	VOLTAGE_NONE = 0,
	VOLTAGE_GPIO,
	VOLTAGE_VDDC,
	VOLTAGE_SW
};

enum radeon_pm_state_type {
	POWER_STATE_TYPE_DEFAULT,
	POWER_STATE_TYPE_POWERSAVE,
	POWER_STATE_TYPE_BATTERY,
	POWER_STATE_TYPE_BALANCED,
	POWER_STATE_TYPE_PERFORMANCE,
};

enum radeon_pm_profile_type {
	PM_PROFILE_DEFAULT,
	PM_PROFILE_AUTO,
	PM_PROFILE_LOW,
	PM_PROFILE_MID,
	PM_PROFILE_HIGH,
};

#define PM_PROFILE_DEFAULT_IDX 0
#define PM_PROFILE_LOW_SH_IDX  1
#define PM_PROFILE_MID_SH_IDX  2
#define PM_PROFILE_HIGH_SH_IDX 3
#define PM_PROFILE_LOW_MH_IDX  4
#define PM_PROFILE_MID_MH_IDX  5
#define PM_PROFILE_HIGH_MH_IDX 6
#define PM_PROFILE_MAX         7

struct radeon_pm_profile {
	int dpms_off_ps_idx;
	int dpms_on_ps_idx;
	int dpms_off_cm_idx;
	int dpms_on_cm_idx;
};

enum radeon_int_thermal_type {
	THERMAL_TYPE_NONE,
	THERMAL_TYPE_RV6XX,
	THERMAL_TYPE_RV770,
	THERMAL_TYPE_EVERGREEN,
	THERMAL_TYPE_SUMO,
	THERMAL_TYPE_NI,
};

struct radeon_voltage {
	enum radeon_voltage_type type;
	/* gpio voltage */
	struct radeon_gpio_rec gpio;
	u32 delay; /* delay in usec from voltage drop to sclk change */
	bool active_high; /* voltage drop is active when bit is high */
	/* VDDC voltage */
	u8 vddc_id; /* index into vddc voltage table */
	u8 vddci_id; /* index into vddci voltage table */
	bool vddci_enabled;
	/* r6xx+ sw */
	u16 voltage;
	/* evergreen+ vddci */
	u16 vddci;
};

/* clock mode flags */
#define RADEON_PM_MODE_NO_DISPLAY          (1 << 0)

struct radeon_pm_clock_info {
	/* memory clock */
	u32 mclk;
	/* engine clock */
	u32 sclk;
	/* voltage info */
	struct radeon_voltage voltage;
	/* standardized clock flags */
	u32 flags;
};

/* state flags */
#define RADEON_PM_STATE_SINGLE_DISPLAY_ONLY (1 << 0)

struct radeon_power_state {
	enum radeon_pm_state_type type;
	struct radeon_pm_clock_info *clock_info;
	/* number of valid clock modes in this power state */
	int num_clock_modes;
	struct radeon_pm_clock_info *default_clock_mode;
	/* standardized state flags */
	u32 flags;
	u32 misc; /* vbios specific flags */
	u32 misc2; /* vbios specific flags */
	int pcie_lanes; /* pcie lanes */
};

/*
 * Some modes are overclocked by a very low value, accept them
 */
#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */

struct radeon_pm {
	struct mutex		mutex;
	u32			active_crtcs;
	int			active_crtc_count;
	int			req_vblank;
	bool			vblank_sync;
	bool			gui_idle;
	fixed20_12		max_bandwidth;
	fixed20_12		igp_sideport_mclk;
	fixed20_12		igp_system_mclk;
	fixed20_12		igp_ht_link_clk;
	fixed20_12		igp_ht_link_width;
	fixed20_12		k8_bandwidth;
	fixed20_12		sideport_bandwidth;
	fixed20_12		ht_bandwidth;
	fixed20_12		core_bandwidth;
	fixed20_12		sclk;
	fixed20_12		mclk;
	fixed20_12		needed_bandwidth;
	struct radeon_power_state *power_state;
	/* number of valid power states */
	int                     num_power_states;
	int                     current_power_state_index;
	int                     current_clock_mode_index;
	int                     requested_power_state_index;
	int                     requested_clock_mode_index;
	int                     default_power_state_index;
	u32                     current_sclk;
	u32                     current_mclk;
	u16                     current_vddc;
	u16                     current_vddci;
	u32                     default_sclk;
	u32                     default_mclk;
	u16                     default_vddc;
	u16                     default_vddci;
	struct radeon_i2c_chan *i2c_bus;
	/* selected pm method */
	enum radeon_pm_method     pm_method;
	/* dynpm power management */
	struct delayed_work	dynpm_idle_work;
	enum radeon_dynpm_state	dynpm_state;
	enum radeon_dynpm_action	dynpm_planned_action;
	unsigned long		dynpm_action_timeout;
	bool                    dynpm_can_upclock;
	bool                    dynpm_can_downclock;
	/* profile-based power management */
	enum radeon_pm_profile_type profile;
	int                     profile_index;
	struct radeon_pm_profile profiles[PM_PROFILE_MAX];
	/* internal thermal controller on rv6xx+ */
	enum radeon_int_thermal_type int_thermal_type;
	struct device	        *int_hwmon_dev;
};

int radeon_pm_get_type_index(struct radeon_device *rdev,
			     enum radeon_pm_state_type ps_type,
			     int instance);

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev, int test_number);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);
void radeon_test_ring_sync(struct radeon_device *rdev,
			   struct radeon_ring *cpA,
			   struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);


/*
 * Debugfs
 */
struct radeon_debugfs {
	struct drm_info_list	*files;
	unsigned		num_files;
};

int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);


/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*fini)(struct radeon_device *rdev);
	int (*resume)(struct radeon_device *rdev);
	int (*suspend)(struct radeon_device *rdev);
	void (*vga_set_state)(struct radeon_device *rdev, bool state);
	bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
	int (*asic_reset)(struct radeon_device *rdev);
	/* ioctl hw specific callback. Some hw might want to perform a special
	 * operation on a specific ioctl. For instance on wait idle some hw
	 * might want to perform an HDP flush through MMIO as it seems that
	 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
	 * through the ring.
	 */
	void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
	/* check if 3D engine is idle */
	bool (*gui_idle)(struct radeon_device *rdev);
	/* wait for mc_idle */
	int (*mc_wait_for_idle)(struct radeon_device *rdev);
	/* gart */
1148 1149 1150 1151
	struct {
		void (*tlb_flush)(struct radeon_device *rdev);
		int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	} gart;
	/* ring specific callbacks */
	struct {
		void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
		int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
		void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
		void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
				       struct radeon_semaphore *semaphore, bool emit_wait);
		int (*cs_parse)(struct radeon_cs_parser *p);
		void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
		int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
	} ring[RADEON_NUM_RINGS];
	/* irqs */
	struct {
		int (*set)(struct radeon_device *rdev);
		int (*process)(struct radeon_device *rdev);
	} irq;
	/* displays */
	struct {
		/* display watermarks */
		void (*bandwidth_update)(struct radeon_device *rdev);
		/* get frame count */
		u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
		/* wait for vblank */
		void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
	} display;
	/* copy functions for bo handling */
	struct {
		int (*blit)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence *fence);
		u32 blit_ring_index;
		int (*dma)(struct radeon_device *rdev,
			   uint64_t src_offset,
			   uint64_t dst_offset,
			   unsigned num_gpu_pages,
			   struct radeon_fence *fence);
		u32 dma_ring_index;
		/* method used for bo copy */
		int (*copy)(struct radeon_device *rdev,
			    uint64_t src_offset,
			    uint64_t dst_offset,
			    unsigned num_gpu_pages,
			    struct radeon_fence *fence);
		/* ring used for bo copies */
		u32 copy_ring_index;
	} copy;
	/* surfaces */
	struct {
		int (*set_reg)(struct radeon_device *rdev, int reg,
				       uint32_t tiling_flags, uint32_t pitch,
				       uint32_t offset, uint32_t obj_size);
		void (*clear_reg)(struct radeon_device *rdev, int reg);
	} surface;
	/* hotplug detect */
	struct {
		void (*init)(struct radeon_device *rdev);
		void (*fini)(struct radeon_device *rdev);
		bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
		void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
	} hpd;
	/* power management */
	struct {
		void (*misc)(struct radeon_device *rdev);
		void (*prepare)(struct radeon_device *rdev);
		void (*finish)(struct radeon_device *rdev);
		void (*init_profile)(struct radeon_device *rdev);
		void (*get_dynpm_state)(struct radeon_device *rdev);
		uint32_t (*get_engine_clock)(struct radeon_device *rdev);
		void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
		uint32_t (*get_memory_clock)(struct radeon_device *rdev);
		void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
		int (*get_pcie_lanes)(struct radeon_device *rdev);
		void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
		void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	} pm;
	/* pageflipping */
	struct {
		void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
		u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
		void (*post_page_flip)(struct radeon_device *rdev, int crtc);
	} pflip;
};

/*
 * Asic structures
 */
struct r100_gpu_lockup {
	unsigned long	last_jiffies;
	u32		last_cp_rptr;
};

struct r100_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			hdp_cntl;
	struct r100_gpu_lockup	lockup;
};

struct r300_asic {
	const unsigned		*reg_safe_bm;
	unsigned		reg_safe_bm_size;
	u32			resync_scratch;
	u32			hdp_cntl;
	struct r100_gpu_lockup	lockup;
};

struct r600_asic {
	unsigned		max_pipes;
	unsigned		max_tile_pipes;
	unsigned		max_simds;
	unsigned		max_backends;
	unsigned		max_gprs;
	unsigned		max_threads;
	unsigned		max_stack_entries;
	unsigned		max_hw_contexts;
	unsigned		max_gs_threads;
	unsigned		sx_max_export_size;
	unsigned		sx_max_export_pos_size;
	unsigned		sx_max_export_smx_size;
	unsigned		sq_num_cf_insts;
	unsigned		tiling_nbanks;
	unsigned		tiling_npipes;
	unsigned		tiling_group_size;
	unsigned		tile_config;
	unsigned		backend_map;
	struct r100_gpu_lockup	lockup;
};

struct rv770_asic {
	unsigned		max_pipes;
	unsigned		max_tile_pipes;
	unsigned		max_simds;
	unsigned		max_backends;
	unsigned		max_gprs;
	unsigned		max_threads;
	unsigned		max_stack_entries;
	unsigned		max_hw_contexts;
	unsigned		max_gs_threads;
	unsigned		sx_max_export_size;
	unsigned		sx_max_export_pos_size;
	unsigned		sx_max_export_smx_size;
	unsigned		sq_num_cf_insts;
	unsigned		sx_num_of_sets;
	unsigned		sc_prim_fifo_size;
	unsigned		sc_hiz_tile_fifo_size;
	unsigned		sc_earlyz_tile_fifo_fize;
	unsigned		tiling_nbanks;
	unsigned		tiling_npipes;
	unsigned		tiling_group_size;
	unsigned		tile_config;
	unsigned		backend_map;
	struct r100_gpu_lockup	lockup;
};

struct evergreen_asic {
	unsigned num_ses;
	unsigned max_pipes;
	unsigned max_tile_pipes;
	unsigned max_simds;
	unsigned max_backends;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_stack_entries;
	unsigned max_hw_contexts;
	unsigned max_gs_threads;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned sq_num_cf_insts;
	unsigned sx_num_of_sets;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;
	unsigned tiling_nbanks;
	unsigned tiling_npipes;
	unsigned tiling_group_size;
	unsigned tile_config;
	unsigned backend_map;
	struct r100_gpu_lockup	lockup;
};

struct cayman_asic {
	unsigned max_shader_engines;
	unsigned max_pipes_per_simd;
	unsigned max_tile_pipes;
	unsigned max_simds_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_threads;
	unsigned max_gs_threads;
	unsigned max_stack_entries;
	unsigned sx_num_of_sets;
	unsigned sx_max_export_size;
	unsigned sx_max_export_pos_size;
	unsigned sx_max_export_smx_size;
	unsigned max_hw_contexts;
	unsigned sq_num_cf_insts;
	unsigned sc_prim_fifo_size;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_shader_engines;
	unsigned num_shader_pipes_per_simd;
	unsigned num_tile_pipes;
	unsigned num_simds_per_se;
	unsigned num_backends_per_se;
	unsigned backend_disable_mask_per_asic;
	unsigned backend_map;
	unsigned num_texture_channel_caches;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;

	unsigned tile_config;
	struct r100_gpu_lockup	lockup;
};

union radeon_asic_config {
	struct r300_asic	r300;
	struct r100_asic	r100;
	struct r600_asic	r600;
	struct rv770_asic	rv770;
	struct evergreen_asic	evergreen;
	struct cayman_asic	cayman;
};

/*
 * asic initialization from radeon_asic.c
 */
void radeon_agp_disable(struct radeon_device *rdev);
int radeon_asic_init(struct radeon_device *rdev);


/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);
int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
	struct radeon_bo		*robj;
	volatile uint32_t		*ptr;
	u64				gpu_addr;
};

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t);

struct radeon_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;
	/* ASIC */
	union radeon_asic_config	config;
	enum radeon_family		family;
	unsigned long			flags;
	int				usec_timeout;
	enum radeon_pll_errata		pll_errata;
	int				num_gb_pipes;
	int				num_z_pipes;
	int				disp_priority;
	/* BIOS */
	uint8_t				*bios;
	bool				is_atom_bios;
	uint16_t			bios_header_start;
	struct radeon_bo		*stollen_vga_memory;
	/* Register mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	radeon_rreg_t			mc_rreg;
	radeon_wreg_t			mc_wreg;
	radeon_rreg_t			pll_rreg;
	radeon_wreg_t			pll_wreg;
	uint32_t                        pcie_reg_mask;
	radeon_rreg_t			pciep_rreg;
	radeon_wreg_t			pciep_wreg;
	/* io port */
	void __iomem                    *rio_mem;
	resource_size_t			rio_mem_size;
	struct radeon_clock             clock;
	struct radeon_mc		mc;
	struct radeon_gart		gart;
	struct radeon_mode_info		mode_info;
	struct radeon_scratch		scratch;
	struct radeon_mman		mman;
	rwlock_t			fence_lock;
	struct radeon_fence_driver	fence_drv[RADEON_NUM_RINGS];
	struct radeon_semaphore_driver	semaphore_drv;
	struct radeon_ring		ring[RADEON_NUM_RINGS];
	struct radeon_ib_pool		ib_pool;
	struct radeon_irq		irq;
	struct radeon_asic		*asic;
	struct radeon_gem		gem;
	struct radeon_pm		pm;
	uint32_t			bios_scratch[RADEON_BIOS_NUM_SCRATCH];
	struct radeon_mutex		cs_mutex;
	struct radeon_wb		wb;
	struct radeon_dummy_page	dummy_page;
	bool				gpu_lockup;
	bool				shutdown;
	bool				suspend;
	bool				need_dma32;
	bool				accel_working;
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *me_fw;	/* all family ME firmware */
	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
	const struct firmware *mc_fw;	/* NI MC firmware */
	struct r600_blit r600_blit;
	struct r600_vram_scratch vram_scratch;
	int msi_enabled; /* msi enabled */
	struct r600_ih ih; /* r6/700 interrupt ring */
	struct work_struct hotplug_work;
	int num_crtc; /* number of crtcs */
	struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
	struct mutex vram_mutex;

	/* audio stuff */
	bool			audio_enabled;
	struct timer_list	audio_timer;
	int			audio_channels;
	int			audio_rate;
	int			audio_bits_per_sample;
	uint8_t			audio_status_bits;
	uint8_t			audio_category_code;

	struct notifier_block acpi_nb;
	/* only one userspace can use Hyperz features or CMASK at a time */
	struct drm_file *hyperz_filp;
	struct drm_file *cmask_filp;
	/* i2c buses */
	struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
	/* debugfs */
	struct radeon_debugfs	debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
	unsigned 		debugfs_count;
	/* virtual memory */
	struct radeon_vm_manager	vm_manager;
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);

/*
 * Cast helper
 */
#define to_radeon_fence(p) ((struct radeon_fence *)(p))

/*
 * Registers read & write functions.
 */
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
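
/*
 * Hedged usage sketch (illustration only): in WREG32_P() the "mask" argument
 * selects the bits that are preserved from the current register value, while
 * the bits cleared in the mask are taken from "val".  SOME_REG is a
 * placeholder, not a real register define.
 *
 *	// read-modify-write: update only the low byte of SOME_REG
 *	WREG32_P(SOME_REG, new_low_byte, ~0xff);
 */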
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))

/*
 * Indirect registers accessor
 */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			    (rdev->pdev->device == 0x5969))
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
		(rdev->family == CHIP_RV200) || \
		(rdev->family == CHIP_RS100) || \
		(rdev->family == CHIP_RS200) || \
		(rdev->family == CHIP_RV250) || \
		(rdev->family == CHIP_RV280) || \
		(rdev->family == CHIP_RS300))
#define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300)  ||	\
		(rdev->family == CHIP_RV350) ||			\
		(rdev->family == CHIP_R350)  ||			\
		(rdev->family == CHIP_RV380) ||			\
		(rdev->family == CHIP_R420)  ||			\
		(rdev->family == CHIP_R423)  ||			\
		(rdev->family == CHIP_RV410) ||			\
		(rdev->family == CHIP_RS400) ||			\
		(rdev->family == CHIP_RS480))
#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \
		(rdev->ddev->pdev->device == 0x9443) || \
		(rdev->ddev->pdev->device == 0x944B) || \
		(rdev->ddev->pdev->device == 0x9506) || \
		(rdev->ddev->pdev->device == 0x9509) || \
		(rdev->ddev->pdev->device == 0x950F) || \
		(rdev->ddev->pdev->device == 0x689C) || \
		(rdev->ddev->pdev->device == 0x689D))
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600)  ||	\
			    (rdev->family == CHIP_RS690)  ||	\
			    (rdev->family == CHIP_RS740)  ||	\
			    (rdev->family >= CHIP_R600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
			     (rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (rdev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);


/*
 * RING helpers.
 */
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
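
/*
 * Hedged usage sketch (illustration only): the usual pattern is to lock the
 * ring for the number of dwords needed, emit them with radeon_ring_write()
 * and commit.  PACKET0()/SOME_REG come from the register headers and are
 * used here as placeholders.
 *
 *	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, PACKET0(SOME_REG, 0));
 *	radeon_ring_write(ring, some_value);
 *	radeon_ring_unlock_commit(rdev, ring);	// bump wptr so the CP fetches it
 */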

/*
 * ASICs macro.
 */
#define radeon_init(rdev) (rdev)->asic->init((rdev))
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev))
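
/*
 * Usage note: every macro above dispatches through the per-family function
 * tables hanging off rdev->asic, so common code never has to know which
 * chip generation it is driving.  A hedged example (error handling trimmed):
 *
 *	if (radeon_gpu_is_lockup(rdev, ring))
 *		radeon_asic_reset(rdev);
 */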

/* Common functions */
extern int radeon_gpu_reset(struct radeon_device *rdev);
/* AGP */
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern void radeon_scratch_init(struct radeon_device *rdev);
extern void radeon_wb_fini(struct radeon_device *rdev);
extern int radeon_wb_init(struct radeon_device *rdev);
extern void radeon_wb_disable(struct radeon_device *rdev);
extern void radeon_surface_init(struct radeon_device *rdev);
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);

/*
 * vm
 */
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_manager_start(struct radeon_device *rdev);
int radeon_vm_manager_suspend(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
			    struct radeon_vm *vm,
			    struct radeon_bo *bo,
			    struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo);
int radeon_vm_bo_add(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo,
		     uint64_t offset,
		     uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_bo *bo);
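
/*
 * Hedged sketch of how the vm interface above is meant to be used; the
 * actual call sites live elsewhere in the driver and the ordering below is
 * illustrative only:
 *
 *	radeon_vm_init(rdev, vm);			// per-client address space
 *	radeon_vm_bo_add(rdev, vm, bo, offset, flags);	// create a mapping
 *	radeon_vm_bo_update_pte(rdev, vm, bo, mem);	// (re)write its PTEs
 *	...
 *	radeon_vm_bo_rmv(rdev, vm, bo);
 *	radeon_vm_fini(rdev, vm);
 */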


/*
 * R600 vram scratch functions
 */
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);

/*
 * r600 cs checking helper
 */
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);
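
/*
 * Hedged usage note: the command-stream checker uses these helpers to turn
 * a texture's pixel dimensions into block counts and a byte size so that
 * relocations can be bounds-checked.  Roughly (values illustrative):
 *
 *	nbx  = r600_fmt_get_nblocksx(format, w);
 *	nby  = r600_fmt_get_nblocksy(format, h);
 *	size = nbx * nby * r600_fmt_get_blocksize(format);
 */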

/*
 * r600 functions used by radeon_encoder.c
 */
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);

extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);

/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
#endif

#include "radeon_object.h"

#endif