/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20150410"

#undef WARN_ON
/* Many versions of gcc do not see through this and fall over :( */
#if 0
#define WARN_ON(x) ({ \
	bool __i915_warn_cond = (x); \
	if (__builtin_constant_p(__i915_warn_cond)) \
		BUILD_BUG_ON(__i915_warn_cond); \
	WARN(__i915_warn_cond, "WARN_ON(" #x ")"); })
#else
#define WARN_ON(x) WARN((x), "WARN_ON(" #x ")")
#endif

#undef WARN_ON_ONCE
#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")")

#define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \
			     (long) (x), __func__);

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem.  This will either
 * WARN() or DRM_ERROR() depending on the verbose_state_checks moduleparam,
 * to enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on)) {					\
		if (i915.verbose_state_checks)				\
			WARN(1, format);				\
		else							\
			DRM_ERROR(format);				\
	}								\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(condition) ({				\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on)) {					\
		if (i915.verbose_state_checks)				\
			WARN(1, "WARN_ON(" #condition ")\n");		\
		else							\
			DRM_ERROR("WARN_ON(" #condition ")\n");		\
	}								\
	unlikely(__ret_warn_on);					\
})
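/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * header; the cur_state/state pair below is hypothetical): a typical modeset
 * assert compares tracked software state against readback hardware state:
 *
 *	I915_STATE_WARN(cur_state != state,
 *			"PLL state mismatch (expected %s, current %s)\n",
 *			state ? "on" : "off", cur_state ? "on" : "off");
 */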

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	_PIPE_EDP,
	I915_MAX_PIPES = _PIPE_EDP
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP,
	I915_MAX_TRANSCODERS
};
#define transcoder_name(t) ((t) + 'A')

/*
 * This is the maximum (across all platforms) number of planes (primary +
 * sprites) that can be active at the same time on one pipe.
 *
 * This value doesn't count the cursor plane.
 */
#define I915_MAX_PLANES	4

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

#define I915_NUM_PHYS_VLV 2

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_PORT_DDI_A_2_LANES,
	POWER_DOMAIN_PORT_DDI_A_4_LANES,
	POWER_DOMAIN_PORT_DDI_B_2_LANES,
	POWER_DOMAIN_PORT_DDI_B_4_LANES,
	POWER_DOMAIN_PORT_DDI_C_2_LANES,
	POWER_DOMAIN_PORT_DDI_C_4_LANES,
	POWER_DOMAIN_PORT_DDI_D_2_LANES,
	POWER_DOMAIN_PORT_DDI_D_4_LANES,
	POWER_DOMAIN_PORT_DSI,
	POWER_DOMAIN_PORT_CRT,
	POWER_DOMAIN_PORT_OTHER,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_PLLS,
	POWER_DOMAIN_AUX_A,
	POWER_DOMAIN_AUX_B,
	POWER_DOMAIN_AUX_C,
	POWER_DOMAIN_AUX_D,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(__dev_priv, __p) \
	for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++)
#define for_each_plane(__dev_priv, __pipe, __p)				\
	for ((__p) = 0;							\
	     (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1;	\
	     (__p)++)
#define for_each_sprite(__dev_priv, __p, __s)				\
	for ((__s) = 0;							\
	     (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];	\
	     (__s)++)

#define for_each_crtc(dev, crtc) \
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)

#define for_each_intel_crtc(dev, intel_crtc) \
	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)

#define for_each_intel_encoder(dev, intel_encoder)		\
	list_for_each_entry(intel_encoder,			\
			    &(dev)->mode_config.encoder_list,	\
			    base.head)

#define for_each_intel_connector(dev, intel_connector)		\
	list_for_each_entry(intel_connector,			\
			    &dev->mode_config.connector_list,	\
			    base.head)

#define for_each_digital_port(dev, digital_port)		\
	list_for_each_entry(digital_port,			\
			    &dev->mode_config.encoder_list,	\
			    base.base.head)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \
	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \
		if ((intel_connector)->base.encoder == (__encoder))

#define for_each_power_domain(domain, mask)				\
	for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++)	\
		if ((1 << (domain)) & (mask))
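/*
 * Usage sketch (editor's note, assuming a valid dev_priv): the iterators
 * above expand to plain for loops, so they nest naturally, e.g. to walk
 * every plane on every pipe:
 *
 *	enum pipe pipe;
 *	int plane;
 *
 *	for_each_pipe(dev_priv, pipe)
 *		for_each_plane(dev_priv, pipe, plane)
 *			DRM_DEBUG_KMS("pipe %c plane %d\n",
 *				      pipe_name(pipe), plane);
 */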

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A = 0,
	DPLL_ID_PCH_PLL_B = 1,
	/* hsw/bdw */
	DPLL_ID_WRPLL1 = 0,
	DPLL_ID_WRPLL2 = 1,
	/* skl */
	DPLL_ID_SKL_DPLL1 = 0,
	DPLL_ID_SKL_DPLL2 = 1,
	DPLL_ID_SKL_DPLL3 = 2,
};
#define I915_NUM_PLLS 3

struct intel_dpll_hw_state {
	/* i9xx, pch plls */
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;

	/* hsw, bdw */
	uint32_t wrpll;

	/* skl */
	/*
	 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
	 * lower part of ctrl1 and they get shifted into position when
	 * writing the register.  This allows us to easily compare the
	 * state to share the DPLL.
	 */
	uint32_t ctrl1;
	/* HDMI only, 0 when used for DP */
	uint32_t cfgcr1, cfgcr2;

	/* bxt */
	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pcsdw12;
};

struct intel_shared_dpll_config {
	unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
	struct intel_dpll_hw_state hw_state;
};

struct intel_shared_dpll {
	struct intel_shared_dpll_config config;
	struct intel_shared_dpll_config *new_config;

	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	/* The mode_set hook is optional and should be used together with the
	 * intel_prepare_shared_dpll function. */
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

#define SKL_DPLL0 0
#define SKL_DPLL1 1
#define SKL_DPLL2 2
#define SKL_DPLL3 3

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t	tu;
	uint32_t	gmch_m;
	uint32_t	gmch_n;
	uint32_t	link_m;
	uint32_t	link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	struct timeval time;

	char error_msg[128];
	u32 reset_count;
	u32 suspend_count;

	/* Generic register state */
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 gtier[4];
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 fault_data0; /* gen8, gen9 */
	u32 fault_data1; /* gen8, gen9 */
	u32 done_reg;
	u32 gac_eco;
	u32 gam_ecochk;
	u32 gab_ctl;
	u32 gfx_mode;
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u64 fence[I915_MAX_NUM_FENCES];
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	struct drm_i915_error_object *semaphore_obj;

	struct drm_i915_error_ring {
		bool valid;
		/* Software tracked state */
		bool waiting;
		int hangcheck_score;
		enum intel_ring_hangcheck_action hangcheck_action;
		int num_requests;

		/* our own tracking of ring head and tail */
		u32 cpu_ring_head;
		u32 cpu_ring_tail;

		u32 semaphore_seqno[I915_NUM_RINGS - 1];

		/* Register state */
		u32 start;
		u32 tail;
		u32 head;
		u32 ctl;
		u32 hws;
		u32 ipeir;
		u32 ipehr;
		u32 instdone;
		u32 bbstate;
		u32 instpm;
		u32 instps;
		u32 seqno;
		u64 bbaddr;
		u64 acthd;
		u32 fault_reg;
		u64 faddr;
		u32 rc_psmi; /* sleep state */
		u32 semaphore_mboxes[I915_NUM_RINGS - 1];

		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page;

		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;

		struct {
			u32 gfx_mode;
			union {
				u64 pdp[4];
				u32 pp_dir_base;
			};
		} vm_info;

		pid_t pid;
		char comm[TASK_COMM_LEN];
	} ring[I915_NUM_RINGS];

	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		u32 userptr:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;

	u32 *active_bo_count, *pinned_bo_count;
	u32 vm_count;
};

struct intel_connector;
struct intel_encoder;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct intel_crtc_state *crtc_state,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, uint32_t sprite_height,
				 int pixel_size, bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*audio_codec_enable)(struct drm_connector *connector,
				   struct intel_encoder *encoder,
				   struct drm_display_mode *mode);
	void (*audio_codec_disable)(struct intel_encoder *encoder);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  struct intel_engine_cs *ring,
			  uint32_t flags);
	void (*update_primary_plane)(struct drm_crtc *crtc,
				     struct drm_framebuffer *fb,
				     int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

enum forcewake_domain_id {
	FW_DOMAIN_ID_RENDER = 0,
	FW_DOMAIN_ID_BLITTER,
	FW_DOMAIN_ID_MEDIA,

	FW_DOMAIN_ID_COUNT
};

enum forcewake_domains {
	FORCEWAKE_RENDER = (1 << FW_DOMAIN_ID_RENDER),
	FORCEWAKE_BLITTER = (1 << FW_DOMAIN_ID_BLITTER),
	FORCEWAKE_MEDIA	= (1 << FW_DOMAIN_ID_MEDIA),
	FORCEWAKE_ALL = (FORCEWAKE_RENDER |
			 FORCEWAKE_BLITTER |
			 FORCEWAKE_MEDIA)
};

struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       enum forcewake_domains domains);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
				uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
				uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
				uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
				uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	enum forcewake_domains fw_domains;

	struct intel_uncore_forcewake_domain {
		struct drm_i915_private *i915;
		enum forcewake_domain_id id;
		unsigned wake_count;
		struct timer_list timer;
		u32 reg_set;
		u32 val_set;
		u32 val_clear;
		u32 reg_ack;
		u32 reg_post;
		u32 val_reset;
	} fw_domain[FW_DOMAIN_ID_COUNT];
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
	for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
	     (i__) < FW_DOMAIN_ID_COUNT; \
	     (i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
		if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))

#define for_each_fw_domain(domain__, dev_priv__, i__) \
	for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_skylake) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u16 device_id;
	u8 num_pipes:3;
	u8 num_sprites[I915_MAX_PIPES];
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
	/* Register offsets for the various display pipes and transcoders */
	int pipe_offsets[I915_MAX_TRANSCODERS];
	int trans_offsets[I915_MAX_TRANSCODERS];
	int palette_offsets[I915_MAX_PIPES];
	int cursor_offsets[I915_MAX_PIPES];

	/* Slice/subslice/EU info */
	u8 slice_total;
	u8 subslice_total;
	u8 subslice_per_slice;
	u8 eu_total;
	u8 eu_per_subslice;
	/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
	u8 subslice_7eu[3];
	u8 has_slice_pg:1;
	u8 has_subslice_pg:1;
	u8 has_eu_pg:1;
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* If the context causes a second GPU hang within this time,
	 * it is permanently banned from submitting any more work.
	 */
	unsigned long ban_period_seconds;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_HANDLE 0
/**
 * struct intel_context - as the name implies, represents a context.
 * @ref: reference count.
 * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
 *		hangs.
 * @vm: virtual memory space used by this context.
 * @legacy_hw_ctx: render context backing object and whether it is correctly
 *                initialized (legacy ring submission mechanism only).
 * @link: link in the global list of contexts.
 *
 * Contexts are memory images used by the hardware to store copies of their
 * internal state.
 */
struct intel_context {
	struct kref ref;
	int user_handle;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct i915_ctx_hang_stats hang_stats;
	struct i915_hw_ppgtt *ppgtt;

	/* Legacy ring buffer submission */
	struct {
		struct drm_i915_gem_object *rcs_state;
		bool initialized;
	} legacy_hw_ctx;

	/* Execlists */
	bool rcs_initialized;
	struct {
		struct drm_i915_gem_object *state;
		struct intel_ringbuffer *ringbuf;
		int pin_count;
	} engine[I915_NUM_RINGS];

	struct list_head link;
};

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
};

struct i915_fbc {
	unsigned long uncompressed_size;
	unsigned threshold;
	unsigned int fb_id;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	struct intel_crtc *crtc;
	int y;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	/* Tracks whether the HW is actually enabled, not whether the feature is
	 * possible. */
	bool enabled;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

/**
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;
	bool sink_support;
	bool source_ok;
	struct intel_dp *enabled;
	bool active;
	struct delayed_work work;
	unsigned busy_frontbuffer_bits;
	bool psr2_support;
	bool aux_frame_sync;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_SPT,        /* Sunrisepoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIPEB_FORCE (1<<4)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	u32 cz_clock;
	u32 render_c0;
	u32 media_c0;
};

struct intel_gen6_power_mgmt {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
	u8 idle_freq;		/* Frequency to request when we are idle */
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/frequency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
	u32 cz_freq;

	u8 up_threshold; /* Current %busy required to upclock */
	u8 down_threshold; /* Current %busy required to downclock */

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;
	struct list_head clients;
	unsigned boosts;

	/* manual wa residency calculations */
	struct intel_rps_ei up_ei, down_ei;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
	unsigned long domains;
	unsigned long data;
	const struct i915_power_well_ops *ops;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	bool initializing;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct shrinker shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Is the GPU currently considered idle, or busy executing userspace
	 * requests?  Whilst idle, we attempt to power down the hardware and
	 * display clocks. In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	bool busy;

	/* the indicator for dispatch video commands on two BSD rings */
	int bsd_ring_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	struct drm_i915_private *i915;
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct workqueue_struct *hangcheck_wq;
	struct delayed_work hangcheck_work;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit set)
	 * mean that a reset is in progress and even values mean that the
	 * (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* Userspace knobs for gpu hang simulation;
	 * combines both a ring mask, and extra flags
	 */
	u32 stop_rings;
#define I915_STOP_RING_ALLOW_BAN       (1 << 31)
#define I915_STOP_RING_ALLOW_WARN      (1 << 30)

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;

	/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
	bool reload_in_reset;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int has_mipi:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	enum drrs_support_type drrs_type;

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	bool edp_low_vswing;
	struct edp_power_seq edp_pps;

	struct {
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time;
		int tp2_tp3_wakeup_time;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
	} backlight;

	/* MIPI DSI */
	struct {
		u16 port;
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u8 seq_version;
		u32 size;
		u8 *data;
		u8 *sequence[MIPI_SEQ_MAX];
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct vlv_wm_values {
	struct {
		uint16_t primary;
		uint16_t sprite[2];
		uint8_t cursor;
	} pipe[3];

	struct {
		uint16_t plane;
		uint8_t cursor;
	} sr;

	struct {
		uint8_t cursor;
		uint8_t sprite[2];
		uint8_t primary;
	} ddl[3];
};

struct skl_ddb_entry {
	uint16_t start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}
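/*
 * Example (editor's note, with hypothetical "cur"/"new" allocations): since
 * 'end' is exclusive, an entry {start = 0, end = 16} spans 16 blocks, and
 * two allocations can be compared entry by entry:
 *
 *	if (!skl_ddb_entry_equal(&cur->pipe[pipe], &new->pipe[pipe]))
 *		ddb_changed = true;	// pipe's allocation needs reprogramming
 */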

struct skl_ddb_allocation {
	struct skl_ddb_entry pipe[I915_MAX_PIPES];
	struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
	struct skl_ddb_entry cursor[I915_MAX_PIPES];
};

struct skl_wm_values {
	bool dirty[I915_MAX_PIPES];
	struct skl_ddb_allocation ddb;
	uint32_t wm_linetime[I915_MAX_PIPES];
	uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
	uint32_t cursor[I915_MAX_PIPES][8];
	uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
	uint32_t cursor_trans[I915_MAX_PIPES];
};

struct skl_wm_level {
	bool plane_en[I915_MAX_PLANES];
	bool cursor_en;
	uint16_t plane_res_b[I915_MAX_PLANES];
	uint8_t plane_res_l[I915_MAX_PLANES];
	uint16_t cursor_res_b;
	uint8_t cursor_res_l;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens.
 *
 * For more, read the Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	bool suspended;
	bool irqs_enabled;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

struct i915_frontbuffer_tracking {
	struct mutex lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_wa_reg {
	u32 addr;
	u32 value;
	/* bitmask representing WA bits */
	u32 mask;
};

#define I915_MAX_WA_REGS 16

struct i915_workarounds {
	struct i915_wa_reg reg[I915_MAX_WA_REGS];
	u32 count;
};

struct i915_virtual_gpu {
	bool active;
};

struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *objects;
	struct kmem_cache *vmas;
	struct kmem_cache *requests;

	const struct intel_device_info info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	uint32_t mipi_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs ring[I915_NUM_RINGS];
	struct drm_i915_gem_object *semaphore_obj;
	uint32_t last_seqno, next_seqno;

	struct drm_dma_handle *status_page_dmah;
	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* protects the mmio flip data */
	spinlock_t mmio_flip_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;
	u32 pm_rps_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct work_struct hotplug_work;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct delayed_work hotplug_reenable_work;

	struct i915_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int cdclk_freq;
	unsigned int hpll_freq;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_workarounds workarounds;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;

	struct i915_frontbuffer_tracking fb_tracking;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdev registered on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;
#endif

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	bool audio_component_registered;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	u32 suspend_count;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		uint16_t skl_latency[8];

		/*
		 * The skl_wm_values structure is a bit too big for stack
		 * allocation, so we keep the staging struct where we store
		 * intermediate results here instead.
		 */
		struct skl_wm_values skl_results;

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_wm_values skl_hw;
			struct vlv_wm_values vlv;
		};
	} wm;

	struct i915_runtime_pm pm;

	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
	u32 long_hpd_port_mask;
	u32 short_hpd_port_mask;
	struct work_struct dig_port_work;

	/*
	 * if we get a HPD irq from DP and a HPD irq from non-DP
	 * the non-DP HPD could block the workqueue on a mode config
	 * mutex getting, that userspace may have taken. However
	 * userspace is waiting on the DP workqueue to run which is
	 * blocked behind the non-DP one.
	 */
	struct workqueue_struct *dp_wq;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
				      struct intel_engine_cs *ring,
				      struct intel_context *ctx,
				      struct drm_i915_gem_execbuffer2 *args,
				      struct list_head *vmas,
				      struct drm_i915_gem_object *batch_obj,
				      u64 exec_start, u32 flags);
		int (*init_rings)(struct drm_device *dev);
		void (*cleanup_ring)(struct intel_engine_cs *ring);
		void (*stop_ring)(struct intel_engine_cs *ring);
	} gt;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};
L
Linus Torvalds 已提交
1837

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

static inline struct drm_i915_private *dev_to_i915(struct device *dev)
{
	return to_i915(dev_get_drvdata(dev));
}

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
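
/*
 * Illustrative sketch (not part of the driver): for_each_ring only visits
 * rings that were actually initialised on this platform. A hypothetical
 * count of the active engines would look like:
 *
 *	struct intel_engine_cs *ring;
 *	int i, count = 0;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		count++;
 */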

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane, interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 4
#define INTEL_FRONTBUFFER_BITS \
	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
#define INTEL_FRONTBUFFER_PRIMARY(pipe) \
	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
#define INTEL_FRONTBUFFER_CURSOR(pipe) \
	(1 << (1 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_SPRITE(pipe) \
	(1 << (2 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	(1 << (3 + (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	(0xf << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
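
/*
 * Worked example (illustrative only): with INTEL_FRONTBUFFER_BITS_PER_PIPE
 * at 4, pipe B (pipe == 1) occupies bits 4..7, so
 * INTEL_FRONTBUFFER_SPRITE(1) evaluates to 1 << (2 + 4 * 1) == 0x40 and
 * INTEL_FRONTBUFFER_ALL_MASK(1) to 0xf << 4 == 0xf0.
 */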
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;

	struct list_head batch_pool_link;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object.  Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if the hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;
	unsigned int cache_dirty:1;

	unsigned int has_dma_mapping:1;

	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;

	unsigned int pin_display;

	struct sg_table *pages;
	int pages_pin_count;
	struct get_page {
		struct scatterlist *sg;
		int last;
	} get_page;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/** Breadcrumb of last rendering to the buffer. */
	struct drm_i915_gem_request *last_read_req;
	struct drm_i915_gem_request *last_write_req;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	struct drm_i915_gem_request *last_fenced_req;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		/** for phy allocated objects */
		struct drm_dma_handle *phys_handle;

		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only :1;
			unsigned workers :4;
#define I915_GEM_USERPTR_MAX_WORKERS 15

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;
	};
};
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable sequence
 * number comparisons on buffer last_read|write_seqno. It also allows an
 * emission time to be associated with the request for tracking how far ahead
 * of the GPU the submission is.
 *
 * The requests are reference counted, so upon creation they should have an
 * initial reference taken using kref_init
 */
struct drm_i915_gem_request {
	struct kref ref;

	/** On which ring this request was generated */
	struct drm_i915_private *i915;
	struct intel_engine_cs *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/**
	 * Position in the ringbuffer of the start of the postfix.
	 * This is required to calculate the maximum available ringbuffer
	 * space without overwriting the postfix.
	 */
	u32 postfix;

	/** Position in the ringbuffer of the end of the whole request */
	u32 tail;

	/**
	 * Context and ring buffer related to this request
	 * Contexts are refcounted, so when this request is associated with a
	 * context, we must increment the context's refcount, to guarantee that
	 * it persists while any request is linked to it. Requests themselves
	 * are also refcounted, so the request will only be freed when the last
	 * reference to it is dismissed, and the code in
	 * i915_gem_request_free() will then decrement the refcount on the
	 * context.
	 */
	struct intel_context *ctx;
	struct intel_ringbuffer *ringbuf;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;

	/** process identifier submitting this request */
	struct pid *pid;

	/**
	 * The ELSP only accepts two elements at a time, so we queue
	 * context/tail pairs on a given queue (ring->execlist_queue) until the
	 * hardware is available. The queue serves a double purpose: we also use
	 * it to keep track of the up to 2 contexts currently in the hardware
	 * (usually one in execution and the other queued up by the GPU): We
	 * only remove elements from the head of the queue when the hardware
	 * informs us that an element has been completed.
	 *
	 * All accesses to the queue are mediated by a spinlock
	 * (ring->execlist_lock).
	 */

	/** Execlist link in the submission queue. */
	struct list_head execlist_link;

	/** Number of times this request has been sent to the ELSP (execlists) */
	int elsp_submitted;

};

int i915_gem_request_alloc(struct intel_engine_cs *ring,
			   struct intel_context *ctx);
void i915_gem_request_free(struct kref *req_ref);

static inline uint32_t
i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
{
	return req ? req->seqno : 0;
}

static inline struct intel_engine_cs *
i915_gem_request_get_ring(struct drm_i915_gem_request *req)
{
	return req ? req->ring : NULL;
}

static inline void
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
	kref_get(&req->ref);
}

static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
	kref_put(&req->ref, i915_gem_request_free);
}

static inline void
i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
{
	struct drm_device *dev;

	if (!req)
		return;
	dev = req->ring->dev;
	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
		mutex_unlock(&dev->struct_mutex);
}

static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
					   struct drm_i915_gem_request *src)
{
	if (src)
		i915_gem_request_reference(src);

	if (*pdst)
		i915_gem_request_unreference(*pdst);

	*pdst = src;
}
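
/*
 * Illustrative sketch (not part of the driver): i915_gem_request_assign()
 * is a reference-counted assignment. To track the most recent request on
 * some hypothetical pointer one would write
 *
 *	i915_gem_request_assign(&obj->last_write_req, req);
 *
 * rather than a plain pointer assignment; passing NULL as the source drops
 * the old reference without taking a new one.
 */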

/*
 * XXX: i915_gem_request_completed should be here but currently needs the
 * definition of i915_seqno_passed() which is below. It will be moved in
 * a later patch when the call to i915_seqno_passed() is obsoleted...
 */

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;

	struct list_head rps_boost;
	struct intel_engine_cs *bsd_ring;

	unsigned rps_boosts;
};

/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_MASTER: The command is allowed if the submitting process
	 *                  is the DRM master
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)
#define CMD_DESC_MASTER   (1<<5)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 */
	struct {
		u32 offset;
		u32 mask;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};

/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each ring has an array of tables. Each table consists of an array of command
 * descriptors, which must be sorted with command opcodes in ascending order.
 */
struct drm_i915_cmd_table {
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
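
/*
 * Illustrative sketch (not taken from the driver's real tables): a
 * descriptor for a hypothetical fixed-length command that only the DRM
 * master may issue could look like
 *
 *	static const struct drm_i915_cmd_descriptor example_desc = {
 *		.flags = CMD_DESC_FIXED | CMD_DESC_MASTER,
 *		.cmd = { .value = 0x12000000, .mask = 0xff000000 },
 *		.length = { .fixed = 2 },
 *	};
 *
 * The opcode value/mask pair here is made up purely for illustration.
 */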

/* Note that the (struct drm_i915_private *) cast is just to shut up gcc. */
#define __I915__(p) ({ \
	struct drm_i915_private *__p; \
	if (__builtin_types_compatible_p(typeof(*p), struct drm_i915_private)) \
		__p = (struct drm_i915_private *)p; \
	else if (__builtin_types_compatible_p(typeof(*p), struct drm_device)) \
		__p = to_i915((struct drm_device *)p); \
	else \
		BUILD_BUG(); \
	__p; \
})
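
/*
 * Illustrative note (not part of the driver): __I915__ accepts either a
 * struct drm_device * or a struct drm_i915_private * and resolves the
 * conversion at compile time, so the macros below can be fed either type:
 *
 *	INTEL_INFO(dev);	with struct drm_device *dev
 *	INTEL_INFO(dev_priv);	with struct drm_i915_private *dev_priv
 *
 * Any other pointer type trips BUILD_BUG() at compile time.
 */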
#define INTEL_INFO(p)	(&__I915__(p)->info)
#define INTEL_DEVID(p)	(INTEL_INFO(p)->device_id)
#define INTEL_REVID(p)	(__I915__(p)->dev->pdev->revision)

#define IS_I830(dev)		(INTEL_DEVID(dev) == 0x3577)
#define IS_845G(dev)		(INTEL_DEVID(dev) == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		(INTEL_DEVID(dev) == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		(INTEL_DEVID(dev) == 0x2592)
#define IS_I945G(dev)		(INTEL_DEVID(dev) == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		(INTEL_DEVID(dev) == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	(INTEL_DEVID(dev) == 0xa001)
#define IS_PINEVIEW_M(dev)	(INTEL_DEVID(dev) == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	(INTEL_DEVID(dev) == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		(INTEL_DEVID(dev) == 0x0156 || \
				 INTEL_DEVID(dev) == 0x0152 || \
				 INTEL_DEVID(dev) == 0x015a)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
#define IS_SKYLAKE(dev)	(INTEL_INFO(dev)->is_skylake)
#define IS_BROXTON(dev)	(!INTEL_INFO(dev)->is_skylake && IS_GEN9(dev))
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
				 (INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
				 INTEL_DEVID(dev) == 0x0A1E)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

#define SKL_REVID_A0		(0x0)
#define SKL_REVID_B0		(0x1)
#define SKL_REVID_C0		(0x2)
#define SKL_REVID_D0		(0x3)
#define SKL_REVID_E0		(0x4)

#define BXT_REVID_A0		(0x0)
#define BXT_REVID_B0		(0x3)
#define BXT_REVID_C0		(0x6)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)
#define IS_GEN9(dev)	(INTEL_INFO(dev)->gen == 9)

#define RENDER_RING		(1<<RCS)
#define BSD_RING		(1<<VCS)
#define BLT_RING		(1<<BCS)
#define VEBOX_RING		(1<<VECS)
#define BSD2_RING		(1<<VCS2)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
				 __I915__(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_LOGICAL_RING_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 8)
#define USES_PPGTT(dev)		(i915.enable_ppgtt)
#define USES_FULL_PPGTT(dev)	(i915.enable_ppgtt == 2)

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))
/*
 * dp aux and gmbus irq on gen4 seem to be able to generate legacy interrupts
 * even when in MSI mode. This results in spurious interrupt warnings if the
 * legacy irq no. is shared with another device. The kernel then disables that
 * interrupt source and so prevents the other device from working properly.
 */
#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
2420

2421
#define HAS_IPS(dev)		(IS_HSW_ULT(dev) || IS_BROADWELL(dev))
2422

2423
#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
2424
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
2425
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev) || \
2426 2427
				 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
				 IS_SKYLAKE(dev))
2428
#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \
I
Imre Deak 已提交
2429
				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
2430 2431
#define HAS_RC6(dev)		(INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev)		(INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
2441

2442
#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
2443
#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2444
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2445 2446
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
B
Ben Widawsky 已提交
2447
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
2448
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
2449

2450 2451
#define HAS_GMCH_DISPLAY(dev) (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;

extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
extern int i915_resume_legacy(struct drm_device *dev);

/* i915_params.c */
struct i915_params {
	int modeset;
	int panel_ignore_lid;
	int semaphores;
	unsigned int lvds_downclock;
	int lvds_channel_mode;
	int panel_use_ssc;
	int vbt_sdvo_panel_type;
	int enable_rc6;
	int enable_fbc;
	int enable_ppgtt;
	int enable_execlists;
	int enable_psr;
	unsigned int preliminary_hw_support;
	int disable_power_well;
	int enable_ips;
	int invert_brightness;
	int enable_cmd_parser;
	/* leave bools at the end to not create holes */
	bool enable_hangcheck;
	bool fastboot;
	bool prefault_disable;
	bool load_detect_test;
	bool reset;
	bool disable_display;
	bool disable_vtd_wa;
	int use_mmio_flip;
	int mmio_debug;
	bool verbose_state_checks;
	bool nuclear_pageflip;
};
extern struct i915_params i915 __read_mostly;

				/* i915_dma.c */
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
__printf(3, 4)
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_hpd_init(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev,
					bool restore_forcewake);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore);
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
void intel_uncore_forcewake_put(struct drm_i915_private *dev_priv,
				enum forcewake_domains domains);
/* Like above but the caller must manage the uncore.lock itself.
 * Must be used with I915_READ_FW and friends.
 */
void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
					enum forcewake_domains domains);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
	return to_i915(dev)->vgpu.active;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask);
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)
/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
					struct intel_engine_cs *ring);
void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
					 struct drm_file *file,
					 struct intel_engine_cs *ring,
					 struct drm_i915_gem_object *obj);
int i915_gem_ringbuffer_submission(struct drm_device *dev,
				   struct drm_file *file,
				   struct intel_engine_cs *ring,
				   struct intel_context *ctx,
				   struct drm_i915_gem_execbuffer2 *args,
				   struct list_head *vmas,
				   struct drm_i915_gem_object *batch_obj,
				   u64 exec_start, u32 flags);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_device *dev);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

#define PIN_MAPPABLE 0x1
#define PIN_NONBLOCK 0x2
#define PIN_GLOBAL 0x4
#define PIN_OFFSET_BIAS 0x8
#define PIN_OFFSET_MASK (~4095)
int __must_check
i915_gem_object_pin(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    uint32_t alignment,
		    uint64_t flags);
int __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 uint32_t alignment,
			 uint64_t flags);

int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __sg_page_count(struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

static inline struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	if (WARN_ON(n >= obj->base.size >> PAGE_SHIFT))
		return NULL;

	if (n < obj->get_page.last) {
		obj->get_page.sg = obj->pages->sgl;
		obj->get_page.last = 0;
	}
	while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
		obj->get_page.last += __sg_page_count(obj->get_page.sg++);
		if (unlikely(sg_is_chain(obj->get_page.sg)))
			obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
	}
	return nth_page(sg_page(obj->get_page.sg), n - obj->get_page.last);
}
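
/*
 * Illustrative sketch (not part of the driver): i915_gem_object_get_page()
 * caches the last visited scatterlist entry in obj->get_page, so walking
 * pages in ascending order is cheap:
 *
 *	int i, n = obj->base.size >> PAGE_SHIFT;
 *
 *	for (i = 0; i < n; i++) {
 *		struct page *page = i915_gem_object_get_page(obj, i);
 *		...
 *	}
 *
 * The caller is assumed to hold a pages reference (see pin_pages below).
 */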
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
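
/*
 * Illustrative note (not part of the driver): pin/unpin nest via
 * pages_pin_count, so a typical consumer brackets its access:
 *
 *	i915_gem_object_pin_pages(obj);
 *	...		use obj->pages safely here
 *	i915_gem_object_unpin_pages(obj);
 *
 * While pages_pin_count is non-zero the backing pages cannot be released
 * by put_pages().
 */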

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_engine_cs *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_engine_cs *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
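
/*
 * Worked example (illustrative only): the signed-difference trick makes
 * the comparison safe across seqno wraparound. With seq1 == 0x00000002 and
 * seq2 == 0xfffffffe, (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is
 * correctly treated as later even though it is numerically smaller.
 */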

static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
					      bool lazy_coherency)
{
	u32 seqno;

	BUG_ON(req == NULL);

	seqno = req->ring->get_seqno(req->ring, lazy_coherency);

	return i915_seqno_passed(seqno, req->seqno);
}

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *ring);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}
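
/*
 * Worked example (illustrative only): the reset counter is incremented once
 * when a reset starts (making I915_RESET_IN_PROGRESS_FLAG, the low bit,
 * non-zero) and once more when it completes. After two completed resets the
 * counter holds 4, and i915_reset_count() returns (4 + 1) / 2 == 2.
 */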
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
}

static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
{
	return dev_priv->gpu_error.stop_rings == 0 ||
		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_engine_cs *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj);
#define i915_add_request(ring) \
	__i915_add_request(ring, NULL, NULL)
int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined,
				     const struct i915_ggtt_view *view);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					      const struct i915_ggtt_view *view);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			    int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long
i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
			      const struct i915_ggtt_view *view);
unsigned long
i915_gem_obj_offset(struct drm_i915_gem_object *o,
		    struct i915_address_space *vm);
static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
{
	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
}

bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
				  const struct i915_ggtt_view *view);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);

unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
			  const struct i915_ggtt_view *view);

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view);
static inline struct i915_vma *
i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_to_ggtt_view(obj, &i915_ggtt_view_normal);
}
bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	WARN_ON(i915_is_ggtt(vm));

	return container_of(vm, struct i915_hw_ppgtt, base);
}


static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      unsigned flags)
{
	return i915_gem_object_pin(obj, i915_obj_to_ggtt(obj),
				   alignment, flags | PIN_GLOBAL);
}

static inline int
i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
{
	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
}

void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
				     const struct i915_ggtt_view *view);
static inline void
i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
{
	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
}
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
int i915_gem_context_enable(struct drm_i915_private *dev_priv);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_engine_cs *ring,
			struct intel_context *to);
struct intel_context *
i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
void i915_gem_context_free(struct kref *ctx_ref);
struct drm_i915_gem_object *
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
static inline void i915_gem_context_reference(struct intel_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct intel_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

static inline bool i915_gem_context_is_default(const struct intel_context *c)
{
	return c->user_handle == DEFAULT_CONTEXT_HANDLE;
}

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  unsigned long start,
					  unsigned long end,
					  unsigned flags);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv,
			      long target,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
void i915_gem_shrinker_init(struct drm_i915_private *dev_priv);


/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}
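
/*
 * Illustrative note (not part of the driver): on machines using the
 * 9/10/17 swizzle mode, the CPU view of a tiled page depends on physical
 * address bit 17, which can change when the object is rebound. That is why
 * obj->bit_17 records the bit per page at unbind, and the do/save helpers
 * below re-swizzle pages whose bit has changed.
 */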

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_device *dev);
#else
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      struct drm_i915_private *i915,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev, bool wedge,
			      const char *error_msg);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(void);
int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
int i915_parse_cmds(struct intel_engine_cs *ring,
		    struct drm_i915_gem_object *batch_obj,
		    struct drm_i915_gem_object *shadow_batch_obj,
		    u32 batch_start_offset,
		    u32 batch_len,
		    bool is_master);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern void intel_connector_unregister(struct intel_connector *);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern void i915_redisable_vga_power_on(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void intel_set_rps(struct drm_device *dev, u8 val);
extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);
extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);

/* intel_sideband.c */
3186 3187
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
3188
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
3189 3190 3191 3192 3193 3194
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);

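/*
 * MMIO accessor macros. These expand to a use of a variable named
 * 'dev_priv', which must therefore be in scope at every call site. The
 * trailing boolean selects whether the access is traced; the _NOTRACE
 * variants simply pass false.
 */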
#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. You have been warned.
 */
#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

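/*
 * Read a 64-bit value exposed as a lo/hi pair of 32-bit registers without
 * tearing: the upper half is read before and after the lower half, and the
 * lower half is re-read if the upper half changed in between, so a carry
 * propagating between the two reads cannot produce a torn result.
 * Illustrative use, with hypothetical register names:
 *
 *	u64 ts = I915_READ64_2x32(TIMESTAMP_LO, TIMESTAMP_HI);
 */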
#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
		u32 upper = I915_READ(upper_reg);			\
		u32 lower = I915_READ(lower_reg);			\
		u32 tmp = I915_READ(upper_reg);				\
		if (upper != tmp) {					\
			upper = tmp;					\
			lower = I915_READ(lower_reg);			\
			WARN_ON(I915_READ(upper_reg) != upper);		\
		}							\
		(u64)upper << 32 | lower; })

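/*
 * A posting read flushes previously posted writes out to the device by
 * reading the register back; the result is discarded, and the _NOTRACE
 * accessor keeps the flush out of the mmio tracepoints.
 */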
#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections inside IRQ handlers where forcewake is explicitly
 * controlled.
 * Think twice, and think again, before using these.
 * Note: Should only be used between intel_uncore_forcewake_irqlock() and
 * intel_uncore_forcewake_irqunlock().
 */
#define I915_READ_FW(reg__) readl(dev_priv->regs + (reg__))
#define I915_WRITE_FW(reg__, val__) writel(val__, dev_priv->regs + (reg__))
#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
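/*
 * Sketch of the intended pattern (argument lists assumed for illustration,
 * not checked against the actual prototypes):
 *
 *	intel_uncore_forcewake_irqlock(dev_priv, FORCEWAKE_ALL);
 *	val = I915_READ_FW(reg);
 *	intel_uncore_forcewake_irqunlock(dev_priv, FORCEWAKE_ALL);
 */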

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

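/* The VGA control register has moved across generations; pick the right offset. */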
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (INTEL_INFO(dev)->gen >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

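/*
 * Convert a u64 carried in the uapi (e.g. an ioctl argument) into a
 * __user pointer; the intermediate uintptr_t cast keeps 32-bit builds
 * free of pointer-size conversion warnings.
 */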
static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

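/*
 * The timeout variants of the jiffies conversions below add one jiffy so
 * the sleep is guaranteed to last at least the requested time even when
 * it starts just before a tick boundary, and clamp to MAX_JIFFY_OFFSET
 * so the result remains a valid timeout value.
 */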
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, record the timestamp (jiffies) of
 * when event A happened, then just before event B call this function with
 * that timestamp as the first argument and X as the second.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
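/*
 * Illustrative use, with 'panel_off_jiffies' a hypothetical timestamp
 * recorded at event A (say, panel power-off):
 *
 *	panel_off_jiffies = jiffies;
 *	...
 *	wait_remaining_ms_from_jiffies(panel_off_jiffies, 500);
 *
 * The call before event B then blocks only for however much of the
 * 500 ms is still outstanding.
 */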

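/*
 * Take a user-interrupt reference on behalf of the tracepoint code and
 * remember which request it is tracking; while one traced request is
 * outstanding on a ring, further calls are no-ops.
 */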
static inline void i915_trace_irq_get(struct intel_engine_cs *ring,
				      struct drm_i915_gem_request *req)
{
	if (ring->trace_irq_req == NULL && ring->irq_get(ring))
		i915_gem_request_assign(&ring->trace_irq_req, req);
}

#endif