/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/backlight.h>
#include <linux/hash.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>
#include <linux/pm_qos.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/stackdepot.h>

#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <drm/drm_auth.h>
#include <drm/drm_cache.h>
#include <drm/drm_util.h>
#include <drm/drm_dsc.h>
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>

#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"

#include "intel_bios.h"
#include "intel_device_info.h"
#include "intel_display.h"
#include "intel_dpll_mgr.h"
#include "intel_lrc.h"
#include "intel_opregion.h"
#include "intel_ringbuffer.h"
#include "intel_uncore.h"
#include "intel_wopcm.h"
#include "intel_workarounds.h"
#include "intel_uc.h"

#include "i915_gem.h"
#include "i915_gem_context.h"
#include "i915_gem_fence_reg.h"
#include "i915_gem_object.h"
#include "i915_gem_gtt.h"
#include "i915_gpu_error.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_timeline.h"
#include "i915_vma.h"

#include "intel_gvt.h"

/* General customization:
 */

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20190328"
#define DRIVER_TIMESTAMP	1553776914

/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
 * WARN_ON()) for hw state sanity checks to check for unexpected conditions
 * which may not necessarily be a user visible problem.  This will either
 * WARN() or DRM_ERROR() depending on the verbose_checks moduleparam, to
 * enable distros and users to tailor their preferred amount of i915 abrt
 * spam.
 */
#define I915_STATE_WARN(condition, format...) ({			\
	int __ret_warn_on = !!(condition);				\
	if (unlikely(__ret_warn_on))					\
		if (!WARN(i915_modparams.verbose_state_checks, format))	\
			DRM_ERROR(format);				\
	unlikely(__ret_warn_on);					\
})

#define I915_STATE_WARN_ON(x)						\
	I915_STATE_WARN((x), "%s", "WARN_ON(" __stringify(x) ")")
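
/*
 * Illustrative usage (a sketch; the condition and message are hypothetical):
 *
 *	I915_STATE_WARN(crtc->active != active, "pipe active state mismatch\n");
 *	I915_STATE_WARN_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
 *
 * With verbose_state_checks set, a failed check emits a full WARN()
 * backtrace; otherwise it degrades to a single DRM_ERROR() log line.
 */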

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)

bool __i915_inject_load_failure(const char *func, int line);
#define i915_inject_load_failure() \
	__i915_inject_load_failure(__func__, __LINE__)

bool i915_error_injected(void);

#else

#define i915_inject_load_failure() false
#define i915_error_injected() false

#endif
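
/*
 * Typical probe-path pattern (a sketch): init steps bail out early when a
 * failure has been injected via the i915.inject_load_failure modparam, e.g.
 *
 *	if (i915_inject_load_failure())
 *		return -ENODEV;
 */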

#define i915_load_error(i915, fmt, ...)					 \
	__i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \
		      fmt, ##__VA_ARGS__)

typedef depot_stack_handle_t intel_wakeref_t;

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_F,
	HPD_NUM_PINS
};

#define for_each_hpd_pin(__pin) \
	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
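
/*
 * Illustrative iteration (a sketch; it matches the stats[] array in
 * struct i915_hotplug below):
 *
 *	enum hpd_pin pin;
 *
 *	for_each_hpd_pin(pin)
 *		dev_priv->hotplug.stats[pin].count = 0;
 */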

/* Threshold == 5 for long IRQs, 50 for short */
#define HPD_STORM_DEFAULT_THRESHOLD 50
struct i915_hotplug {
	struct work_struct hotplug_work;

	struct {
		unsigned long last_jiffies;
		int count;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} state;
	} stats[HPD_NUM_PINS];
	u32 event_bits;
	struct delayed_work reenable_work;

	u32 long_port_mask;
	u32 short_port_mask;
	struct work_struct dig_port_work;

	struct work_struct poll_init_work;
	bool poll_enabled;

	unsigned int hpd_storm_threshold;
	/* Whether or not to count short HPD IRQs in HPD storms */
	u8 hpd_short_storm_enabled;
	/*
	 * If we get an HPD irq from DP and an HPD irq from non-DP, the
	 * non-DP HPD could block the workqueue on acquiring a mode config
	 * mutex that userspace may have taken. However, userspace is
	 * waiting on the DP workqueue to run, which is blocked behind the
	 * non-DP one.
	 */
	struct workqueue_struct *dp_wq;
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

struct drm_i915_private;
struct i915_mm_struct;
struct i915_mmu_object;

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;
	struct drm_file *file;

	struct {
		spinlock_t lock;
		struct list_head request_list;
/* 20ms is a fairly arbitrary limit (greater than the average frame time)
 * chosen to prevent the CPU getting more than a frame ahead of the GPU
 * (when using lax throttling for the frontbuffer). We also use it to
 * offer free GPU waitboosts for severely congested workloads.
 */
#define DRM_I915_THROTTLE_JIFFIES msecs_to_jiffies(20)
	} mm;

	struct idr context_idr;
	struct mutex context_idr_lock; /* guards context_idr */

	struct idr vm_idr;
	struct mutex vm_idr_lock; /* guards vm_idr */

	unsigned int bsd_engine;

/*
 * Every context ban increments per client ban score. Also
 * hangs in short succession increments ban score. If ban threshold
 * is reached, client is considered banned and submitting more work
 * will fail. This is a stop gap measure to limit the badly behaving
 * clients access to gpu. Note that unbannable contexts never increment
 * the client ban score.
 */
#define I915_CLIENT_SCORE_HANG_FAST	1
#define   I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
#define I915_CLIENT_SCORE_CONTEXT_BAN   3
#define I915_CLIENT_SCORE_BANNED	9
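	/*
	 * Worked example (illustrative): three context bans contribute
	 * 3 * I915_CLIENT_SCORE_CONTEXT_BAN = 9 points, which reaches
	 * I915_CLIENT_SCORE_BANNED, so the client is considered banned.
	 */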
	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
	atomic_t ban_score;
	unsigned long hang_timestamp;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

struct intel_overlay;
struct intel_overlay_error_state;

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
struct intel_cdclk_state;

struct drm_i915_display_funcs {
	void (*get_cdclk)(struct drm_i915_private *dev_priv,
			  struct intel_cdclk_state *cdclk_state);
	void (*set_cdclk)(struct drm_i915_private *dev_priv,
			  const struct intel_cdclk_state *cdclk_state);
	int (*get_fifo_size)(struct drm_i915_private *dev_priv,
			     enum i9xx_plane_id i9xx_plane);
	int (*compute_pipe_wm)(struct intel_crtc_state *cstate);
	int (*compute_intermediate_wm)(struct intel_crtc_state *newstate);
	void (*initial_watermarks)(struct intel_atomic_state *state,
				   struct intel_crtc_state *cstate);
	void (*atomic_update_watermarks)(struct intel_atomic_state *state,
					 struct intel_crtc_state *cstate);
	void (*optimize_watermarks)(struct intel_atomic_state *state,
				    struct intel_crtc_state *cstate);
	int (*compute_global_watermarks)(struct intel_atomic_state *state);
	void (*update_wm)(struct intel_crtc *crtc);
	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_state *);
	void (*get_initial_plane_config)(struct intel_crtc *,
					 struct intel_initial_plane_config *);
	int (*crtc_compute_clock)(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
	void (*crtc_enable)(struct intel_crtc_state *pipe_config,
			    struct drm_atomic_state *old_state);
	void (*crtc_disable)(struct intel_crtc_state *old_crtc_state,
			     struct drm_atomic_state *old_state);
	void (*update_crtcs)(struct drm_atomic_state *state);
	void (*audio_codec_enable)(struct intel_encoder *encoder,
				   const struct intel_crtc_state *crtc_state,
				   const struct drm_connector_state *conn_state);
	void (*audio_codec_disable)(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state);
	void (*fdi_link_train)(struct intel_crtc *crtc,
			       const struct intel_crtc_state *crtc_state);
	void (*init_clock_gating)(struct drm_i915_private *dev_priv);
	void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*color_check)(struct intel_crtc_state *crtc_state);
	/*
	 * Program double buffered color management registers during
	 * vblank evasion. The registers should then latch during the
	 * next vblank start, alongside any other double buffered registers
	 * involved with the same commit.
	 */
	void (*color_commit)(const struct intel_crtc_state *crtc_state);
	/*
	 * Load LUTs (and other single buffered color management
	 * registers). Will (hopefully) be called during the vblank
	 * following the latching of any double buffered registers
	 * involved with the same commit.
	 */
	void (*load_luts)(const struct intel_crtc_state *crtc_state);
};

#define CSR_VERSION(major, minor)	((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version)	((version) >> 16)
#define CSR_VERSION_MINOR(version)	((version) & 0xffff)
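/*
 * Worked example (illustrative): DMC firmware version 1.7 packs as
 * CSR_VERSION(1, 7) == 0x00010007, and CSR_VERSION_MAJOR()/
 * CSR_VERSION_MINOR() recover 1 and 7 from that value.
 */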

struct intel_csr {
	struct work_struct work;
	const char *fw_path;
	u32 required_version;
	u32 max_fw_size; /* bytes */
	u32 *dmc_payload;
	u32 dmc_fw_size; /* dwords */
	u32 version;
	u32 mmio_count;
	i915_reg_t mmioaddr[8];
	u32 mmiodata[8];
	u32 dc_state;
	u32 allowed_dc_mask;
	intel_wakeref_t wakeref;
};

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, e.g. sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */

enum fb_op_origin {
	ORIGIN_GTT,
	ORIGIN_CPU,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
};

struct intel_fbc {
	/* This is always the inner lock when overlapping with struct_mutex and
	 * it's the outer lock when overlapping with stolen_lock. */
	struct mutex lock;
	unsigned threshold;
	unsigned int possible_framebuffer_bits;
	unsigned int busy_bits;
	unsigned int visible_pipes_mask;
	struct intel_crtc *crtc;

	struct drm_mm_node compressed_fb;
	struct drm_mm_node *compressed_llb;

	bool false_color;

	bool enabled;
	bool active;
	bool flip_pending;

	bool underrun_detected;
	struct work_struct underrun_work;

	/*
	 * Due to the atomic rules we can't access some structures without the
	 * appropriate locking, so we cache information here in order to avoid
	 * these problems.
	 */
	struct intel_fbc_state_cache {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			unsigned int mode_flags;
			u32 hsw_bdw_pixel_rate;
		} crtc;

		struct {
			unsigned int rotation;
			int src_w;
			int src_h;
			bool visible;
			/*
			 * Display surface base address adjustment for
			 * pageflips. Note that on gen4+ this only adjusts up
			 * to a tile, offsets within a tile are handled in
			 * the hw itself (with the TILEOFF register).
			 */
			int adjusted_x;
			int adjusted_y;

			int y;

			u16 pixel_blend_mode;
		} plane;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;
	} state_cache;

	/*
	 * This structure contains everything that's relevant to program the
	 * hardware registers. When we want to figure out if we need to disable
	 * and re-enable FBC for a new configuration we just check if there's
	 * something different in the struct. The genx_fbc_activate functions
	 * are supposed to read from it in order to program the registers.
	 */
	struct intel_fbc_reg_params {
		struct i915_vma *vma;
		unsigned long flags;

		struct {
			enum pipe pipe;
			enum i9xx_plane_id i9xx_plane;
			unsigned int fence_y_offset;
		} crtc;

		struct {
			const struct drm_format_info *format;
			unsigned int stride;
		} fb;

		int cfb_size;
		unsigned int gen9_wa_cfb_stride;
	} params;

	const char *no_fbc_reason;
};

/*
 * HIGH_RR is the highest eDP panel refresh rate read from EDID
 * LOW_RR is the lowest eDP panel refresh rate found from EDID
 * parsing for same resolution.
 */
enum drrs_refresh_rate_type {
	DRRS_HIGH_RR,
	DRRS_LOW_RR,
	DRRS_MAX_RR, /* RR count */
};

enum drrs_support_type {
	DRRS_NOT_SUPPORTED = 0,
	STATIC_DRRS_SUPPORT = 1,
	SEAMLESS_DRRS_SUPPORT = 2
};

struct intel_dp;
struct i915_drrs {
	struct mutex mutex;
	struct delayed_work work;
	struct intel_dp *dp;
	unsigned busy_frontbuffer_bits;
	enum drrs_refresh_rate_type refresh_rate_type;
	enum drrs_support_type type;
};

struct i915_psr {
	struct mutex lock;

#define I915_PSR_DEBUG_MODE_MASK	0x0f
#define I915_PSR_DEBUG_DEFAULT		0x00
#define I915_PSR_DEBUG_DISABLE		0x01
#define I915_PSR_DEBUG_ENABLE		0x02
#define I915_PSR_DEBUG_FORCE_PSR1	0x03
#define I915_PSR_DEBUG_IRQ		0x10

	u32 debug;
	bool sink_support;
	bool enabled;
	struct intel_dp *dp;
	enum pipe pipe;
	bool active;
	struct work_struct work;
	unsigned busy_frontbuffer_bits;
	bool sink_psr2_support;
	bool link_standby;
	bool colorimetry_support;
	bool psr2_enabled;
	u8 sink_sync_latency;
	ktime_t last_entry_attempt;
	ktime_t last_exit;
	bool sink_not_reliable;
	bool irq_aux_error;
	u16 su_x_granularity;
};

/*
 * Sorted by south display engine compatibility.
 * If the new PCH comes with a south display engine that is not
 * inherited from the latest item, please do not add it to the
 * end. Instead, add it right after its "parent" PCH.
 */
enum intel_pch {
	PCH_NOP = -1,	/* PCH without south display */
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint/Pantherpoint PCH */
	PCH_LPT,	/* Lynxpoint/Wildcatpoint PCH */
	PCH_SPT,        /* Sunrisepoint PCH */
	PCH_KBP,        /* Kaby Lake PCH */
	PCH_CNP,        /* Cannon/Comet Lake PCH */
	PCH_ICP,	/* Ice Lake PCH */
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
	u32 force_bit;
	u32 reg0;
	i915_reg_t gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u32 saveDSPARB;
	u32 saveFBC_CONTROL;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF3[3];
	u64 saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePCH_PORT_HOTPLUG;
	u16 saveGCDGMBUS;
};

struct vlv_s0ix_state {
	/* GAM */
	u32 wr_watermark;
	u32 gfx_prio_ctrl;
	u32 arb_mode;
	u32 gfx_pend_tlb0;
	u32 gfx_pend_tlb1;
	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
	u32 media_max_req_count;
	u32 gfx_max_req_count;
	u32 render_hwsp;
	u32 ecochk;
	u32 bsd_hwsp;
	u32 blt_hwsp;
	u32 tlb_rd_addr;

	/* MBC */
	u32 g3dctl;
	u32 gsckgctl;
	u32 mbctl;

	/* GCP */
	u32 ucgctl1;
	u32 ucgctl3;
	u32 rcgctl1;
	u32 rcgctl2;
	u32 rstctl;
	u32 misccpctl;

	/* GPM */
	u32 gfxpause;
	u32 rpdeuhwtc;
	u32 rpdeuc;
	u32 ecobus;
	u32 pwrdwnupctl;
	u32 rp_down_timeout;
	u32 rp_deucsw;
	u32 rcubmabdtmr;
	u32 rcedata;
	u32 spare2gh;

	/* Display 1 CZ domain */
	u32 gt_imr;
	u32 gt_ier;
	u32 pm_imr;
	u32 pm_ier;
	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];

	/* GT SA CZ domain */
	u32 tilectl;
	u32 gt_fifoctl;
	u32 gtlc_wake_ctrl;
	u32 gtlc_survive;
	u32 pmwgicz;

	/* Display 2 CZ domain */
	u32 gu_ctl0;
	u32 gu_ctl1;
	u32 pcbr;
	u32 clock_gate_dis2;
};

struct intel_rps_ei {
	ktime_t ktime;
	u32 render_c0;
	u32 media_c0;
};

struct intel_rps {
	/*
	 * work, interrupts_enabled and pm_iir are protected by
	 * dev_priv->irq_lock
	 */
	struct work_struct work;
	bool interrupts_enabled;
	u32 pm_iir;

	/* PM interrupt bits that should never be masked */
	u32 pm_intrmsk_mbz;

	/* Frequencies are stored in potentially platform dependent multiples.
	 * In other words, *_freq needs to be multiplied by X to be interesting.
	 * Soft limits are those which are used for the dynamic reclocking done
	 * by the driver (raise frequencies under heavy loads, and lower for
	 * lighter loads). Hard limits are those imposed by the hardware.
	 *
	 * A distinction is made for overclocking, which is never enabled by
	 * default, and is considered to be above the hard limit if it's
	 * possible at all.
	 */
	u8 cur_freq;		/* Current frequency (cached, may not == HW) */
	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */
	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */
	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */
	u8 min_freq;		/* AKA RPn. Minimum frequency */
683
	u8 boost_freq;		/* Frequency to request when wait boosting */
684
	u8 idle_freq;		/* Frequency to request when we are idle */
685 686 687
	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */
	u8 rp1_freq;		/* "less than" RP0 power/freqency */
	u8 rp0_freq;		/* Non-overclocked max frequency. */
688
	u16 gpll_ref_freq;	/* vlv/chv GPLL reference frequency */
689

690
	int last_adj;
C
Chris Wilson 已提交
691 692 693 694 695 696 697 698 699 700

	struct {
		struct mutex mutex;

		enum { LOW_POWER, BETWEEN, HIGH_POWER } mode;
		unsigned int interactive;

		u8 up_threshold; /* Current %busy required to uplock */
		u8 down_threshold; /* Current %busy required to downclock */
	} power;
701

702
	bool enabled;
703 704
	atomic_t num_waiters;
	atomic_t boosts;
705

706
	/* manual wa residency calculations */
707
	struct intel_rps_ei ei;
708 709
};

710 711
struct intel_rc6 {
	bool enabled;
712 713
	u64 prev_hw_residency[4];
	u64 cur_residency[4];
714 715 716 717 718 719
};

struct intel_llc_pstate {
	bool enabled;
};

struct intel_gen6_power_mgmt {
	struct intel_rps rps;
	struct intel_rc6 rc6;
	struct intel_llc_pstate llc_pstate;
};

/* defined intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	u64 last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;
};

struct drm_i915_private;
struct i915_power_well;

struct i915_power_well_ops {
	/*
	 * Synchronize the well's hw state to match the current sw state, for
	 * example enable/disable it based on the current refcount. Called
	 * during driver init and resume time, possibly after first calling
	 * the enable/disable handlers.
	 */
	void (*sync_hw)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/*
	 * Enable the well and resources that depend on it (for example
	 * interrupts located on the well). Called after the 0->1 refcount
	 * transition.
	 */
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct i915_power_well *power_well);
	/*
	 * Disable the well and resources that depend on it. Called after
	 * the 1->0 refcount transition.
	 */
	void (*disable)(struct drm_i915_private *dev_priv,
			struct i915_power_well *power_well);
	/* Returns the hw enabled state. */
	bool (*is_enabled)(struct drm_i915_private *dev_priv,
			   struct i915_power_well *power_well);
};

struct i915_power_well_regs {
	i915_reg_t bios;
	i915_reg_t driver;
	i915_reg_t kvmr;
	i915_reg_t debug;
};

/* Power well structure for haswell */
struct i915_power_well_desc {
	const char *name;
	bool always_on;
	u64 domains;
	/* unique identifier for this power well */
	enum i915_power_well_id id;
	/*
	 * Arbitrary data associated with this power well. Platform and power
	 * well specific.
	 */
	union {
		struct {
			/*
			 * request/status flag index in the PUNIT power well
			 * control/status registers.
			 */
			u8 idx;
		} vlv;
		struct {
			enum dpio_phy phy;
		} bxt;
		struct {
			const struct i915_power_well_regs *regs;
			/*
			 * request/status flag index in the power well
			 * control/status registers.
			 */
			u8 idx;
			/* Mask of pipes whose IRQ logic is backed by the pw */
			u8 irq_pipe_mask;
			/* The pw is backing the VGA functionality */
			bool has_vga:1;
			bool has_fuses:1;
			/*
			 * The pw is for an ICL+ TypeC PHY port in
			 * Thunderbolt mode.
			 */
			bool is_tc_tbt:1;
		} hsw;
	};
	const struct i915_power_well_ops *ops;
};

struct i915_power_well {
	const struct i915_power_well_desc *desc;
	/* power well enable/disable usage count */
	int count;
	/* cached hw enabled state */
	bool hw_enabled;
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool initializing;
	bool display_core_suspended;
	int power_well_count;

	intel_wakeref_t wakeref;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Protects the usage of the GTT stolen memory allocator. This is
	 * always the inner lock when overlapping with struct_mutex. */
	struct mutex stolen_lock;

	/* Protects bound_list/unbound_list and #drm_i915_gem_object.mm.link */
	spinlock_t obj_lock;

	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU). These objects may or may
	 * not actually have any pages attached.
	 */
	struct list_head unbound_list;

	/** List of all objects in gtt_space, currently mmaped by userspace.
	 * All objects within this list must also be on bound_list.
	 */
	struct list_head userfault_list;

	/**
	 * List of objects which are pending destruction.
	 */
	struct llist_head free_list;
	struct work_struct free_work;
	spinlock_t free_lock;
	/**
	 * Count of objects pending destructions. Used to skip needlessly
	 * waiting on an RCU barrier if no objects are waiting to be freed.
	 */
	atomic_t free_count;

	/**
	 * Small stash of WC pages
	 */
	struct pagestash wc_stash;

	/**
	 * tmpfs instance used for shmem backed objects
	 */
	struct vfsmount *gemfs;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct notifier_block oom_notifier;
	struct notifier_block vmap_notifier;
	struct shrinker shrinker;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * Workqueue to fault in userptr pages, flushed by the execbuf
	 * when required but otherwise left to userspace to try again
	 * on EAGAIN.
	 */
	struct workqueue_struct *userptr_wq;

	u64 unordered_timeline;

	/* the indicator for dispatch video commands on two BSD rings */
	atomic_t bsd_engine_dispatch_index;

	/** Bit 6 swizzling required for X tiling */
	u32 bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	u32 bit_6_swizzle_y;

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	u64 object_memory;
	u32 object_count;
};

#define I915_IDLE_ENGINES_TIMEOUT (200) /* in ms */

#define I915_RESET_TIMEOUT (10 * HZ) /* 10s */
#define I915_FENCE_TIMEOUT (10 * HZ) /* 10s */

#define I915_ENGINE_DEAD_TIMEOUT  (4 * HZ)  /* Seqno, head and subunits dead */
#define I915_SEQNO_DEAD_TIMEOUT   (12 * HZ) /* Seqno dead with active head */

#define I915_ENGINE_WEDGED_TIMEOUT  (60 * HZ)  /* Reset but no recovery? */

struct ddi_vbt_port_info {
	int max_tmds_clock;

	/*
	 * This is an index in the HDMI/DVI DDI buffer translation table.
	 * The special value HDMI_LEVEL_SHIFT_UNKNOWN means the VBT didn't
	 * populate this field.
	 */
#define HDMI_LEVEL_SHIFT_UNKNOWN	0xff
	u8 hdmi_level_shift;

	u8 present:1;
	u8 supports_dvi:1;
	u8 supports_hdmi:1;
	u8 supports_dp:1;
	u8 supports_edp:1;
	u8 supports_typec_usb:1;
	u8 supports_tbt:1;

	u8 alternate_aux_channel;
	u8 alternate_ddc_pin;

	u8 dp_boost_level;
	u8 hdmi_boost_level;
	int dp_max_link_rate;		/* 0 for not limited by VBT */
};

enum psr_lines_to_wait {
	PSR_0_LINES_TO_WAIT = 0,
	PSR_1_LINE_TO_WAIT,
	PSR_4_LINES_TO_WAIT,
	PSR_8_LINES_TO_WAIT
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int int_lvds_support:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	unsigned int panel_type:4;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
	enum drm_panel_orientation orientation;

	enum drrs_support_type drrs_type;

	struct {
		int rate;
		int lanes;
		int preemphasis;
		int vswing;
		bool low_vswing;
		bool initialized;
		int bpp;
		struct edp_power_seq pps;
	} edp;

	struct {
		bool enable;
		bool full_link;
		bool require_aux_wakeup;
		int idle_frames;
		enum psr_lines_to_wait lines_to_wait;
		int tp1_wakeup_time_us;
		int tp2_tp3_wakeup_time_us;
		int psr2_tp2_tp3_wakeup_time_us;
	} psr;

	struct {
		u16 pwm_freq_hz;
		bool present;
		bool active_low_pwm;
		u8 min_brightness;	/* min_brightness/255 of max */
		u8 controller;		/* brightness controller number */
		enum intel_backlight_type type;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
		struct mipi_config *config;
		struct mipi_pps_data *pps;
		u16 bl_ports;
		u16 cabc_ports;
		u8 seq_version;
		u32 size;
		u8 *data;
		const u8 *sequence[MIPI_SEQ_MAX];
		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
		enum drm_panel_orientation orientation;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
	struct sdvo_device_mapping sdvo_mappings[2];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	u32 pri_val;
	u32 spr_val;
	u32 cur_val;
	u32 fbc_val;
};

struct ilk_wm_values {
	u32 wm_pipe[3];
	u32 wm_lp[3];
	u32 wm_lp_spr[3];
	u32 wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

struct g4x_pipe_wm {
	u16 plane[I915_MAX_PLANES];
	u16 fbc;
};

struct g4x_sr_wm {
	u16 plane;
	u16 cursor;
	u16 fbc;
};

struct vlv_wm_ddl_values {
	u8 plane[I915_MAX_PLANES];
};

struct vlv_wm_values {
	struct g4x_pipe_wm pipe[3];
	struct g4x_sr_wm sr;
	struct vlv_wm_ddl_values ddl[3];
	u8 level;
	bool cxsr;
};

struct g4x_wm_values {
	struct g4x_pipe_wm pipe[2];
	struct g4x_sr_wm sr;
	struct g4x_sr_wm hpll;
	bool cxsr;
	bool hpll_en;
	bool fbc_en;
};

struct skl_ddb_entry {
	u16 start, end;	/* in number of blocks, 'end' is exclusive */
};

static inline u16 skl_ddb_entry_size(const struct skl_ddb_entry *entry)
{
	return entry->end - entry->start;
}

static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
				       const struct skl_ddb_entry *e2)
{
	if (e1->start == e2->start && e1->end == e2->end)
		return true;

	return false;
}

struct skl_ddb_allocation {
	u8 enabled_slices; /* GEN11 has configurable 2 slices */
};

struct skl_ddb_values {
	unsigned dirty_pipes;
	struct skl_ddb_allocation ddb;
};

struct skl_wm_level {
	u16 min_ddb_alloc;
	u16 plane_res_b;
	u8 plane_res_l;
	bool plane_en;
	bool ignore_lines;
};

/* Stores plane specific WM parameters */
struct skl_wm_params {
	bool x_tiled, y_tiled;
	bool rc_surface;
	bool is_planar;
	u32 width;
	u8 cpp;
	u32 plane_pixel_rate;
	u32 y_min_scanlines;
	u32 plane_bytes_per_line;
	uint_fixed_16_16_t plane_blocks_per_line;
	uint_fixed_16_16_t y_tile_minimum;
	u32 linetime_us;
	u32 dbuf_block_size;
};

/*
 * This struct helps tracking the state needed for runtime PM, which puts the
 * device in PCI D3 state. Notice that when this happens, nothing on the
 * graphics device works, even register access, so we don't get interrupts nor
 * anything else.
 *
 * Every piece of our code that needs to actually touch the hardware needs to
 * either call intel_runtime_pm_get or call intel_display_power_get with the
 * appropriate power domain.
 *
 * Our driver uses the autosuspend delay feature, which means we'll only really
 * suspend if we stay with zero refcount for a certain amount of time. The
 * default value is currently very conservative (see intel_runtime_pm_enable),
 * but it can be changed with the standard runtime PM files from sysfs.
 *
 * The irqs_enabled variable becomes false exactly after we disable the IRQs
 * and goes back to true exactly before we reenable them. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages
 * in case it happens.
 *
 * For more, read Documentation/power/runtime_pm.txt.
 */
struct i915_runtime_pm {
	atomic_t wakeref_count;
	bool suspended;
	bool irqs_enabled;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
	/*
	 * To aid detection of wakeref leaks and general misuse, we
	 * track all wakeref holders. With manual markup (i.e. returning
	 * a cookie to each rpm_get caller which they then supply to their
	 * paired rpm_put) we can remove corresponding pairs and keep
	 * the array trimmed to active wakerefs.
	 */
	struct intel_runtime_pm_debug {
		spinlock_t lock;

		depot_stack_handle_t last_acquire;
		depot_stack_handle_t last_release;

		depot_stack_handle_t *owners;
		unsigned long count;
	} debug;
#endif
};
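
/*
 * Typical usage pattern (a sketch, matching the wakeref tracking above):
 * hardware access is bracketed by a get/put pair, and the cookie returned
 * by intel_runtime_pm_get() is handed back to intel_runtime_pm_put() so
 * the debug code can pair acquires with releases:
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_runtime_pm_get(dev_priv);
 *	... touch the hardware ...
 *	intel_runtime_pm_put(dev_priv, wakeref);
 */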

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PLANE3,
	INTEL_PIPE_CRC_SOURCE_PLANE4,
	INTEL_PIPE_CRC_SOURCE_PLANE5,
	INTEL_PIPE_CRC_SOURCE_PLANE6,
	INTEL_PIPE_CRC_SOURCE_PLANE7,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	int skipped;
	enum intel_pipe_crc_source source;
};

struct i915_frontbuffer_tracking {
	spinlock_t lock;

	/*
	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
	 * scheduled flips.
	 */
	unsigned busy_bits;
	unsigned flip_bits;
};

struct i915_virtual_gpu {
	bool active;
	u32 caps;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};

struct i915_oa_format {
	u32 format;
	int size;
};

struct i915_oa_reg {
	i915_reg_t addr;
	u32 value;
};

struct i915_oa_config {
	char uuid[UUID_STRING_LEN + 1];
	int id;

	const struct i915_oa_reg *mux_regs;
	u32 mux_regs_len;
	const struct i915_oa_reg *b_counter_regs;
	u32 b_counter_regs_len;
	const struct i915_oa_reg *flex_regs;
	u32 flex_regs_len;

	struct attribute_group sysfs_metric;
	struct attribute *attrs[2];
	struct device_attribute sysfs_metric_id;

	atomic_t ref_count;
};

struct i915_perf_stream;

/**
 * struct i915_perf_stream_ops - the OPs to support a specific stream type
 */
struct i915_perf_stream_ops {
	/**
	 * @enable: Enables the collection of HW samples, either in response to
	 * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened
	 * without `I915_PERF_FLAG_DISABLED`.
	 */
	void (*enable)(struct i915_perf_stream *stream);

	/**
	 * @disable: Disables the collection of HW samples, either in response
	 * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying
	 * the stream.
	 */
	void (*disable)(struct i915_perf_stream *stream);

	/**
	 * @poll_wait: Call poll_wait, passing a wait queue that will be woken
	 * once there is something ready to read() for the stream
	 */
	void (*poll_wait)(struct i915_perf_stream *stream,
			  struct file *file,
			  poll_table *wait);

	/**
	 * @wait_unlocked: For handling a blocking read, wait until there is
	 * something to ready to read() for the stream. E.g. wait on the same
	 * wait queue that would be passed to poll_wait().
	 */
	int (*wait_unlocked)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy buffered metrics as records to userspace
	 * **buf**: the userspace, destination buffer
	 * **count**: the number of bytes to copy, requested by userspace
	 * **offset**: zero at the start of the read, updated as the read
	 * proceeds, it represents how many bytes have been copied so far and
	 * the buffer offset for copying the next record.
	 *
	 * Copy as many buffered i915 perf samples and records for this stream
	 * to userspace as will fit in the given buffer.
	 *
	 * Only write complete records; returning -%ENOSPC if there isn't room
	 * for a complete record.
	 *
	 * Return any error condition that results in a short read such as
	 * -%ENOSPC or -%EFAULT, even though these may be squashed before
	 * returning to userspace.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @destroy: Cleanup any stream specific resources.
	 *
	 * The stream will always be disabled before this is called.
	 */
	void (*destroy)(struct i915_perf_stream *stream);
};

/**
 * struct i915_perf_stream - state for a single open stream FD
 */
struct i915_perf_stream {
	/**
	 * @dev_priv: i915 drm device
	 */
	struct drm_i915_private *dev_priv;

	/**
	 * @link: Links the stream into ``&drm_i915_private->streams``
	 */
	struct list_head link;

	/**
	 * @wakeref: As we keep the device awake while the perf stream is
	 * active, we track our runtime pm reference for later release.
	 */
	intel_wakeref_t wakeref;

	/**
	 * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*`
	 * properties given when opening a stream, representing the contents
	 * of a single sample as read() by userspace.
	 */
	u32 sample_flags;

	/**
	 * @sample_size: Considering the configured contents of a sample
	 * combined with the required header size, this is the total size
	 * of a single sample record.
	 */
	int sample_size;

	/**
	 * @ctx: %NULL if measuring system-wide across all contexts or a
	 * specific context that is being monitored.
	 */
	struct i915_gem_context *ctx;

	/**
	 * @enabled: Whether the stream is currently enabled, considering
	 * whether the stream was opened in a disabled state and based
	 * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls.
	 */
	bool enabled;

	/**
	 * @ops: The callbacks providing the implementation of this specific
	 * type of configured stream.
	 */
	const struct i915_perf_stream_ops *ops;

	/**
	 * @oa_config: The OA configuration used by the stream.
	 */
	struct i915_oa_config *oa_config;
};

/**
 * struct i915_oa_ops - Gen specific implementation of an OA unit stream
 */
struct i915_oa_ops {
	/**
	 * @is_valid_b_counter_reg: Validates register's address for
	 * programming boolean counters for a particular platform.
	 */
	bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv,
				       u32 addr);

	/**
	 * @is_valid_mux_reg: Validates register's address for programming mux
	 * for a particular platform.
	 */
	bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @is_valid_flex_reg: Validates register's address for programming
	 * flex EU filtering for a particular platform.
	 */
	bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr);

	/**
	 * @enable_metric_set: Selects and applies any MUX configuration to set
	 * up the Boolean and Custom (B/C) counters that are part of the
	 * counter reports being sampled. May apply system constraints such as
1440 1441
	 * disabling EU clock gating as required.
	 */
1442
	int (*enable_metric_set)(struct i915_perf_stream *stream);
1443 1444 1445 1446 1447

	/**
	 * @disable_metric_set: Remove system constraints associated with using
	 * the OA unit.
	 */
	void (*disable_metric_set)(struct drm_i915_private *dev_priv);

	/**
	 * @oa_enable: Enable periodic sampling
	 */
	void (*oa_enable)(struct i915_perf_stream *stream);

	/**
	 * @oa_disable: Disable periodic sampling
	 */
	void (*oa_disable)(struct i915_perf_stream *stream);

	/**
	 * @read: Copy data from the circular OA buffer into a given userspace
	 * buffer.
	 */
	int (*read)(struct i915_perf_stream *stream,
		    char __user *buf,
		    size_t count,
		    size_t *offset);

	/**
	 * @oa_hw_tail_read: read the OA tail pointer register
	 *
	 * In particular this enables us to share all the fiddly code for
	 * handling the OA unit tail pointer race that affects multiple
	 * generations.
	 */
	u32 (*oa_hw_tail_read)(struct drm_i915_private *dev_priv);
};

struct intel_cdclk_state {
	unsigned int cdclk, vco, ref, bypass;
	u8 voltage_level;
};

struct drm_i915_private {
	struct drm_device drm;

	const struct intel_device_info __info; /* Use INTEL_INFO() to access. */
	struct intel_runtime_info __runtime; /* Use RUNTIME_INFO() to access. */
	struct intel_driver_caps caps;

	/**
	 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and
	 * end of stolen which we can optionally use to create GEM objects
	 * backed by stolen memory. Note that stolen_usable_size tells us
	 * exactly how much of this we are actually allowed to use, given that
	 * some portion of it is in fact reserved for use by hardware functions.
	 */
	struct resource dsm;
	/**
	 * Reserved portion of Data Stolen Memory
	 */
	struct resource dsm_reserved;

	/*
	 * Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	resource_size_t stolen_usable_size;	/* Total size minus reserved ranges */

	struct intel_uncore uncore;

	struct i915_virtual_gpu vgpu;

	struct intel_gvt *gvt;

	struct intel_wopcm wopcm;

	struct intel_huc huc;
	struct intel_guc guc;

	struct intel_csr csr;

	struct intel_gmbus gmbus[GMBUS_NUM_PINS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of where the gmbus and gpio blocks are located (either
	 * on PCH or on SoC for platforms without PCH).
	 */
	u32 gpio_mmio_base;

	/* MMIO base address for MIPI regs */
	u32 mipi_mmio_base;

	u32 psr_mmio_base;

	u32 pps_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	/* Context used internally to idle the GPU and setup initial state */
	struct i915_gem_context *kernel_context;
	/* Context only to be used for injecting preemption commands */
	struct i915_gem_context *preempt_context;
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];

	struct resource mch_res;

	/* protects the irq masks */
	spinlock_t irq_lock;

	bool display_irqs_enabled;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* Sideband mailbox protection */
	struct mutex sb_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_imr;
	u32 pm_ier;
	u32 pm_rps_events;
	u32 pm_guc_events;
	u32 pipestat_irq_mask[I915_MAX_PIPES];

	struct i915_hotplug hotplug;
	struct intel_fbc fbc;
	struct i915_drrs drrs;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	bool preserve_bios_swizzle;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	struct mutex backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	/* protects panel power sequencer state */
	struct mutex pps_mutex;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;
	unsigned int skl_preferred_vco_freq;
	unsigned int max_cdclk_freq;

	unsigned int max_dotclk_freq;
	unsigned int rawclk_freq;
	unsigned int hpll_freq;
	unsigned int fdi_pll_freq;
	unsigned int czclk_freq;

	struct {
		/*
		 * The current logical cdclk state.
		 * See intel_atomic_state.cdclk.logical
		 *
		 * For reading holding any crtc lock is sufficient,
		 * for writing must hold all of them.
		 */
		struct intel_cdclk_state logical;
		/*
		 * The current actual cdclk state.
		 * See intel_atomic_state.cdclk.actual
		 */
		struct intel_cdclk_state actual;
		/* The current hardware cdclk state */
		struct intel_cdclk_state hw;
	} cdclk;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* ordered wq for modesets */
	struct workqueue_struct *modeset_wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	struct drm_atomic_state *modeset_restore_state;
	struct drm_modeset_acquire_ctx reset_ctx;

	struct i915_ggtt ggtt; /* VM representing the global address space */

	struct i915_gem_mm mm;
	DECLARE_HASHTABLE(mm_structs, 7);
	struct mutex mm_lock;
	struct intel_ppat ppat;

	/* Kernel Modesetting */

	struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
	struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	/* dpll and cdclk state is protected by connection_mutex */
	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	const struct intel_dpll_mgr *dpll_mgr;

	/*
	 * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
	 * Must be global rather than per dpll, because on some platforms
	 * plls share registers.
	 */
	struct mutex dpll_lock;

	unsigned int active_crtcs;
	/* minimum acceptable cdclk for each pipe */
	int min_cdclk[I915_MAX_PIPES];
	/* minimum acceptable voltage level for each pipe */
	u8 min_voltage_level[I915_MAX_PIPES];

	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	struct i915_wa_list gt_wa_list;

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/*
	 * edram size in MB.
	 * Cannot be determined by PCIID. You must always read a register.
	 */
	u32 edram_size_mb;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested. Note that
	 * this lock may be held for long periods of time when
	 * talking to hw - so only take it when talking to hw!
	 */
	struct mutex pcu_lock;

	/* gen6+ GT PM state */
	struct intel_gen6_power_mgmt gt_pm;
	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdev register on this device */
	struct intel_fbdev *fbdev;
	struct work_struct fbdev_suspend_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	/* hda/i915 audio component */
	struct i915_audio_component *audio_component;
	bool audio_component_registered;
	/**
	 * av_mutex - mutex for audio/video sync
	 *
	 */
	struct mutex av_mutex;

	struct {
		struct mutex mutex;
		struct list_head list;
		struct llist_head free_list;
		struct work_struct free_work;

		/* The hw wants to have a stable context identifier for the
		 * lifetime of the context (for OA, PASID, faults, etc).
		 * This is limited in execlists to 21 bits.
		 */
		struct ida hw_ida;
#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */
#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
		struct list_head hw_id_list;
	} contexts;

	u32 fdi_rx_config;

	/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
	u32 chv_phy_control;
	/*
	 * Shadows for CHV DPLL_MD regs to keep the state
	 * checker somewhat working in the presence of hardware
	 * crappiness (can't read out DPLL_MD for pipes B & C).
	 */
	u32 chv_dpll_md[I915_MAX_PIPES];
	u32 bxt_phy_grc;

	u32 suspend_count;
	bool power_domains_suspended;
	struct i915_suspend_saved_registers regfile;
	struct vlv_s0ix_state vlv_s0ix_state;

	enum {
		I915_SAGV_UNKNOWN = 0,
		I915_SAGV_DISABLED,
		I915_SAGV_ENABLED,
		I915_SAGV_NOT_CONTROLLED
	} sagv_status;
	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		u16 pri_latency[5];
		/* sprite */
		u16 spr_latency[5];
		/* cursor */
		u16 cur_latency[5];
		/*
		 * Raw watermark memory latency values
		 * for SKL for all 8 levels
		 * in 1us units.
		 */
		u16 skl_latency[8];

		/* current hardware state */
		union {
			struct ilk_wm_values hw;
			struct skl_ddb_values skl_hw;
			struct vlv_wm_values vlv;
			struct g4x_wm_values g4x;
		};

		u8 max_level;

		/*
		 * Should be held around atomic WM register writing; also
		 * protects * intel_crtc->wm.active and
		 * cstate->wm.need_postvbl_update.
		 */
		struct mutex wm_mutex;

		/*
		 * Set during HW readout of watermarks/DDB.  Some platforms
		 * need to know when we're still using BIOS-provided values
		 * (which we don't fully trust).
		 */
		bool distrust_bios_wm;
	} wm;

	struct dram_info {
		bool valid;
		bool is_16gb_dimm;
		u8 num_channels;
		u8 ranks;
		u32 bandwidth_kbps;
		bool symmetric_memory;
		enum intel_dram_type {
			INTEL_DRAM_UNKNOWN,
			INTEL_DRAM_DDR3,
			INTEL_DRAM_DDR4,
			INTEL_DRAM_LPDDR3,
			INTEL_DRAM_LPDDR4
		} type;
1854 1855
	} dram_info;

	struct i915_runtime_pm runtime_pm;

	struct {
		bool initialized;

		struct kobject *metrics_kobj;
		struct ctl_table_header *sysctl_header;

		/*
		 * Lock associated with adding/modifying/removing OA configs
		 * in dev_priv->perf.metrics_idr.
		 */
		struct mutex metrics_lock;

		/*
		 * List of dynamic configurations, you need to hold
		 * dev_priv->perf.metrics_lock to access it.
		 */
		struct idr metrics_idr;

		/*
		 * Lock associated with anything below within this structure
		 * except exclusive_stream.
		 */
		struct mutex lock;
		struct list_head streams;

		struct {
			/*
			 * The stream currently using the OA unit. If accessed
			 * outside a syscall associated to its file
			 * descriptor, you need to hold
			 * dev_priv->drm.struct_mutex.
			 */
			struct i915_perf_stream *exclusive_stream;

			struct intel_context *pinned_ctx;
			u32 specific_ctx_id;
			u32 specific_ctx_id_mask;

			struct hrtimer poll_check_timer;
			wait_queue_head_t poll_wq;
			bool pollin;

			/**
			 * For rate limiting any notifications of spurious
			 * invalid OA reports
			 */
			struct ratelimit_state spurious_report_rs;

			bool periodic;
			int period_exponent;

			struct i915_oa_config test_config;

			struct {
				struct i915_vma *vma;
				u8 *vaddr;
				u32 last_ctx_id;
				int format;
				int format_size;

				/**
				 * Locks reads and writes to all head/tail state
				 *
				 * Consider: the head and tail pointer state
				 * needs to be read consistently from a hrtimer
				 * callback (atomic context) and read() fop
				 * (user context) with tail pointer updates
				 * happening in atomic context and head updates
				 * in user context and the (unlikely)
				 * possibility of read() errors needing to
				 * reset all head/tail state.
				 *
				 * Note: Contention or performance aren't
				 * currently a significant concern here
				 * considering the relatively low frequency of
				 * hrtimer callbacks (5ms period) and that
				 * reads typically only happen in response to a
				 * hrtimer event and likely complete before the
				 * next callback.
				 *
				 * Note: This lock is not held *while* reading
				 * and copying data to userspace so the value
				 * of head observed in hrtimer callbacks won't
				 * represent any partial consumption of data.
				 */
				spinlock_t ptr_lock;

				/**
				 * One 'aging' tail pointer and one 'aged'
				 * tail pointer ready to be used for reading.
				 *
				 * Initial values of 0xffffffff are invalid
				 * and imply that an update is required
				 * (and should be ignored by an attempted
				 * read)
				 */
				struct {
					u32 offset;
				} tails[2];

				/**
				 * Index for the aged tail ready to read()
				 * data up to.
				 */
				unsigned int aged_tail_idx;

				/**
				 * A monotonic timestamp for when the current
				 * aging tail pointer was read; used to
				 * determine when it is old enough to trust.
				 */
				u64 aging_timestamp;

				/**
				 * Although we can always read back the head
				 * pointer register, we prefer to avoid
				 * trusting the HW state, just to avoid any
				 * risk that some hardware condition could
				 * somehow bump the head pointer unpredictably
				 * and cause us to forward the wrong OA buffer
				 * data to userspace.
				 */
				u32 head;
			} oa_buffer;

			u32 gen7_latched_oastatus1;
			u32 ctx_oactxctrl_offset;
			u32 ctx_flexeu0_offset;

			/**
			 * The RPT_ID/reason field for Gen8+ includes a bit
			 * to determine if the CTX ID in the report is valid
			 * but the specific bit differs between Gen 8 and 9
			 */
			u32 gen8_valid_ctx_bit;

			struct i915_oa_ops ops;
			const struct i915_oa_format *oa_formats;
		} oa;
	} perf;

	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
	struct {
		void (*resume)(struct drm_i915_private *);
		void (*cleanup_engine)(struct intel_engine_cs *engine);

		struct i915_gt_timelines {
			struct mutex mutex; /* protects list, tainted by GPU */
			struct list_head active_list;

			/* Pack multiple timelines' seqnos into the same page */
			spinlock_t hwsp_lock;
			struct list_head hwsp_free_list;
		} timelines;

		intel_engine_mask_t active_engines;
		struct list_head active_rings;
		struct list_head closed_vma;
		u32 active_requests;

		/**
		 * Is the GPU currently considered idle, or busy executing
		 * userspace requests? Whilst idle, we allow runtime power
		 * management to power down the hardware and display clocks.
		 * In order to reduce the effect on performance, there
		 * is a slight delay before we do so.
		 */
		intel_wakeref_t awake;

		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;

		/**
		 * When we detect an idle GPU, we want to turn on
		 * powersaving features. So once we see that there
		 * are no more requests outstanding and no more
		 * arrive within a small period of time, we fire
		 * off the idle_work.
		 */
		struct delayed_work idle_work;

		ktime_t last_init_time;

		struct i915_vma *scratch;
	} gt;

	/* For i945gm vblank irq vs. C3 workaround */
	struct {
		struct work_struct work;
		struct pm_qos_request pm_qos;
		u8 c3_disable_latency;
		u8 enabled;
	} i945gm_vblank;

	/* perform PHY state sanity checks? */
	bool chv_phy_assert[2];

	bool ipc_enabled;

	/* Used to save the pipe-to-encoder mapping for audio */
	struct intel_encoder *av_enc_map[I915_MAX_PIPES];

	/* necessary resource sharing with HDMI LPE audio driver. */
	struct {
		struct platform_device *platdev;
		int	irq;
	} lpe_audio;

	struct i915_pmu pmu;

	struct i915_hdcp_comp_master *hdcp_master;
	bool hdcp_comp_added;

	/* Mutex to protect the above hdcp component related values. */
	struct mutex hdcp_comp_mutex;

	/*
	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
	 * will be rejected. Instead look for a better place.
	 */
};

struct dram_dimm_info {
	u8 size, width, ranks;
};

struct dram_channel_info {
	struct dram_dimm_info dimm_l, dimm_s;
	u8 ranks;
	bool is_16gb_dimm;
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return to_i915(dev_get_drvdata(kdev));
}

static inline struct drm_i915_private *wopcm_to_i915(struct intel_wopcm *wopcm)
{
	return container_of(wopcm, struct drm_i915_private, wopcm);
}

static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
{
	return container_of(guc, struct drm_i915_private, guc);
}

static inline struct drm_i915_private *huc_to_i915(struct intel_huc *huc)
{
	return container_of(huc, struct drm_i915_private, huc);
}

static inline struct drm_i915_private *uncore_to_i915(struct intel_uncore *uncore)
{
	return container_of(uncore, struct drm_i915_private, uncore);
}

/* Simple iterator over all initialised engines */
#define for_each_engine(engine__, dev_priv__, id__) \
	for ((id__) = 0; \
	     (id__) < I915_NUM_ENGINES; \
	     (id__)++) \
		for_each_if ((engine__) = (dev_priv__)->engine[(id__)])
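
/*
 * Usage sketch (illustrative only, not part of this header): counting the
 * initialised engines; `i915` stands in for a valid device pointer.
 *
 *	struct intel_engine_cs *engine;
 *	enum intel_engine_id id;
 *	unsigned int count = 0;
 *
 *	for_each_engine(engine, i915, id)
 *		count++;
 */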

/* Iterator over subset of engines selected by mask */
#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \
	for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \
	     (tmp__) ? \
	     ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \
	     0;)
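
/*
 * Usage sketch (illustrative only): walking just the engines selected by a
 * caller-supplied mask; `tmp` is scratch state consumed by the iterator and
 * handle_engine() is a hypothetical helper.
 *
 *	struct intel_engine_cs *engine;
 *	intel_engine_mask_t tmp;
 *
 *	for_each_engine_masked(engine, i915, mask, tmp)
 *		handle_engine(engine);
 */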

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) ({ \
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \
	BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \
})
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
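
/*
 * Illustrative example (not from the driver): with PLANE_PRIMARY == 0 and
 * PIPE_B == 1, INTEL_FRONTBUFFER(PIPE_B, PLANE_PRIMARY) is BIT(0 + 8 * 1),
 * i.e. 0x100, and INTEL_FRONTBUFFER_ALL_MASK(PIPE_B) spans bits 8..15 of
 * obj->frontbuffer_bits.
 */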

/*
 * Optimised SGL iterator for GEM objects
 */
static __always_inline struct sgt_iter {
	struct scatterlist *sgp;
	union {
		unsigned long pfn;
		dma_addr_t dma;
	};
	unsigned int curr;
	unsigned int max;
} __sgt_iter(struct scatterlist *sgl, bool dma) {
	struct sgt_iter s = { .sgp = sgl };

	if (s.sgp) {
		s.max = s.curr = s.sgp->offset;
		s.max += s.sgp->length;
		if (dma)
			s.dma = sg_dma_address(s.sgp);
		else
			s.pfn = page_to_pfn(sg_page(s.sgp));
	}

	return s;
}

static inline struct scatterlist *____sg_next(struct scatterlist *sg)
{
	++sg;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);
	return sg;
}

/**
 * __sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   If the entry is the last, return NULL; otherwise, step to the next
 *   element in the array (@sg@+1). If that's a chain pointer, follow it;
 *   otherwise just return the pointer to the current element.
 **/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
	return sg_is_last(sg) ? NULL : ____sg_next(sg);
}

/**
 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
 * @__dmap:	DMA address (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 */
#define for_each_sgt_dma(__dmap, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, true);			\
	     ((__dmap) = (__iter).dma + (__iter).curr);			\
	     (((__iter).curr += I915_GTT_PAGE_SIZE) >= (__iter).max) ?	\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0)
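
/*
 * Usage sketch (illustrative only): walking the DMA addresses of an
 * object's backing store one GTT page at a time; `pages` is an sg_table
 * and write_pte() is a hypothetical helper.
 *
 *	struct sgt_iter iter;
 *	dma_addr_t addr;
 *
 *	for_each_sgt_dma(addr, iter, pages)
 *		write_pte(addr);
 */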

/**
 * for_each_sgt_page - iterate over the pages of the given sg_table
 * @__pp:	page pointer (output)
 * @__iter:	'struct sgt_iter' (iterator state, internal)
 * @__sgt:	sg_table to iterate over (input)
 */
#define for_each_sgt_page(__pp, __iter, __sgt)				\
	for ((__iter) = __sgt_iter((__sgt)->sgl, false);		\
	     ((__pp) = (__iter).pfn == 0 ? NULL :			\
	      pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
	     (((__iter).curr += PAGE_SIZE) >= (__iter).max) ?		\
	     (__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0 : 0)
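
/*
 * Usage sketch (illustrative only): flushing every backing page of an
 * sg_table; drm_clflush_pages() is a real helper but this loop is invented
 * for illustration.
 *
 *	struct sgt_iter iter;
 *	struct page *page;
 *
 *	for_each_sgt_page(page, iter, pages)
 *		drm_clflush_pages(&page, 1);
 */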

bool i915_sg_trim(struct sg_table *orig_st);

static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
{
	unsigned int page_sizes;

	page_sizes = 0;
	while (sg) {
		GEM_BUG_ON(sg->offset);
		GEM_BUG_ON(!IS_ALIGNED(sg->length, PAGE_SIZE));
		page_sizes |= sg->length;
		sg = __sg_next(sg);
	}

	return page_sizes;
}

static inline unsigned int i915_sg_segment_size(void)
{
	unsigned int size = swiotlb_max_segment();

	if (size == 0)
		return SCATTERLIST_MAX_SEGMENT;

	size = rounddown(size, PAGE_SIZE);
	/* swiotlb_max_segment() can return 1 byte when it means one page. */
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return size;
}

#define INTEL_INFO(dev_priv)	(&(dev_priv)->__info)
#define RUNTIME_INFO(dev_priv)	(&(dev_priv)->__runtime)
#define DRIVER_CAPS(dev_priv)	(&(dev_priv)->caps)

#define INTEL_GEN(dev_priv)	(INTEL_INFO(dev_priv)->gen)
#define INTEL_DEVID(dev_priv)	(RUNTIME_INFO(dev_priv)->device_id)

#define REVID_FOREVER		0xff
#define INTEL_REVID(dev_priv)	((dev_priv)->drm.pdev->revision)

#define INTEL_GEN_MASK(s, e) ( \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(s)) + \
	BUILD_BUG_ON_ZERO(!__builtin_constant_p(e)) + \
	GENMASK((e) - 1, (s) - 1))

/* Returns true if Gen is in inclusive range [Start, End] */
#define IS_GEN_RANGE(dev_priv, s, e) \
	(!!(INTEL_INFO(dev_priv)->gen_mask & INTEL_GEN_MASK((s), (e))))

#define IS_GEN(dev_priv, n) \
	(BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \
	 INTEL_INFO(dev_priv)->gen == (n))

/*
 * Return true if revision is in range [since,until] inclusive.
 *
 * Use 0 for open-ended since, and REVID_FOREVER for open-ended until.
 */
#define IS_REVID(p, since, until) \
	(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
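
/*
 * Illustrative examples (not from the driver): IS_GEN(i915, 9) matches gen9
 * only, IS_GEN_RANGE(i915, 9, 11) matches gen9 through gen11 inclusive, and
 * IS_REVID(i915, 0, SKL_REVID_F0) would bound a workaround to the early
 * steppings of whatever platform the caller has already checked for (the
 * SKL_REVID_* values are defined further below).
 */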

static __always_inline unsigned int
__platform_mask_index(const struct intel_runtime_info *info,
		      enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	/* Expand the platform_mask array if this fails. */
	BUILD_BUG_ON(INTEL_MAX_PLATFORMS >
		     pbits * ARRAY_SIZE(info->platform_mask));

	return p / pbits;
}

static __always_inline unsigned int
__platform_mask_bit(const struct intel_runtime_info *info,
		    enum intel_platform p)
{
	const unsigned int pbits =
		BITS_PER_TYPE(info->platform_mask[0]) - INTEL_SUBPLATFORM_BITS;

	return p % pbits + INTEL_SUBPLATFORM_BITS;
}

static inline u32
intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p)
{
	const unsigned int pi = __platform_mask_index(info, p);

	/* Mask the subplatform id bits, not the bit count. */
	return info->platform_mask[pi] & (BIT(INTEL_SUBPLATFORM_BITS) - 1);
}

static __always_inline bool
IS_PLATFORM(const struct drm_i915_private *i915, enum intel_platform p)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);

	BUILD_BUG_ON(!__builtin_constant_p(p));

	return info->platform_mask[pi] & BIT(pb);
}

static __always_inline bool
IS_SUBPLATFORM(const struct drm_i915_private *i915,
	       enum intel_platform p, unsigned int s)
{
	const struct intel_runtime_info *info = RUNTIME_INFO(i915);
	const unsigned int pi = __platform_mask_index(info, p);
	const unsigned int pb = __platform_mask_bit(info, p);
	const unsigned int msb = BITS_PER_TYPE(info->platform_mask[0]) - 1;
	const u32 mask = info->platform_mask[pi];

	BUILD_BUG_ON(!__builtin_constant_p(p));
	BUILD_BUG_ON(!__builtin_constant_p(s));
	BUILD_BUG_ON((s) >= INTEL_SUBPLATFORM_BITS);

	/* Shift and test on the MSB position so sign flag can be used. */
	return ((mask << (msb - pb)) & (mask << (msb - s))) & BIT(msb);
}
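
/*
 * Usage sketch (illustrative only): the IS_<platform>() and IS_*_ULT/ULX()
 * macros below are thin wrappers around these two helpers, e.g.
 *
 *	IS_PLATFORM(i915, INTEL_SKYLAKE)
 *	IS_SUBPLATFORM(i915, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
 *
 * Both the platform and subplatform arguments must be compile-time
 * constants, otherwise the BUILD_BUG_ON() checks fire.
 */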

#define IS_MOBILE(dev_priv)	(INTEL_INFO(dev_priv)->is_mobile)

#define IS_I830(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I830)
#define IS_I845G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I845G)
#define IS_I85X(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I85X)
#define IS_I865G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I865G)
#define IS_I915G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915G)
#define IS_I915GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I915GM)
#define IS_I945G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945G)
#define IS_I945GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I945GM)
#define IS_I965G(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965G)
#define IS_I965GM(dev_priv)	IS_PLATFORM(dev_priv, INTEL_I965GM)
#define IS_G45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G45)
#define IS_GM45(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GM45)
#define IS_G4X(dev_priv)	(IS_G45(dev_priv) || IS_GM45(dev_priv))
#define IS_PINEVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_PINEVIEW)
#define IS_G33(dev_priv)	IS_PLATFORM(dev_priv, INTEL_G33)
#define IS_IRONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IRONLAKE)
#define IS_IRONLAKE_M(dev_priv) \
	(IS_PLATFORM(dev_priv, INTEL_IRONLAKE) && IS_MOBILE(dev_priv))
#define IS_IVYBRIDGE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_IVYBRIDGE)
#define IS_IVB_GT1(dev_priv)	(IS_IVYBRIDGE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
#define IS_VALLEYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_VALLEYVIEW)
#define IS_CHERRYVIEW(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CHERRYVIEW)
#define IS_HASWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_HASWELL)
#define IS_BROADWELL(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROADWELL)
#define IS_SKYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_SKYLAKE)
#define IS_BROXTON(dev_priv)	IS_PLATFORM(dev_priv, INTEL_BROXTON)
#define IS_KABYLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_KABYLAKE)
#define IS_GEMINILAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_GEMINILAKE)
#define IS_COFFEELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_COFFEELAKE)
#define IS_CANNONLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_CANNONLAKE)
#define IS_ICELAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ICELAKE)
#define IS_ELKHARTLAKE(dev_priv)	IS_PLATFORM(dev_priv, INTEL_ELKHARTLAKE)
#define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \
				    (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULT)
#define IS_BDW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_BROADWELL, INTEL_SUBPLATFORM_ULX)
#define IS_BDW_GT3(dev_priv)	(IS_BROADWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULT)
#define IS_HSW_GT3(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_HSW_GT1(dev_priv)	(IS_HASWELL(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 1)
/* ULX machines are also considered ULT. */
#define IS_HSW_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_HASWELL, INTEL_SUBPLATFORM_ULX)
#define IS_SKL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_SKL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_SKYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_KBL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULT)
#define IS_KBL_ULX(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_ULX)
#define IS_AML_ULX(dev_priv) \
	(IS_SUBPLATFORM(dev_priv, INTEL_KABYLAKE, INTEL_SUBPLATFORM_AML) || \
	 IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_AML))
#define IS_SKL_GT2(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_SKL_GT3(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_SKL_GT4(dev_priv)	(IS_SKYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 4)
#define IS_KBL_GT2(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_KBL_GT3(dev_priv)	(IS_KABYLAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CFL_ULT(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_COFFEELAKE, INTEL_SUBPLATFORM_ULT)
#define IS_CFL_GT2(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 2)
#define IS_CFL_GT3(dev_priv)	(IS_COFFEELAKE(dev_priv) && \
				 INTEL_INFO(dev_priv)->gt == 3)
#define IS_CNL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_CANNONLAKE, INTEL_SUBPLATFORM_PORTF)
#define IS_ICL_WITH_PORT_F(dev_priv) \
	IS_SUBPLATFORM(dev_priv, INTEL_ICELAKE, INTEL_SUBPLATFORM_PORTF)

#define IS_ALPHA_SUPPORT(intel_info) ((intel_info)->is_alpha_support)

#define SKL_REVID_A0		0x0
#define SKL_REVID_B0		0x1
#define SKL_REVID_C0		0x2
#define SKL_REVID_D0		0x3
#define SKL_REVID_E0		0x4
#define SKL_REVID_F0		0x5
#define SKL_REVID_G0		0x6
#define SKL_REVID_H0		0x7

#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))

#define BXT_REVID_A0		0x0
#define BXT_REVID_A1		0x1
#define BXT_REVID_B0		0x3
#define BXT_REVID_B_LAST	0x8
#define BXT_REVID_C0		0x9

#define IS_BXT_REVID(dev_priv, since, until) \
	(IS_BROXTON(dev_priv) && IS_REVID(dev_priv, since, until))

#define KBL_REVID_A0		0x0
#define KBL_REVID_B0		0x1
#define KBL_REVID_C0		0x2
#define KBL_REVID_D0		0x3
#define KBL_REVID_E0		0x4

#define IS_KBL_REVID(dev_priv, since, until) \
	(IS_KABYLAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define GLK_REVID_A0		0x0
#define GLK_REVID_A1		0x1

#define IS_GLK_REVID(dev_priv, since, until) \
	(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))

#define CNL_REVID_A0		0x0
#define CNL_REVID_B0		0x1
#define CNL_REVID_C0		0x2

#define IS_CNL_REVID(p, since, until) \
	(IS_CANNONLAKE(p) && IS_REVID(p, since, until))

#define ICL_REVID_A0		0x0
#define ICL_REVID_A2		0x1
#define ICL_REVID_B0		0x3
#define ICL_REVID_B2		0x4
#define ICL_REVID_C0		0x5

#define IS_ICL_REVID(p, since, until) \
	(IS_ICELAKE(p) && IS_REVID(p, since, until))

#define IS_LP(dev_priv)	(INTEL_INFO(dev_priv)->is_lp)
#define IS_GEN9_LP(dev_priv)	(IS_GEN(dev_priv, 9) && IS_LP(dev_priv))
#define IS_GEN9_BC(dev_priv)	(IS_GEN(dev_priv, 9) && !IS_LP(dev_priv))

#define HAS_ENGINE(dev_priv, id) (INTEL_INFO(dev_priv)->engine_mask & BIT(id))

#define ENGINE_INSTANCES_MASK(dev_priv, first, count) ({		\
	unsigned int first__ = (first);					\
	unsigned int count__ = (count);					\
	(INTEL_INFO(dev_priv)->engine_mask &				\
	 GENMASK(first__ + count__ - 1, first__)) >> first__;		\
})
#define VDBOX_MASK(dev_priv) \
	ENGINE_INSTANCES_MASK(dev_priv, VCS0, I915_MAX_VCS)
#define VEBOX_MASK(dev_priv) \
	ENGINE_INSTANCES_MASK(dev_priv, VECS0, I915_MAX_VECS)
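
/*
 * Illustrative example (not from the driver): on a part with only VCS0 and
 * VCS2 populated, VDBOX_MASK(i915) evaluates to 0b101, so the presence of
 * the third instance can be tested with VDBOX_MASK(i915) & BIT(2) (the
 * bits are relative to the first instance).
 */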

#define HAS_LLC(dev_priv)	(INTEL_INFO(dev_priv)->has_llc)
#define HAS_SNOOP(dev_priv)	(INTEL_INFO(dev_priv)->has_snoop)
#define HAS_EDRAM(dev_priv)	((dev_priv)->edram_size_mb)
#define HAS_WT(dev_priv)	((IS_HASWELL(dev_priv) || \
				 IS_BROADWELL(dev_priv)) && HAS_EDRAM(dev_priv))

#define HWS_NEEDS_PHYSICAL(dev_priv)	(INTEL_INFO(dev_priv)->hws_needs_physical)

#define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_contexts)
#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_elsq)
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
		(INTEL_INFO(dev_priv)->has_logical_ring_preemption)

#define HAS_EXECLISTS(dev_priv) HAS_LOGICAL_RING_CONTEXTS(dev_priv)

#define INTEL_PPGTT(dev_priv) (INTEL_INFO(dev_priv)->ppgtt_type)
#define HAS_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) != INTEL_PPGTT_NONE)
#define HAS_FULL_PPGTT(dev_priv) \
	(INTEL_PPGTT(dev_priv) >= INTEL_PPGTT_FULL)

#define HAS_PAGE_SIZES(dev_priv, sizes) ({ \
	GEM_BUG_ON((sizes) == 0); \
	((sizes) & ~INTEL_INFO(dev_priv)->page_sizes) == 0; \
})
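
/*
 * Usage sketch (illustrative only): checking that the device supports both
 * 4K and 64K GTT pages before taking a huge-page path; use_64k_pages() is a
 * hypothetical helper.
 *
 *	if (HAS_PAGE_SIZES(i915, I915_GTT_PAGE_SIZE_4K |
 *				 I915_GTT_PAGE_SIZE_64K))
 *		use_64k_pages();
 */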

#define HAS_OVERLAY(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev_priv) \
		(INTEL_INFO(dev_priv)->display.overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))

/* WaRsDisableCoarsePowerGating:skl,cnl */
#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
	(IS_CANNONLAKE(dev_priv) || \
	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))

#define HAS_GMBUS_IRQ(dev_priv) (INTEL_GEN(dev_priv) >= 4)
#define HAS_GMBUS_BURST_READ(dev_priv) (INTEL_GEN(dev_priv) >= 10 || \
					IS_GEMINILAKE(dev_priv) || \
					IS_KABYLAKE(dev_priv))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev_priv) (!IS_GEN(dev_priv, 2) && \
					 !(IS_I915G(dev_priv) || \
					 IS_I915GM(dev_priv)))
#define SUPPORTS_TV(dev_priv)		(INTEL_INFO(dev_priv)->display.supports_tv)
#define I915_HAS_HOTPLUG(dev_priv)	(INTEL_INFO(dev_priv)->display.has_hotplug)

#define HAS_FW_BLC(dev_priv)	(INTEL_GEN(dev_priv) > 2)
#define HAS_FBC(dev_priv)	(INTEL_INFO(dev_priv)->display.has_fbc)
#define HAS_CUR_FBC(dev_priv)	(!HAS_GMCH(dev_priv) && INTEL_GEN(dev_priv) >= 7)

#define HAS_IPS(dev_priv)	(IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))

#define HAS_DP_MST(dev_priv)	(INTEL_INFO(dev_priv)->display.has_dp_mst)

#define HAS_DDI(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev_priv) (INTEL_INFO(dev_priv)->has_fpga_dbg)
#define HAS_PSR(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_psr)
#define HAS_TRANSCODER_EDP(dev_priv)	 (INTEL_INFO(dev_priv)->trans_offsets[TRANSCODER_EDP] != 0)

#define HAS_RC6(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6)
#define HAS_RC6p(dev_priv)		 (INTEL_INFO(dev_priv)->has_rc6p)
#define HAS_RC6pp(dev_priv)		 (false) /* HW was never validated */

#define HAS_CSR(dev_priv)	(INTEL_INFO(dev_priv)->display.has_csr)

#define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm)
#define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc)

#define HAS_IPC(dev_priv)		 (INTEL_INFO(dev_priv)->display.has_ipc)

/*
 * For now, anything with a GuC requires uCode loading, and then supports
 * command submission once loaded. But these are logically independent
 * properties, so we have separate macros to test them.
 */
#define HAS_GUC(dev_priv)	(INTEL_INFO(dev_priv)->has_guc)
#define HAS_GUC_CT(dev_priv)	(INTEL_INFO(dev_priv)->has_guc_ct)
#define HAS_GUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_GUC_SCHED(dev_priv)	(HAS_GUC(dev_priv))

/* For now, anything with a GuC has also HuC */
#define HAS_HUC(dev_priv)	(HAS_GUC(dev_priv))
#define HAS_HUC_UCODE(dev_priv)	(HAS_GUC(dev_priv))

/* Having a GuC is not the same as using a GuC */
#define USES_GUC(dev_priv)		intel_uc_is_using_guc(dev_priv)
#define USES_GUC_SUBMISSION(dev_priv)	intel_uc_is_using_guc_submission(dev_priv)
#define USES_HUC(dev_priv)		intel_uc_is_using_huc(dev_priv)

#define HAS_POOLED_EU(dev_priv)	(INTEL_INFO(dev_priv)->has_pooled_eu)

#define INTEL_PCH_DEVICE_ID_MASK		0xff80
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
#define INTEL_PCH_WPT_DEVICE_ID_TYPE		0x8c80
#define INTEL_PCH_WPT_LP_DEVICE_ID_TYPE		0x9c80
#define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA280
#define INTEL_PCH_CNP_DEVICE_ID_TYPE		0xA300
#define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE		0x9D80
#define INTEL_PCH_CMP_DEVICE_ID_TYPE		0x0280
#define INTEL_PCH_ICP_DEVICE_ID_TYPE		0x3480
#define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
#define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
#define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */

#define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type)
#define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id)
#define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP)
#define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP)
#define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP)
#define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT)
#define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT)
#define HAS_PCH_LPT_LP(dev_priv) \
	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \
	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE)
#define HAS_PCH_LPT_H(dev_priv) \
	(INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \
	 INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE)
#define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT)
#define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX)
#define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP)
#define HAS_PCH_SPLIT(dev_priv) (INTEL_PCH_TYPE(dev_priv) != PCH_NONE)

#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)

#define HAS_LSPCON(dev_priv) (INTEL_GEN(dev_priv) >= 9)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf)
#define NUM_L3_SLICES(dev_priv) (IS_HSW_GT3(dev_priv) ? \
				 2 : HAS_L3_DPF(dev_priv))

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER 3

#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0)

#include "i915_trace.h"

static inline bool intel_vtd_active(void)
{
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return INTEL_GEN(dev_priv) >= 6 && intel_vtd_active();
}

static inline bool
intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
{
	return IS_BROXTON(dev_priv) && intel_vtd_active();
}
/* i915_drv.c */
void __printf(3, 4)
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
	      const char *fmt, ...);

#define i915_report_error(dev_priv, fmt, ...)				   \
	__i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)

#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#else
#define i915_compat_ioctl NULL
#endif
extern const struct dev_pm_ops i915_pm_ops;

extern int i915_driver_load(struct pci_dev *pdev,
			    const struct pci_device_id *ent);
extern void i915_driver_unload(struct drm_device *dev);

extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
extern void intel_hangcheck_init(struct drm_i915_private *dev_priv);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);

int intel_engines_init_mmio(struct drm_i915_private *dev_priv);
int intel_engines_init(struct drm_i915_private *dev_priv);

u32 intel_calculate_mcr_s_ss_select(struct drm_i915_private *dev_priv);

/* intel_hotplug.c */
void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
			   u32 pin_mask, u32 long_mask);
void intel_hpd_init(struct drm_i915_private *dev_priv);
void intel_hpd_init_work(struct drm_i915_private *dev_priv);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
				   enum port port);
bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);

/* i915_irq.c */
static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
{
	unsigned long delay;

	if (unlikely(!i915_modparams.enable_hangcheck))
		return;

	/* Don't continually defer the hangcheck so that it is always run at
	 * least once after work has been scheduled on any ring. Otherwise,
	 * we will ignore a hung ring if a second ring is kept busy.
	 */

	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
	queue_delayed_work(system_long_wq,
			   &dev_priv->gpu_error.hangcheck_work, delay);
}

extern void intel_irq_init(struct drm_i915_private *dev_priv);
extern void intel_irq_fini(struct drm_i915_private *dev_priv);
int intel_irq_install(struct drm_i915_private *dev_priv);
void intel_irq_uninstall(struct drm_i915_private *dev_priv);

static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->gvt;
}

static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
{
	return dev_priv->vgpu.active;
}

u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
			      enum pipe pipe);
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask);

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask);

void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   u32 mask,
				   u32 bits);
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    u32 interrupt_mask,
			    u32 enabled_irq_mask);
static inline void
ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
	ilk_update_display_irq(dev_priv, bits, bits);
}
static inline void
ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
{
	ilk_update_display_irq(dev_priv, bits, 0);
}
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 u32 interrupt_mask,
			 u32 enabled_irq_mask);
static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
				       enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
}
static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
					enum pipe pipe, u32 bits)
{
	bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
}
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  u32 interrupt_mask,
				  u32 enabled_irq_mask);
static inline void
ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
	ibx_display_interrupt_update(dev_priv, bits, bits);
}
static inline void
ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
{
	ibx_display_interrupt_update(dev_priv, bits, 0);
}

/* i915_gem.c */
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_init_userptr(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv);
int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_sanitize(struct drm_i915_private *i915);
int i915_gem_init_early(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv);
void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
int i915_gem_freeze(struct drm_i915_private *dev_priv);
int i915_gem_freeze_late(struct drm_i915_private *dev_priv);

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size);
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
				 const void *data, size_t size);
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915)
{
	if (!atomic_read(&i915->mm.free_count))
		return;

	/* A single pass should suffice to release all the freed objects (along
	 * most call paths), but be a little more paranoid in that freeing
	 * the objects does take a little amount of time, during which the rcu
	 * callbacks could have added new objects into the freed list, and
	 * armed the work again.
	 */
	do {
		rcu_barrier();
	} while (flush_work(&i915->mm.free_work));
}

static inline void i915_gem_drain_workqueue(struct drm_i915_private *i915)
{
	/*
	 * Similar to objects above (see i915_gem_drain_freed_objects), in
	 * general we have workers that are armed by RCU and then rearm
	 * themselves in their callbacks. To be paranoid, we need to
	 * drain the workqueue a second time after waiting for the RCU
	 * grace period so that we catch work queued via RCU from the first
	 * pass. As neither drain_workqueue() nor flush_workqueue() report
	 * a result, we assume that two passes suffice to catch all
	 * recursive RCU delayed work.
	 */
	int pass = 2;
	do {
		rcu_barrier();
		drain_workqueue(i915->wq);
	} while (--pass);
}

struct i915_vma * __must_check
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags);

int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);

static inline int __sg_page_count(const struct scatterlist *sg)
{
	return sg->length >> PAGE_SHIFT;
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	I915_MM_SHRINKER /* called "recursively" from direct-reclaim-esque */
};

int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				enum i915_mm_subclass subclass);
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj);

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

static inline enum i915_map_type
i915_coherent_map_type(struct drm_i915_private *i915)
{
	return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);
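
/*
 * Usage sketch (illustrative only): map an object, fill it, then flush and
 * release the mapping. i915_coherent_map_type() above picks WB on LLC
 * platforms and WC elsewhere.
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(i915));
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_flush_map(obj);
 *	i915_gem_object_unpin_map(obj);
 */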

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush);
int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

static inline void
i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static inline int __must_check
i915_mutex_lock_interruptible(struct drm_device *dev)
{
	return mutex_lock_interruptible(&dev->struct_mutex);
}

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      u32 handle, u64 *offset);
int i915_gem_mmap_gtt_version(void);

void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits);

int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);

static inline bool __i915_wedged(struct i915_gpu_error *error)
{
	return unlikely(test_bit(I915_WEDGED, &error->flags));
}

static inline bool i915_reset_failed(struct drm_i915_private *i915)
{
	return __i915_wedged(&i915->gpu_error);
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	return READ_ONCE(error->reset_count);
}

static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
					  struct intel_engine_cs *engine)
{
	return READ_ONCE(error->reset_engine_count[engine->id]);
}

void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);

void i915_gem_init_mmio(struct drm_i915_private *i915);
int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv);
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv);
void i915_gem_fini(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_engines(struct drm_i915_private *dev_priv);
int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv,
			   unsigned int flags, long timeout);
void i915_gem_suspend(struct drm_i915_private *dev_priv);
void i915_gem_suspend_late(struct drm_i915_private *dev_priv);
void i915_gem_resume(struct drm_i915_private *dev_priv);
vm_fault_t i915_gem_fault(struct vm_fault *vmf);
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);
#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)

int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
				int align);
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
				struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *gem_obj, int flags);

static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
{
	return container_of(vm, struct i915_hw_ppgtt, vm);
}

/* i915_gem_fence_reg.c */
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv);
void i915_unreserve_fence(struct drm_i915_fence_reg *fence);

void i915_gem_restore_fences(struct drm_i915_private *dev_priv);

void i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				       struct sg_table *pages);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
					 struct sg_table *pages);

static inline struct i915_gem_context *
__i915_gem_context_lookup_rcu(struct drm_i915_file_private *file_priv, u32 id)
{
	return idr_find(&file_priv->context_idr, id);
}

static inline struct i915_gem_context *
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
	struct i915_gem_context *ctx;

	rcu_read_lock();
	ctx = __i915_gem_context_lookup_rcu(file_priv, id);
	if (ctx && !kref_get_unless_zero(&ctx->ref))
		ctx = NULL;
	rcu_read_unlock();

	return ctx;
}
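
/*
 * Usage sketch (illustrative only): resolving a user-supplied context id in
 * an ioctl. The reference taken by i915_gem_context_lookup() must be
 * dropped with i915_gem_context_put() once the context is no longer needed.
 *
 *	struct i915_gem_context *ctx;
 *
 *	ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
 *	if (!ctx)
 *		return -ENOENT;
 *	// ... use ctx ...
 *	i915_gem_context_put(ctx);
 */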

int i915_perf_open_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file);
int i915_perf_add_config_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
void i915_oa_init_reg_state(struct intel_engine_cs *engine,
			    struct intel_context *ce,
			    u32 *reg_state);

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct i915_address_space *vm,
					  u64 min_size, u64 alignment,
					  unsigned cache_level,
					  u64 start, u64 end,
					  unsigned flags);
int __must_check i915_gem_evict_for_node(struct i915_address_space *vm,
					 struct drm_mm_node *node,
					 unsigned int flags);
int i915_gem_evict_vm(struct i915_address_space *vm);

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv);

/* belongs in i915_gem_gtt.h */
static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
{
	wmb();
	if (INTEL_GEN(dev_priv) < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_stolen.c */
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment);
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start,
					 u64 end);
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node);
int i915_gem_init_stolen(struct drm_i915_private *dev_priv);
void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			      resource_size_t size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       resource_size_t stolen_offset,
					       resource_size_t gtt_offset,
					       resource_size_t size);

/* i915_gem_internal.c */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *dev_priv,
				phys_addr_t size);

/* i915_gem_shrinker.c */
unsigned long i915_gem_shrink(struct drm_i915_private *i915,
			      unsigned long target,
			      unsigned long *nr_scanned,
			      unsigned flags);
#define I915_SHRINK_PURGEABLE 0x1
#define I915_SHRINK_UNBOUND 0x2
#define I915_SHRINK_BOUND 0x4
#define I915_SHRINK_ACTIVE 0x8
#define I915_SHRINK_VMAPS 0x10
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915);
void i915_gem_shrinker_register(struct drm_i915_private *i915);
void i915_gem_shrinker_unregister(struct drm_i915_private *i915);
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		i915_gem_object_is_tiled(obj);
}

u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size,
			unsigned int tiling, unsigned int stride);
u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size,
			     unsigned int tiling, unsigned int stride);

/* i915_debugfs.c */
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
int i915_debugfs_connector_add(struct drm_connector *connector);
void intel_display_crc_init(struct drm_i915_private *dev_priv);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
static inline int i915_debugfs_connector_add(struct drm_connector *connector)
{ return 0; }
static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
#endif

const char *i915_cache_level_str(struct drm_i915_private *i915, int type);

/* i915_cmd_parser.c */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
void intel_engine_init_cmd_parser(struct intel_engine_cs *engine);
void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine);
int intel_engine_cmd_parser(struct intel_engine_cs *engine,
			    struct drm_i915_gem_object *batch_obj,
			    struct drm_i915_gem_object *shadow_batch_obj,
			    u32 batch_start_offset,
			    u32 batch_len,
			    bool is_master);

/* i915_perf.c */
extern void i915_perf_init(struct drm_i915_private *dev_priv);
extern void i915_perf_fini(struct drm_i915_private *dev_priv);
extern void i915_perf_register(struct drm_i915_private *dev_priv);
extern void i915_perf_unregister(struct drm_i915_private *dev_priv);

/* i915_suspend.c */
extern int i915_save_state(struct drm_i915_private *dev_priv);
extern int i915_restore_state(struct drm_i915_private *dev_priv);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_i915_private *dev_priv);
void i915_teardown_sysfs(struct drm_i915_private *dev_priv);

/* intel_lpe_audio.c */
int  intel_lpe_audio_init(struct drm_i915_private *dev_priv);
void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv);
void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv);
void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
			    enum pipe pipe, enum port port,
			    const void *eld, int ls_clock, bool dp_output);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_i915_private *dev_priv);
extern void intel_teardown_gmbus(struct drm_i915_private *dev_priv);
extern bool intel_gmbus_is_valid_pin(struct drm_i915_private *dev_priv,
				     unsigned int pin);
extern int intel_gmbus_output_aksv(struct i2c_adapter *adapter);

extern struct i2c_adapter *
intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_i915_private *dev_priv);

/* intel_bios.c */
void intel_bios_init(struct drm_i915_private *dev_priv);
void intel_bios_cleanup(struct drm_i915_private *dev_priv);
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
				     enum port port);
bool intel_bios_is_lspcon_present(struct drm_i915_private *dev_priv,
				enum port port);
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port port);

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* intel_device_info.c */
static inline struct intel_device_info *
mkwrite_device_info(struct drm_i915_private *dev_priv)
{
	return (struct intel_device_info *)INTEL_INFO(dev_priv);
}

static inline struct intel_sseu
intel_device_default_sseu(struct drm_i915_private *i915)
{
	const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu;
	struct intel_sseu value = {
		.slice_mask = sseu->slice_mask,
		.subslice_mask = sseu->subslice_mask[0],
		.min_eus_per_subslice = sseu->max_eus_per_subslice,
		.max_eus_per_subslice = sseu->max_eus_per_subslice,
	};

	return value;
}

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern int intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv,
				       bool state);
extern void intel_display_resume(struct drm_device *dev);
extern void i915_redisable_vga(struct drm_i915_private *dev_priv);
extern void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv);
extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
extern int intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
extern void intel_rps_mark_interactive(struct drm_i915_private *i915,
				       bool interactive);
extern bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
				  bool enable);
void intel_dsc_enable(struct intel_encoder *encoder,
		      const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);

int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *
intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_display_error_state *error);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
				    u32 val, int fast_timeout_us,
				    int slow_timeout_ms);
#define sandybridge_pcode_write(dev_priv, mbox, val)	\
	sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500, 0)

int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
int vlv_punit_write(struct drm_i915_private *dev_priv, u32 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_iosf_sb_read(struct drm_i915_private *dev_priv, u8 port, u32 reg);
void vlv_iosf_sb_write(struct drm_i915_private *dev_priv, u8 port, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

/* intel_dpio_phy.c */
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
			     enum dpio_phy *phy, enum dpio_channel *ch);
void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
				  enum port port, u32 margin, u32 scale,
				  u32 enable, u32 deemphasis);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
			    enum dpio_phy phy);
bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
			      enum dpio_phy phy);
u8 bxt_ddi_phy_calc_lane_lat_optim_mask(u8 lane_count);
void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
				     u8 lane_lat_optim_mask);
u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);

void chv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 deemph_reg_value, u32 margin_reg_value,
			      bool uniq_trans_scale);
void chv_data_lane_soft_reset(struct intel_encoder *encoder,
			      const struct intel_crtc_state *crtc_state,
			      bool reset);
void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void chv_phy_release_cl2_override(struct intel_encoder *encoder);
void chv_phy_post_pll_disable(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state);

void vlv_set_phy_signal_level(struct intel_encoder *encoder,
			      u32 demph_reg_value, u32 preemph_reg_value,
			      u32 uniqtranscale_reg_value, u32 tx3_demph);
void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state);
void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
				const struct intel_crtc_state *crtc_state);
void vlv_phy_reset_lanes(struct intel_encoder *encoder,
			 const struct intel_crtc_state *old_crtc_state);

/* intel_combo_phy.c */
void icl_combo_phys_init(struct drm_i915_private *dev_priv);
void icl_combo_phys_uninit(struct drm_i915_private *dev_priv);
void cnl_combo_phys_init(struct drm_i915_private *dev_priv);
void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv);

int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
			   const i915_reg_t reg);

u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1);

static inline u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv,
					 const i915_reg_t reg)
{
	return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000);
}

#define __I915_REG_OP(op__, dev_priv__, ...) \
	intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__)

#define I915_READ8(reg__)	  __I915_REG_OP(read8, dev_priv, (reg__))
#define I915_WRITE8(reg__, val__) __I915_REG_OP(write8, dev_priv, (reg__), (val__))

#define I915_READ16(reg__)	   __I915_REG_OP(read16, dev_priv, (reg__))
#define I915_WRITE16(reg__, val__) __I915_REG_OP(write16, dev_priv, (reg__), (val__))
#define I915_READ16_NOTRACE(reg__)	   __I915_REG_OP(read16_notrace, dev_priv, (reg__))
#define I915_WRITE16_NOTRACE(reg__, val__) __I915_REG_OP(write16_notrace, dev_priv, (reg__), (val__))

#define I915_READ(reg__)	 __I915_REG_OP(read, dev_priv, (reg__))
#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__))
#define I915_READ_NOTRACE(reg__)	 __I915_REG_OP(read_notrace, dev_priv, (reg__))
#define I915_WRITE_NOTRACE(reg__, val__) __I915_REG_OP(write_notrace, dev_priv, (reg__), (val__))

/* Be very careful with read/write 64-bit values. On 32-bit machines, they
 * will be implemented using 2 32-bit writes in an arbitrary order with
 * an arbitrary delay between them. This can cause the hardware to
 * act upon the intermediate value, possibly leading to corruption and
 * machine death. For this reason we do not support I915_WRITE64, or
 * dev_priv->uncore.funcs.mmio_writeq.
 *
 * When reading a 64-bit value as two 32-bit values, the delay may cause
 * the two reads to mismatch, e.g. a timestamp overflowing. Also note that
 * occasionally a 64-bit register does not actually support a full readq
 * and must be read using two 32-bit reads.
 *
 * You have been warned.
 */
#define I915_READ64(reg__)	__I915_REG_OP(read64, dev_priv, (reg__))
#define I915_READ64_2x32(lower_reg__, upper_reg__) \
	__I915_REG_OP(read64_2x32, dev_priv, (lower_reg__), (upper_reg__))
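
/*
 * A sketch of reading such a split counter (RING_TIMESTAMP and its _UDW
 * upper dword are one illustrative low/high register pair):
 *
 *	u64 ts = I915_READ64_2x32(RING_TIMESTAMP(engine->mmio_base),
 *				  RING_TIMESTAMP_UDW(engine->mmio_base));
 *
 * The helper re-reads the upper dword until it is stable, so the result
 * cannot be torn by a carry between the two 32-bit reads.
 */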

#define POSTING_READ(reg__)	__I915_REG_OP(posting_read, dev_priv, (reg__))
#define POSTING_READ16(reg__)	__I915_REG_OP(posting_read16, dev_priv, (reg__))

/* These are untraced mmio-accessors that are only valid to be used inside
 * critical sections, such as inside IRQ handlers, where forcewake is explicitly
 * controlled.
 *
 * Think twice, and think again, before using these.
 *
 * As an example, these accessors can possibly be used between:
 *
 * spin_lock_irq(&dev_priv->uncore.lock);
 * intel_uncore_forcewake_get__locked();
 *
 * and
 *
 * intel_uncore_forcewake_put__locked();
 * spin_unlock_irq(&dev_priv->uncore.lock);
 *
 *
 * Note: some registers may not need forcewake held, so
 * intel_uncore_forcewake_{get,put} can be omitted, see
 * intel_uncore_forcewake_for_reg().
 *
 * Certain architectures will die if the same cacheline is concurrently accessed
 * by different clients (e.g. on Ivybridge). Access to registers should
 * therefore generally be serialised, by either the dev_priv->uncore.lock or
 * a more localised lock guarding all access to that bank of registers.
 */
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
#define I915_WRITE64_FW(reg__, val__) __I915_REG_OP(write64_fw, dev_priv, (reg__), (val__))
#define POSTING_READ_FW(reg__) __I915_REG_OP(posting_read_fw, dev_priv, (reg__))
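
/*
 * A minimal sketch of the pattern described above (the register and the
 * forcewake domain are illustrative only):
 *
 *	spin_lock_irq(&dev_priv->uncore.lock);
 *	intel_uncore_forcewake_get__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	val = I915_READ_FW(GEN6_RPSTAT1);
 *	intel_uncore_forcewake_put__locked(&dev_priv->uncore, FORCEWAKE_ALL);
 *	spin_unlock_irq(&dev_priv->uncore.lock);
 */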

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
{
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return VLV_VGACNTRL;
	else if (INTEL_GEN(dev_priv) >= 5)
		return CPU_VGACNTRL;
	else
		return VGACNTRL;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
	/* nsecs_to_jiffies64() does not guard against overflow */
	if (NSEC_PER_SEC % HZ &&
	    div_u64(n, NSEC_PER_SEC) >= MAX_JIFFY_OFFSET / HZ)
		return MAX_JIFFY_OFFSET;

	return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
}

/*
 * If you need to wait X milliseconds between events A and B, but event B
 * doesn't happen exactly after event A, you record the timestamp (jiffies) of
 * when event A happened, then just before event B you call this function and
 * pass the timestamp as the first argument, and X as the second argument.
 */
static inline void
wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
{
	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;

	/*
	 * Don't re-read the value of "jiffies" every time since it may change
	 * behind our back and break the math.
	 */
	tmp_jiffies = jiffies;
	target_jiffies = timestamp_jiffies +
			 msecs_to_jiffies_timeout(to_wait_ms);

	if (time_after(target_jiffies, tmp_jiffies)) {
		remaining_jiffies = target_jiffies - tmp_jiffies;
		while (remaining_jiffies)
			remaining_jiffies =
			    schedule_timeout_uninterruptible(remaining_jiffies);
	}
}
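
/*
 * A sketch of the intended use, with illustrative names (not from this
 * header): enforce a minimum panel power-cycle delay between two events.
 *
 *	panel->last_power_off = jiffies;		(event A)
 *	...
 *	wait_remaining_ms_from_jiffies(panel->last_power_off,
 *				       panel->power_cycle_delay_ms);
 *	panel_power_on(panel);				(event B)
 */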

void i915_memcpy_init_early(struct drm_i915_private *dev_priv);
bool i915_memcpy_from_wc(void *dst, const void *src, unsigned long len);

/* The movntdqa instructions used for memcpy-from-wc require 16-byte alignment,
 * as well as SSE4.1 support. i915_memcpy_from_wc() will report if it cannot
 * perform the operation. To check beforehand, pass in the parameters to
 * i915_can_memcpy_from_wc() - since we only care about the low 4 bits,
 * you only need to pass in the minor offsets, page-aligned pointers are
 * always valid.
 *
 * For just checking for SSE4.1, in the foreknowledge that the future use
 * will be correctly aligned, just use i915_has_memcpy_from_wc().
 */
#define i915_can_memcpy_from_wc(dst, src, len) \
	i915_memcpy_from_wc((void *)((unsigned long)(dst) | (unsigned long)(src) | (len)), NULL, 0)

#define i915_has_memcpy_from_wc() \
	i915_memcpy_from_wc(NULL, NULL, 0)
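
/*
 * A sketch of the typical guarded use:
 *
 *	if (!i915_memcpy_from_wc(dst, src, len))
 *		memcpy(dst, src, len);	(fallback: no SSE4.1 or misaligned)
 */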

/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
		     unsigned long addr, unsigned long pfn, unsigned long size,
		     struct io_mapping *iomap);

static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) >= 10)
		return CNL_HWS_CSB_WRITE_INDEX;
	else
		return I915_HWS_CSB_WRITE_INDEX;
}

static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
{
	return i915_ggtt_offset(i915->gt.scratch);
}

#endif