/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 at the top, gen8 at the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>
#include <linux/mm.h>

#include "i915_gem_timeline.h"
#include "i915_gem_request.h"

#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_file_private;
struct drm_i915_fence_reg;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw uses PTE bits 11:4 for physical address bits 39:32 */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define GEN6_PTE_VALID			(1 << 0)
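/* Worked example (illustrative, not from the original header): for a
 * physical address with bits 39:32 = 0x03, e.g. addr = 0x3_0000_0000,
 * (addr >> 28) == 0x30 and masking with 0xff0 keeps exactly PTE bits 11:4,
 * so GEN6_GTT_ADDR_ENCODE() ORs 0x030 into the PTE alongside the low
 * address bits and the valid/cache flags defined above.
 */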

#define I915_PTES(pte_len)		(PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len)		(I915_PTES(pte_len) - 1)
#define I915_PDES			512
#define I915_PDE_MASK			(I915_PDES - 1)
#define NUM_PTE(pde_shift)		(1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES			I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE			(I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN			(PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT			22
#define GEN6_PDE_VALID			(1 << 0)

#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
#define BYT_PTE_WRITEABLE		(1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3		HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED		(0)
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)
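/* Worked expansion (illustrative): for the 4-bit value 0xb,
 * HSW_CACHEABILITY_CONTROL(0xb) == ((0xb & 0x7) << 1) | ((0xb & 0x8) << 8)
 * == 0x6 | 0x800 == 0x806, i.e. the low three bits land in PTE bits 3:1
 * and the top bit in PTE bit 11, matching HSW_WB_ELLC_LLC_AGE0 above.
 */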

/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 |  11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 |  11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
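/* Worked example (illustrative): the 48b virtual address
 * (1ULL << 39) | (2ULL << 30) | (3ULL << 21) | (4ULL << 12) | 0x5
 * decomposes as PML4E = 1, PDPE = 2, PDE = 3, PTE = 4, offset = 0x5,
 * matching the shifts and masks defined below.
 */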
#define GEN8_PML4ES_PER_PML4		512
#define GEN8_PML4E_SHIFT		39
#define GEN8_PML4E_MASK			(GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT			30
/* NB: GEN8_PDPE_MASK is wider than needed on 32b platforms, which have only
 * GEN8_LEGACY_PDPES page directory pointer entries, but the extra mask bits
 * have no impact on 32b page tables.
 */
#define GEN8_PDPE_MASK			0x1ff
#define GEN8_PDE_SHIFT			21
#define GEN8_PDE_MASK			0x1ff
#define GEN8_PTE_SHIFT			12
#define GEN8_PTE_MASK			0x1ff
#define GEN8_LEGACY_PDPES		4
#define GEN8_PTES			I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev_priv)	(USES_FULL_48BIT_PPGTT(dev_priv) ?\
					GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

#define PPAT_UNCACHED_INDEX		(_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX		0 /* WB LLC */
#define PPAT_CACHED_INDEX		_PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX		_PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP			(1<<6)
#define GEN8_PPAT_AGE(x)		(x<<4)
#define GEN8_PPAT_LLCeLLC		(3<<2)
#define GEN8_PPAT_LLCELLC		(2<<2)
#define GEN8_PPAT_LLC			(1<<2)
#define GEN8_PPAT_WB			(3<<0)
#define GEN8_PPAT_WT			(2<<0)
#define GEN8_PPAT_WC			(1<<0)
#define GEN8_PPAT_UC			(0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE		(0<<2)
#define GEN8_PPAT(i, x)			((uint64_t) (x) << ((i) * 8))
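/* Usage sketch (illustrative only; the real PPAT table is programmed in
 * i915_gem_gtt.c and differs per platform):
 *
 *	u64 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) |
 *		  GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) |
 *		  GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) |
 *		  GEN8_PPAT(3, GEN8_PPAT_UC);
 *
 * GEN8_PPAT(i, x) shifts one 8-bit entry into byte i of the 64-bit
 * register value.
 */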

struct sg_table;

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	struct {
		/* tiles */
		unsigned int width, height, stride, offset;
	} plane[2];
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotated;
	} params;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

struct i915_vma;

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)
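/* Example (illustrative): given struct i915_page_directory *pd, px_dma(pd)
 * expands to pd->base.daddr; the same accessors work for any of the px
 * structures below that embed a struct i915_page_dma named 'base'.
 */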

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct i915_gem_timeline timeline;
	struct drm_i915_private *i915;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 start;		/* Start offset always 0 for dri2 */
	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */

	bool closed;

	struct i915_page_dma scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/**
	 * List of vma that have been unbound.
	 *
	 * A reference is not held on the buffer while on this list.
	 */
	struct list_head unbound_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY	(1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

#define i915_is_ggtt(V) (!(V)->file)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;
	struct io_mapping mappable;	/* Mapping to our CPU mappable region */

	phys_addr_t mappable_base;	/* PA of our GMADR */
	u64 mappable_end;		/* End offset that we can CPU map */

	/* Stolen memory is segmented in hardware with different portions
	 * off-limits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	u32 stolen_size;		/* Total size of stolen memory */
	u32 stolen_usable_size;	/* Total size minus reserved ranges */
	u32 stolen_reserved_base;
	u32 stolen_reserved_size;

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	struct drm_mm_node error_capture;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen6_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen6_for_all_pdes(pt, pd, iter)					\
	for (iter = 0;							\
	     iter < I915_PDES &&					\
		(pt = (pd)->page_table[iter], true);			\
	     ++iter)
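/* Usage sketch (hypothetical caller; gen6_write_pde() names a helper of
 * the kind found in i915_gem_gtt.c):
 *
 *	struct i915_page_table *pt;
 *	uint32_t pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
 *		gen6_write_pde(&ppgtt->pd, pde, pt);
 *
 * Because the walk consumes start and length, pass copies if the caller
 * still needs the original values afterwards.
 */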

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}
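
/* Worked example (illustrative): with pde_shift == GEN6_PDE_SHIFT (22), a
 * page table covers NUM_PTE(22) == 1024 PTEs. For addr = 0x3fe000 and
 * length = 0x4000 the range crosses a 4M page table boundary, so the count
 * is clamped to 1024 - i915_pte_index(0x3fe000, 22) == 1024 - 1022 == 2
 * PTEs, rather than the four pages requested.
 */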

static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde from start
 * until start + length. On gen8+ it simply iterates over every page
 * directory entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter)			\
	for (iter = gen8_pde_index(start);				\
	     length > 0 && iter < I915_PDES &&				\
		(pt = (pd)->page_table[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT);		\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter)		\
	for (iter = gen8_pdpe_index(start);				\
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) &&		\
		(pd = (pdp)->page_directory[iter], true);		\
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter)		\
	for (iter = gen8_pml4e_index(start);				\
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 &&		\
		(pdp = (pml4)->pdps[iter], true);			\
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT);	\
		    temp = min(temp - start, length);			\
		    start += temp, length -= temp; }), ++iter)
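
/* Usage sketch (hypothetical 48b walk; the levels nest):
 *
 *	struct i915_page_directory_pointer *pdp;
 *	struct i915_page_directory *pd;
 *	uint64_t pml4e, pdpe;
 *
 *	gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e)
 *		gen8_for_each_pdpe(pd, pdp, start, length, pdpe)
 *			... operate on pd ...
 *
 * Beware that the macros consume start and length, so keep copies if the
 * values are needed again. Note also that gen8_for_each_pdpe() expands
 * I915_PDPES_PER_PDP(dev), so a suitable dev pointer must be in scope at
 * the expansion site.
 */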

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv,
					const char *name);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}
static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}
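
/* Lifecycle sketch (hypothetical caller; i915_ppgtt_create() can return an
 * ERR_PTR, which must be checked):
 *
 *	struct i915_hw_ppgtt *ppgtt;
 *
 *	ppgtt = i915_ppgtt_create(dev_priv, file_priv, "name");
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	...
 *	i915_ppgtt_put(ppgtt);
 *
 * The final i915_ppgtt_put() drops the kref and frees the PPGTT through
 * i915_ppgtt_release().
 */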

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
					    struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages);

/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK		BIT(0)
#define PIN_MAPPABLE		BIT(1)
#define PIN_ZONE_4G		BIT(2)
#define PIN_NONFAULT		BIT(3)

#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE		BIT(8)

#define PIN_HIGH		BIT(9)
#define PIN_OFFSET_BIAS		BIT(10)
#define PIN_OFFSET_FIXED	BIT(11)
#define PIN_OFFSET_MASK		(-I915_GTT_PAGE_SIZE)
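/* Example combinations (illustrative): PIN_GLOBAL | PIN_MAPPABLE requests a
 * GGTT binding inside the CPU-mappable aperture, while
 * PIN_GLOBAL | PIN_OFFSET_FIXED | (offset & PIN_OFFSET_MASK) pins at an
 * exact page-aligned offset, the offset being carried in the high bits of
 * the flags word.
 */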

#endif