i915_gem_object.h 13.3 KB
Newer Older
J
Joonas Lahtinen 已提交
1
/*
2
 * SPDX-License-Identifier: MIT
J
Joonas Lahtinen 已提交
3
 *
4
 * Copyright © 2016 Intel Corporation
J
Joonas Lahtinen 已提交
5 6 7 8 9 10
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
J
Jani Nikula 已提交
11 12
#include <drm/drm_file.h>
#include <drm/drm_device.h>
J
Joonas Lahtinen 已提交
13 14 15

#include <drm/i915_drm.h>

16
#include "i915_gem_object_types.h"
J
Joonas Lahtinen 已提交
17

18 19
#include "i915_gem_gtt.h"

20 21
void i915_gem_init__objects(struct drm_i915_private *i915);

22 23 24
struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

25
void i915_gem_object_init(struct drm_i915_gem_object *obj,
26 27
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
28
struct drm_i915_gem_object *
M
Matthew Auld 已提交
29 30
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
31 32
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
M
Matthew Auld 已提交
33
				       const void *data, resource_size_t size);
34 35 36 37 38 39

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

40 41
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

42 43 44 45 46
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

47 48 49 50
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

J
Joonas Lahtinen 已提交
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	/* No reference is taken: the caller must hold the RCU read lock. */
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

/**
 * i915_gem_object_lookup - look up a GEM object and acquire a reference
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns a referenced pointer to the object named by @handle, or NULL if
 * no such object exists on @file or it is already on its way to being
 * destroyed. The caller must balance with i915_gem_object_put().
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *found;

	rcu_read_lock();
	found = i915_gem_object_lookup_rcu(file, handle);
	if (found && !kref_get_unless_zero(&found->base.refcount))
		found = NULL;
	rcu_read_unlock();

	return found;
}

__deprecated
86
struct drm_gem_object *
J
Joonas Lahtinen 已提交
87 88 89 90 91 92
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
93
	drm_gem_object_get(&obj->base);
J
Joonas Lahtinen 已提交
94 95 96 97 98 99 100
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
101
	__drm_gem_object_put(&obj->base);
J
Joonas Lahtinen 已提交
102 103
}

/* Assert (under lockdep) that the object's dma_resv lock is held. */
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)

106 107
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
108
	dma_resv_lock(obj->base.resv, NULL);
109 110
}

111 112 113 114 115
/* Attempt to take the object's dma_resv lock; true if it was acquired. */
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	struct dma_resv *resv = obj->base.resv;

	return dma_resv_trylock(resv);
}

116 117 118
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
119
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
120 121
}

122 123
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
124
	dma_resv_unlock(obj->base.resv);
125 126
}

127 128 129 130 131
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

132 133 134 135 136 137 138 139 140 141 142 143
/*
 * Flag the object's mmap node as read-only.
 * NOTE(review): presumably enforced when userspace faults in a mapping —
 * confirm against the fault handlers.
 */
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
	obj->base.vma_node.readonly = true;
}

/* Query whether the object was marked read-only via set_readonly(). */
static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
	return obj->base.vma_node.readonly;
}

144 145 146 147 148 149
/* True when the object was allocated with physically contiguous backing. */
static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return (obj->flags & I915_BO_ALLOC_CONTIGUOUS) != 0;
}

150 151 152 153 154 155 156 157 158 159 160 161
/* True when the object carries the I915_BO_ALLOC_VOLATILE allocation flag. */
static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return (obj->flags & I915_BO_ALLOC_VOLATILE) != 0;
}

/* Mark the object volatile by setting I915_BO_ALLOC_VOLATILE in its flags. */
static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

162 163 164 165 166 167 168
/* Test whether the object's backing-store ops advertise any of @flags. */
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return (obj->ops->flags & flags) != 0;
}

J
Joonas Lahtinen 已提交
169 170 171
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
172
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
J
Joonas Lahtinen 已提交
173 174 175 176 177
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
178
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
J
Joonas Lahtinen 已提交
179 180
}

T
Tina Zhang 已提交
181 182 183
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
184
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
T
Tina Zhang 已提交
185 186
}

187 188 189
static inline bool
i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
{
190
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
191 192
}

193 194 195
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
196
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
197 198
}

199 200 201
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
202
	return READ_ONCE(obj->frontbuffer);
203 204
}

J
Joonas Lahtinen 已提交
205
static inline unsigned int
206
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
207 208 209 210 211
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
212
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
213 214 215 216 217
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
218
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
219 220 221 222
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

223 224 225 226 227 228 229 230
/*
 * Rows per tile for the given tiling mode: 32 for Y-major, 8 otherwise.
 * @tiling must not be I915_TILING_NONE (asserted).
 */
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);

	if (tiling == I915_TILING_Y)
		return 32;

	return 8;
}

/* Rows per tile for this object's current tiling mode. */
static inline unsigned int
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

/* Size in bytes of one full row of tiles: stride * rows-per-tile. */
static inline unsigned int
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

243 244 245
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);
270 271

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

/*
 * Pin the object's backing pages, acquiring them first if none are
 * present. Returns 0 on success or a negative error code from
 * __i915_gem_object_get_pages().
 */
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	might_lock(&obj->mm.lock);

	/* Fast path: pages already present with a non-zero pin count. */
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return __i915_gem_object_get_pages(obj);

	return 0;
}

/* True when backing pages are attached (mm.pages valid, not an ERR_PTR). */
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages = READ_ONCE(obj->mm.pages);

	return !IS_ERR_OR_NULL(pages);
}

/* Take an extra page pin; the pages must already be attached. */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	atomic_inc(&obj->mm.pages_pin_count);
}

/* True when at least one page pin is currently outstanding. */
static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count) != 0;
}

/* Drop one page pin; pages must be attached and pinned at least once. */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	atomic_dec(&obj->mm.pages_pin_count);
}

/* Public wrapper around __i915_gem_object_unpin_pages(). */
static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque. Safe because there is only every one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 *
	 * NOTE(review): intentionally shares the value 1 with I915_MM_SHRINKER
	 * — both name the same lockdep nesting level for their respective
	 * locks; confirm against the mm.lock/shrinker callers before changing.
	 */
	I915_MM_GET_PAGES = 1,
};

337
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
338 339
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389

/* Cache mode requested when mapping an object via i915_gem_object_pin_map(). */
enum i915_map_type {
	I915_MAP_WB = 0,	/* write-back */
	I915_MAP_WC,		/* write-combine */
#define I915_MAP_OVERRIDE BIT(31)
	/*
	 * NOTE(review): the OVERRIDE bit presumably forces a fresh mapping
	 * even when a mapping of the other type exists — confirm in the
	 * pin_map implementation.
	 */
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
/* Flush the entire object through its kernel mapping (offset 0 to size). */
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	unsigned long len = obj->base.size;

	__i915_gem_object_flush_map(obj, 0, len);
}

/**
 * i915_gem_object_unpin_map - release a mapping taken with pin_map
 * @obj: the object whose kernel mapping is no longer needed
 *
 * Drops the page pin acquired by i915_gem_object_pin_map(). Once the pin
 * count reaches zero, the kernel mapping becomes eligible for removal.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

390 391 392 393 394 395 396
void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj);

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

397 398 399 400 401 402 403 404 405 406 407 408
int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

/* Drop the page pin and object lock taken by prepare_read()/prepare_write(). */
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}

J
Joonas Lahtinen 已提交
412 413 414 415 416 417 418
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
419
	fence = dma_resv_get_excl_rcu(obj->base.resv);
J
Joonas Lahtinen 已提交
420 421 422 423 424 425 426 427 428
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

429 430
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
431 432
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

433 434 435 436 437 438 439 440 441 442 443 444 445
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

446 447 448 449
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

450 451 452 453 454 455 456 457
/*
 * Decide whether CPU writes to @obj must be followed by a clflush:
 * not if the cache is already tracked dirty; always if the cache is not
 * coherent for writes; otherwise only while the object backs a display
 * framebuffer.
 */
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
}

/* Move domain tracking to the CPU ahead of a CPU write, dirtying the
 * cache state when a later clflush will be required. */
static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}
469

470 471 472 473 474 475 476
int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

477
#endif