/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <drm/drm_gem.h>
#include <drm/drm_file.h>
#include <drm/drm_device.h>

#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
#include "i915_vma_types.h"
19 20
void i915_gem_init__objects(struct drm_i915_private *i915);

21 22 23
struct drm_i915_gem_object *i915_gem_object_alloc(void);
void i915_gem_object_free(struct drm_i915_gem_object *obj);

24
void i915_gem_object_init(struct drm_i915_gem_object *obj,
25 26
			  const struct drm_i915_gem_object_ops *ops,
			  struct lock_class_key *key);
27
struct drm_i915_gem_object *
M
Matthew Auld 已提交
28 29
i915_gem_object_create_shmem(struct drm_i915_private *i915,
			     resource_size_t size);
30 31
struct drm_i915_gem_object *
i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915,
M
Matthew Auld 已提交
32
				       const void *data, resource_size_t size);
33 34 35 36 37 38

extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops;
void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				     struct sg_table *pages,
				     bool needs_clflush);

39 40
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align);

41 42 43 44 45
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
void i915_gem_free_object(struct drm_gem_object *obj);

void i915_gem_flush_free_objects(struct drm_i915_private *i915);

46 47 48 49
struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj);
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);

J
Joonas Lahtinen 已提交
50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @filp: DRM file private date
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @filp, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}

70 71 72 73 74 75 76 77 78
static inline struct drm_i915_gem_object *
i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
{
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;

	return obj;
}

J
Joonas Lahtinen 已提交
79 80 81 82 83 84 85
static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
86
	obj = i915_gem_object_get_rcu(obj);
J
Joonas Lahtinen 已提交
87 88 89 90 91 92
	rcu_read_unlock();

	return obj;
}

__deprecated
93
struct drm_gem_object *
J
Joonas Lahtinen 已提交
94 95 96 97 98 99
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
100
	drm_gem_object_get(&obj->base);
J
Joonas Lahtinen 已提交
101 102 103 104 105 106 107
	return obj;
}

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
108
	__drm_gem_object_put(&obj->base);
J
Joonas Lahtinen 已提交
109 110
}

111
#define assert_object_held(obj) dma_resv_assert_held((obj)->base.resv)
112

113 114
static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
115
	dma_resv_lock(obj->base.resv, NULL);
116 117
}

118 119 120 121 122
static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj)
{
	return dma_resv_trylock(obj->base.resv);
}

123 124 125
static inline int
i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj)
{
126
	return dma_resv_lock_interruptible(obj->base.resv, NULL);
127 128
}

129 130
static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
131
	dma_resv_unlock(obj->base.resv);
132 133
}

134 135 136 137 138
struct dma_fence *
i915_gem_object_lock_fence(struct drm_i915_gem_object *obj);
void i915_gem_object_unlock_fence(struct drm_i915_gem_object *obj,
				  struct dma_fence *fence);

139 140 141
static inline void
i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
{
142
	obj->flags |= I915_BO_READONLY;
143 144 145 146 147
}

static inline bool
i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
{
148
	return obj->flags & I915_BO_READONLY;
149 150
}

151 152 153 154 155 156
static inline bool
i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_CONTIGUOUS;
}

157 158 159 160 161 162 163 164 165 166 167 168
static inline bool
i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj)
{
	return obj->flags & I915_BO_ALLOC_VOLATILE;
}

static inline void
i915_gem_object_set_volatile(struct drm_i915_gem_object *obj)
{
	obj->flags |= I915_BO_ALLOC_VOLATILE;
}

169 170 171 172 173 174 175
static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
			 unsigned long flags)
{
	return obj->ops->flags & flags;
}

J
Joonas Lahtinen 已提交
176 177 178
static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
179
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE);
J
Joonas Lahtinen 已提交
180 181 182 183 184
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
185
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
J
Joonas Lahtinen 已提交
186 187
}

T
Tina Zhang 已提交
188 189 190
static inline bool
i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
{
191
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY);
T
Tina Zhang 已提交
192 193
}

194
static inline bool
195
i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
196
{
197
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
198 199
}

200 201 202
static inline bool
i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj)
{
203
	return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL);
204 205
}

206 207 208
static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
209
	return READ_ONCE(obj->frontbuffer);
210 211
}

J
Joonas Lahtinen 已提交
212
static inline unsigned int
213
i915_gem_object_get_tiling(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
214 215 216 217 218
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
219
i915_gem_object_is_tiled(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
220 221 222 223 224
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
225
i915_gem_object_get_stride(const struct drm_i915_gem_object *obj)
J
Joonas Lahtinen 已提交
226 227 228 229
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

230 231 232 233 234 235 236 237
static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
238
i915_gem_object_get_tile_height(const struct drm_i915_gem_object *obj)
239 240 241 242 243
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
244
i915_gem_object_get_tile_row_size(const struct drm_i915_gem_object *obj)
245 246 247 248 249
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}

250 251 252
int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

/* Accessors into the object's backing sg_table / page array. */
struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n, unsigned int *offset);

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj,
			 unsigned int n);

struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n);

dma_addr_t
i915_gem_object_get_dma_address_len(struct drm_i915_gem_object *obj,
				    unsigned long n,
				    unsigned int *len);

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n);

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes);

int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
	I915_MM_NORMAL = 0,
	/*
	 * Only used by struct_mutex, when called "recursively" from
	 * direct-reclaim-esque. Safe because there is only ever one
	 * struct_mutex in the entire system.
	 */
	I915_MM_SHRINKER = 1,
	/*
	 * Used for obj->mm.lock when allocating pages. Safe because the object
	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
	 * it. As soon as the object has pages, obj->mm.lock nests within
	 * fs_reclaim.
	 */
	I915_MM_GET_PAGES = 1,
};

298 299 300
static inline int __must_check
i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
301
	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343

	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
		return 0;

	return __i915_gem_object_get_pages(obj);
}

/* Lockless snapshot: true if backing pages are currently attached. */
static inline bool
i915_gem_object_has_pages(struct drm_i915_gem_object *obj)
{
	return !IS_ERR_OR_NULL(READ_ONCE(obj->mm.pages));
}

/* Add a pin; caller must already know pages are present (GEM_BUG_ON). */
static inline void
__i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	atomic_inc(&obj->mm.pages_pin_count);
}

static inline bool
i915_gem_object_has_pinned_pages(struct drm_i915_gem_object *obj)
{
	return atomic_read(&obj->mm.pages_pin_count);
}

/* Drop a pin; pages must exist and be pinned at least once. */
static inline void
__i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	atomic_dec(&obj->mm.pages_pin_count);
}

static inline void
i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_unpin_pages(obj);
}

344
int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
345 346
void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396

enum i915_map_type {
	I915_MAP_WB = 0,
	I915_MAP_WC,
#define I915_MAP_OVERRIDE BIT(31)
	I915_MAP_FORCE_WB = I915_MAP_WB | I915_MAP_OVERRIDE,
	I915_MAP_FORCE_WC = I915_MAP_WC | I915_MAP_OVERRIDE,
};

/**
 * i915_gem_object_pin_map - return a contiguous mapping of the entire object
 * @obj: the object to map into kernel address space
 * @type: the type of mapping, used to select pgprot_t
 *
 * Calls i915_gem_object_pin_pages() to prevent reaping of the object's
 * pages and then returns a contiguous mapping of the backing storage into
 * the kernel address space. Based on the @type of mapping, the PTE will be
 * set to either WriteBack or WriteCombine (via pgprot_t).
 *
 * The caller is responsible for calling i915_gem_object_unpin_map() when the
 * mapping is no longer required.
 *
 * Returns the pointer through which to access the mapped object, or an
 * ERR_PTR() on error.
 */
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
					   enum i915_map_type type);

void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
				 unsigned long offset,
				 unsigned long size);
/* Flush the whole of a pin_map()ed object (offset 0 .. obj->base.size). */
static inline void i915_gem_object_flush_map(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_flush_map(obj, 0, obj->base.size);
}

/**
 * i915_gem_object_unpin_map - releases an earlier mapping
 * @obj: the object to unmap
 *
 * After pinning the object and mapping its pages, once you are finished
 * with your access, call i915_gem_object_unpin_map() to release the pin
 * upon the mapping. Once the pin count reaches zero, that mapping may be
 * removed.
 */
static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

void
i915_gem_object_flush_write_domain(struct drm_i915_gem_object *obj,
				   unsigned int flush_domains);

int i915_gem_object_prepare_read(struct drm_i915_gem_object *obj,
				 unsigned int *needs_clflush);
int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
				  unsigned int *needs_clflush);
#define CLFLUSH_BEFORE	BIT(0)
#define CLFLUSH_AFTER	BIT(1)
#define CLFLUSH_FLAGS	(CLFLUSH_BEFORE | CLFLUSH_AFTER)

/* Counterpart to prepare_read/prepare_write: drop the pin and the lock. */
static inline void
i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
	i915_gem_object_unlock(obj);
}

J
Joonas Lahtinen 已提交
416 417 418 419 420 421 422
static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
423
	fence = dma_resv_get_excl_rcu(obj->base.resv);
J
Joonas Lahtinen 已提交
424 425 426 427 428 429 430 431 432
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}

433 434
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
435 436
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

437 438 439 440 441 442 443 444 445 446 447 448 449
int __must_check
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
struct i915_vma * __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags);
void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma);

450 451 452 453
void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj);
void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj);

454 455 456 457 458 459 460 461
static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

462 463
	/* Currently in use by HW (display engine)? Keep flushed. */
	return i915_gem_object_is_framebuffer(obj);
464 465 466 467 468 469 470 471 472
}

static inline void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}
473

int i915_gem_object_wait(struct drm_i915_gem_object *obj,
			 unsigned int flags,
			 long timeout);
int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
				  unsigned int flags,
				  const struct i915_sched_attr *attr);

void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
					 enum fb_op_origin origin);
void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
					      enum fb_op_origin origin);

/* Only take the slow path when frontbuffer tracking is actually attached. */
static inline void
i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
				  enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_flush_frontbuffer(obj, origin);
}

static inline void
i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
				       enum fb_op_origin origin)
{
	if (unlikely(rcu_access_pointer(obj->frontbuffer)))
		__i915_gem_object_invalidate_frontbuffer(obj, origin);
}

#endif /* __I915_GEM_OBJECT_H__ */