/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the CPU, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls.  However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */

int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
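
/*
 * Example (illustrative sketch, not part of the original file): drivers
 * usually embed struct drm_gem_object in their own buffer type and call
 * drm_gem_object_init() right after allocating it. "foo_bo" and
 * "foo_gem_create" are hypothetical names; note the size must be
 * page-aligned, since drm_gem_private_object_init() BUG_ON()s otherwise.
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct foo_bo *foo_gem_create(struct drm_device *dev, size_t size)
 *	{
 *		struct foo_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		ret = drm_gem_object_init(dev, &bo->base, round_up(size, PAGE_SIZE));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */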

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
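
/*
 * Example (illustrative sketch, not part of the original file): the private
 * variant is what dma-buf import paths typically use, since the backing
 * storage comes from the exporter rather than shmfs. Here "attach" is
 * assumed to be the dma-buf attachment and "size" already page-aligned.
 *
 *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *	if (!obj)
 *		return ERR_PTR(-ENOMEM);
 *
 *	drm_gem_private_object_init(dev, obj, size);
 *	obj->import_attach = attach;
 */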

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
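
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * buffer-create ioctl combining drm_gem_object_init() with
 * drm_gem_handle_create(). "foo_gem_create_ioctl" and "drm_foo_gem_create"
 * are hypothetical names. The handle holds its own reference, so the
 * creation reference is dropped before returning, just like
 * drm_gem_open_ioctl() below does.
 *
 *	static int foo_gem_create_ioctl(struct drm_device *dev, void *data,
 *					struct drm_file *file_priv)
 *	{
 *		struct drm_foo_gem_create *args = data;
 *		struct drm_gem_object *obj;
 *		u32 handle;
 *		int ret;
 *
 *		obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 *		if (!obj)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, obj, round_up(args->size, PAGE_SIZE));
 *		if (ret) {
 *			kfree(obj);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file_priv, obj, &handle);
 *		drm_gem_object_unreference_unlocked(obj);
 *		if (ret)
 *			return ret;
 *
 *		args->handle = handle;
 *		return 0;
 *	}
 */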

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (i.e. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
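
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * dumb_map_offset implementation creates the fake offset and hands
 * drm_vma_node_offset_addr() back to userspace for the subsequent mmap(2).
 * "foo_dumb_map_offset" is a hypothetical name.
 *
 *	static int foo_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
 *				       u32 handle, u64 *offset)
 *	{
 *		struct drm_gem_object *obj;
 *		int ret;
 *
 *		obj = drm_gem_object_lookup(dev, file, handle);
 *		if (!obj)
 *			return -ENOENT;
 *
 *		ret = drm_gem_create_mmap_offset(obj);
 *		if (ret == 0)
 *			*offset = drm_vma_node_offset_addr(&obj->vma_node);
 *		drm_gem_object_unreference_unlocked(obj);
 *		return ret;
 *	}
 */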

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
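
/*
 * Example (illustrative sketch, not part of the original file): pairing
 * drm_gem_get_pages() with drm_gem_put_pages() to pin and later release the
 * shmem backing pages. "foo_bo" with a "pages" member is hypothetical.
 *
 *	static int foo_pin_pages(struct foo_bo *bo)
 *	{
 *		struct page **pages;
 *
 *		pages = drm_gem_get_pages(&bo->base, GFP_KERNEL);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		bo->pages = pages;
 *		return 0;
 *	}
 *
 *	static void foo_unpin_pages(struct foo_bo *bo)
 *	{
 *		drm_gem_put_pages(&bo->base, bo->pages, true, false);
 *		bo->pages = NULL;
 *	}
 */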

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}
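
/*
 * Example (illustrative sketch, not part of the original file): the
 * userspace side of the two ioctls above, sharing one object between two
 * DRM file descriptors via its global name.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	if (ioctl(exporter_fd, DRM_IOCTL_GEM_FLINK, &flink))
 *		err(1, "flink");
 *
 *	struct drm_gem_open open_arg = { .name = flink.name };
 *	if (ioctl(importer_fd, DRM_IOCTL_GEM_OPEN, &open_arg))
 *		err(1, "open");
 *
 *	handle = open_arg.handle;  (open_arg.size gives the object size)
 */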

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
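
/*
 * Example (illustrative sketch, not part of the original file): using
 * drm_gem_mmap_obj() to implement a dma-buf mmap callback, per the DMABUF
 * remark in the comment above. "foo_gem_dmabuf_mmap" is a hypothetical
 * name; the struct_mutex locking follows the NOTE above.
 *
 *	static int foo_gem_dmabuf_mmap(struct dma_buf *dma_buf,
 *				       struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		struct drm_device *dev = obj->dev;
 *		int ret;
 *
 *		mutex_lock(&dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&dev->struct_mutex);
 *
 *		return ret;
 *	}
 */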

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
					   vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
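
/*
 * Example (illustrative sketch, not part of the original file): userspace
 * consumes the fake offset by passing it straight to mmap(2) on the DRM fd;
 * the exact-lookup in drm_gem_mmap() above resolves it back to the object.
 *
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, (off_t)fake_offset);
 */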