/*
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <drm/drm_client.h>
#include <drm/drm_file.h>
#include <drm/drmP.h>

#include "drm_legacy.h"
#include "drm_internal.h"
#include "drm_crtc_internal.h"

/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);

/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The resulting &struct file_operations must be
 * stored in the &drm_driver.fops field. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl() if CONFIG_COMPAT is enabled.
 * Note that drm_compat_ioctl will be NULL if CONFIG_COMPAT=n, so there's no
 * need to sprinkle #ifdef into the code. Drivers which implement private ioctls
 * that require 32/64 bit compatibility support must provide their own
 * &file_operations.compat_ioctl handler that processes private ioctls and calls
 * drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank events and
 * page flip completions by the KMS API. But drivers can also use them for their
 * own needs, e.g. to signal completion of rendering.
 *
 * For the driver-side event interface see drm_event_reserve_init() and
 * drm_send_event() as the main starting points.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function; modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and
 * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
 * following is an example &file_operations structure::
 *
 *     static const struct file_operations example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *             .compat_ioctl = drm_compat_ioctl, // NULL if CONFIG_COMPAT=n
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 *
 * For plain GEM based drivers there is the DEFINE_DRM_GEM_FOPS() macro, and for
 * CMA based drivers there is the DEFINE_DRM_GEM_CMA_FOPS() macro to make this
 * simpler.
 *
 * The driver's &file_operations must be stored in &drm_driver.fops.
 *
 * For driver-private IOCTL handling see the more detailed discussion in
 * :ref:`IOCTL support in the userland interfaces chapter<drm_driver_ioctl>`.
 */
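
/*
 * A minimal usage sketch of the DEFINE_DRM_GEM_FOPS() macro mentioned above,
 * for a hypothetical "example" driver; the macro expands to a &struct
 * file_operations filled in like the example in the DOC comment:
 *
 *     DEFINE_DRM_GEM_FOPS(example_drm_driver_fops);
 *
 *     static struct drm_driver example_drm_driver = {
 *             ...
 *             .fops = &example_drm_driver_fops,
 *     };
 */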

static int drm_open_helper(struct file *filp, struct drm_minor *minor);

/**
 * drm_file_alloc - allocate file context
 * @minor: minor to allocate on
 *
 * This allocates a new DRM file context. It is not linked into any context and
 * can be used by the caller freely. Note that the context keeps a pointer to
 * @minor, so it must be freed before @minor is.
 *
 * RETURNS:
 * Pointer to newly allocated context, ERR_PTR on failure.
 */
struct drm_file *drm_file_alloc(struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *file;
	int ret;

	file = kzalloc(sizeof(*file), GFP_KERNEL);
	if (!file)
		return ERR_PTR(-ENOMEM);

	file->pid = get_pid(task_pid(current));
	file->minor = minor;

	/* for compatibility root is always authenticated */
	file->authenticated = capable(CAP_SYS_ADMIN);
	file->lock_count = 0;

	INIT_LIST_HEAD(&file->lhead);
	INIT_LIST_HEAD(&file->fbs);
	mutex_init(&file->fbs_lock);
	INIT_LIST_HEAD(&file->blobs);
	INIT_LIST_HEAD(&file->pending_event_list);
	INIT_LIST_HEAD(&file->event_list);
	init_waitqueue_head(&file->event_wait);
	file->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&file->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, file);

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_open(file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&file->prime);

	if (dev->driver->open) {
		ret = dev->driver->open(dev, file);
		if (ret < 0)
			goto out_prime_destroy;
	}

	return file;

out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);
	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);
	put_pid(file->pid);
	kfree(file);

	return ERR_PTR(ret);
}

static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

	/* Remove unconsumed events */
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
		kfree(e);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * drm_file_free - free file context
 * @file: context to free, or NULL
 *
 * This destroys and deallocates a DRM file context previously allocated via
 * drm_file_alloc(). The caller must make sure to unlink it from any contexts
 * before calling this.
 *
 * If NULL is passed, this is a no-op.
 */
void drm_file_free(struct drm_file *file)
{
	struct drm_device *dev;

	if (!file)
		return;

	dev = file->minor->dev;

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file->minor->kdev->devt),
		  dev->open_count);

	if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
	    dev->driver->preclose)
		dev->driver->preclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_lock_release(dev, file->filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file);

	drm_events_release(file);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file);
		drm_property_destroy_user_blobs(dev, file);
	}

	if (drm_core_check_feature(dev, DRIVER_SYNCOBJ))
		drm_syncobj_release(file);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file);

	drm_legacy_ctxbitmap_flush(dev, file);

	if (drm_is_primary_client(file))
		drm_master_release(file);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file->prime);

	WARN_ON(!list_empty(&file->event_list));

	put_pid(file->pid);
	kfree(file);
}

static int drm_setup(struct drm_device *dev)
{
	int ret;

	if (dev->driver->firstopen &&
	    drm_core_check_feature(dev, DRIVER_LEGACY)) {
		ret = dev->driver->firstopen(dev);
		if (ret != 0)
			return ret;
	}

	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;

	DRM_DEBUG("\n");
	return 0;
}

/**
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.open method.
 * It looks up the correct DRM device and instantiates all the per-file
 * resources for it. It also calls the &drm_driver.open driver callback.
 *
 * RETURNS:
 *
 * 0 on success or negative errno value on failure.
 */
int drm_open(struct inode *inode, struct file *filp)
{
	struct drm_device *dev;
	struct drm_minor *minor;
	int retcode;
	int need_setup = 0;

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	dev = minor->dev;
	if (!dev->open_count++)
		need_setup = 1;

	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;

	retcode = drm_open_helper(filp, minor);
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode)
			goto err_undo;
	}
	return 0;

err_undo:
	dev->open_count--;
	drm_minor_release(minor);
	return retcode;
}
EXPORT_SYMBOL(drm_open);

/*
 * Check whether DRI will run on this CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	return 0;		/* No cmpxchg before v9 sparc. */
#endif
	return 1;
}

/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and adds it to the doubly linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON &&
	    dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = drm_file_alloc(minor);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	if (drm_is_primary_client(priv)) {
		ret = drm_master_open(priv);
		if (ret) {
			drm_file_free(priv);
			return ret;
		}
	}

	filp->private_data = priv;
	filp->f_mode |= FMODE_UNSIGNED_OFFSET;
	priv->filp = filp;

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;
}

static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_vma_flush(dev);
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	DRM_DEBUG("lastclose completed\n");
}

void drm_lastclose(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_legacy_dev_reinit(dev);

	drm_client_dev_restore(dev);
}

/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their &file_operations.release
 * method. It frees any resources associated with the open file, and calls the
 * &drm_driver.postclose driver callback. If this is the last open file for the
 * DRM device, it also calls the &drm_driver.lastclose driver callback.
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	drm_file_free(file_priv);

	if (!--dev->open_count)
		drm_lastclose(dev);

	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their &file_operations.read
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored; DRM events are read like a pipe. Therefore drivers also
 * must set the &file_operations.llseek to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				wake_up_interruptible(&file_priv->event_wait);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);
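
/*
 * A minimal userspace sketch (not kernel code) of consuming events via read(),
 * assuming "fd" is an open DRM file descriptor and handle_vblank() is a
 * hypothetical handler; a 4K buffer is large enough for any event:
 *
 *     char buf[4096];
 *     ssize_t len = read(fd, buf, sizeof(buf));
 *     ssize_t off = 0;
 *
 *     while (len > 0 && off < len) {
 *             struct drm_event *e = (struct drm_event *)(buf + off);
 *
 *             if (e->type == DRM_EVENT_VBLANK)
 *                     handle_vblank((struct drm_event_vblank *)e);
 *             off += e->length;
 *     }
 */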

/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their &file_operations.poll method
 * iff they use DRM events for asynchronous signalling to userspace.  Since
 * events are used by the KMS API for vblank and page flip completion this means
 * all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct drm_file *file_priv = filp->private_data;
	__poll_t mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
EXPORT_SYMBOL(drm_poll);
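
/*
 * A minimal userspace sketch (not kernel code) of waiting for events with
 * poll(), assuming "fd" is an open DRM file descriptor; once POLLIN is set the
 * pending events can be consumed with read() as sketched above:
 *
 *     struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *     if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             // events are ready; consume them with read()
 *     }
 */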

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold &drm_device.event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embed @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold &drm_device.event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);
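
/*
 * A minimal driver-side sketch of reserving a custom event, assuming a
 * hypothetical "example" driver and IOCTL; the &struct drm_pending_event must
 * be the first member of the kmalloc'ed wrapper:
 *
 *     struct example_pending_event {
 *             struct drm_pending_event base;
 *             struct drm_event event;
 *     };
 *
 *     struct example_pending_event *p;
 *     int ret;
 *
 *     p = kzalloc(sizeof(*p), GFP_KERNEL);
 *     if (!p)
 *             return -ENOMEM;
 *
 *     p->event.type = 0x80000000;        // hypothetical driver-private type
 *     p->event.length = sizeof(p->event);
 *
 *     ret = drm_event_reserve_init(dev, file_priv, &p->base, &p->event);
 *     if (ret) {
 *             kfree(p);
 *             return ret;
 *     }
 */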

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space. It is used to cancel an event when the
 * nonblocking operation could not be submitted and needed to be aborted.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->event_lock, flags);
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	if (p->fence)
		dma_fence_put(p->fence);

	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
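
/*
 * If the operation is aborted before the event was sent (e.g. in an IOCTL
 * error path), the reservation from the sketch above can be undone with:
 *
 *     drm_event_cancel_free(dev, &p->base);
 */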

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * &drm_device.event_lock; see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		complete_all(e->completion);
		e->completion_release(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		dma_fence_signal(e->fence);
		dma_fence_put(e->fence);
	}

	if (!e->file_priv) {
		kfree(e);
		return;
	}

	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires
 * &drm_device.event_lock; see drm_send_event_locked() for callers which already
 * hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
}
EXPORT_SYMBOL(drm_send_event);
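
/*
 * A minimal sketch of completing the event reserved in the example above, e.g.
 * from interrupt or work-queue context once the asynchronous operation has
 * finished; the names are hypothetical:
 *
 *     drm_send_event(dev, &p->base);
 *
 * or, if &drm_device.event_lock is already held at that point:
 *
 *     drm_send_event_locked(dev, &p->base);
 *
 * Either way the event data is delivered to userspace through drm_read() and
 * the pending event structure is freed by the core afterwards.
 */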