drm_fops.c 21.5 KB
Newer Older
D
Daniel Vetter 已提交
1
/*
D
Dave Airlie 已提交
2
 * \file drm_fops.c
L
Linus Torvalds 已提交
3
 * File operations for DRM
D
Dave Airlie 已提交
4
 *
L
Linus Torvalds 已提交
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Daryll Strauss <daryll@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

37
#include <drm/drmP.h>
L
Linus Torvalds 已提交
38
#include <linux/poll.h>
39
#include <linux/slab.h>
40
#include <linux/module.h>
41
#include "drm_legacy.h"
42
#include "drm_internal.h"
L
Linus Torvalds 已提交
43

44
/* from BKL pushdown */
A
Arnd Bergmann 已提交
45 46
DEFINE_MUTEX(drm_global_mutex);

D
Daniel Vetter 已提交
47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69
/**
 * DOC: file operations
 *
 * Drivers must define the file operations structure that forms the DRM
 * userspace API entry point, even though most of those operations are
 * implemented in the DRM core. The mandatory functions are drm_open(),
 * drm_read(), drm_ioctl() and drm_compat_ioctl if CONFIG_COMPAT is enabled.
 * Drivers which implement private ioctls that require 32/64 bit compatibility
 * support must provide their own .compat_ioctl() handler that processes
 * private ioctls and calls drm_compat_ioctl() for core ioctls.
 *
 * In addition drm_read() and drm_poll() provide support for DRM events. DRM
 * events are a generic and extensible means to send asynchronous events to
 * userspace through the file descriptor. They are used to send vblank events
 * and
 * page flip completions by the KMS API. But drivers can also use it for their
 * own needs, e.g. to signal completion of rendering.
 *
 * The memory mapping implementation will vary depending on how the driver
 * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap()
 * function, modern drivers should use one of the provided memory-manager
 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
 *
 * No other file operations are supported by the DRM userspace API. Overall the
70
 * following is an example #file_operations structure::
D
Daniel Vetter 已提交
71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86
 *
 *     static const example_drm_fops = {
 *             .owner = THIS_MODULE,
 *             .open = drm_open,
 *             .release = drm_release,
 *             .unlocked_ioctl = drm_ioctl,
 *     #ifdef CONFIG_COMPAT
 *             .compat_ioctl = drm_compat_ioctl,
 *     #endif
 *             .poll = drm_poll,
 *             .read = drm_read,
 *             .llseek = no_llseek,
 *             .mmap = drm_gem_mmap,
 *     };
 */

87
static int drm_open_helper(struct file *filp, struct drm_minor *minor);
D
Dave Airlie 已提交
88

89
static int drm_setup(struct drm_device * dev)
L
Linus Torvalds 已提交
90 91 92
{
	int ret;

93 94
	if (dev->driver->firstopen &&
	    !drm_core_check_feature(dev, DRIVER_MODESET)) {
95
		ret = dev->driver->firstopen(dev);
D
Dave Airlie 已提交
96
		if (ret != 0)
L
Linus Torvalds 已提交
97 98 99
			return ret;
	}

100 101 102
	ret = drm_legacy_dma_setup(dev);
	if (ret < 0)
		return ret;
L
Linus Torvalds 已提交
103 104


D
Dave Airlie 已提交
105
	DRM_DEBUG("\n");
L
Linus Torvalds 已提交
106 107 108 109
	return 0;
}

/**
D
Daniel Vetter 已提交
110 111 112
 * drm_open - open method for DRM file
 * @inode: device inode
 * @filp: file pointer.
D
Dave Airlie 已提交
113
 *
D
Daniel Vetter 已提交
114 115 116 117 118
 * This function must be used by drivers as their .open() #file_operations
 * method. It looks up the correct DRM device and instantiates all the per-file
 * resources for it.
 *
 * RETURNS:
L
Linus Torvalds 已提交
119
 *
D
Daniel Vetter 已提交
120
 * 0 on success or negative errno value on falure.
L
Linus Torvalds 已提交
121
 */
D
Dave Airlie 已提交
122
int drm_open(struct inode *inode, struct file *filp)
L
Linus Torvalds 已提交
123
{
124
	struct drm_device *dev;
125
	struct drm_minor *minor;
126
	int retcode;
127
	int need_setup = 0;
D
Dave Airlie 已提交
128

129 130 131
	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);
132

133
	dev = minor->dev;
134 135 136
	if (!dev->open_count++)
		need_setup = 1;

137 138
	/* share address_space across all char-devs of a single device */
	filp->f_mapping = dev->anon_inode->i_mapping;
139

140
	retcode = drm_open_helper(filp, minor);
141 142 143 144 145 146
	if (retcode)
		goto err_undo;
	if (need_setup) {
		retcode = drm_setup(dev);
		if (retcode)
			goto err_undo;
D
Dave Airlie 已提交
147
	}
148
	return 0;
J
Jesse Barnes 已提交
149

150 151
err_undo:
	dev->open_count--;
152
	drm_minor_release(minor);
L
Linus Torvalds 已提交
153 154 155 156
	return retcode;
}
EXPORT_SYMBOL(drm_open);

D
Daniel Vetter 已提交
157
/*
 * Check whether the DRI can run on the current CPU.
 *
 * \return non-zero if the DRI will run on this CPU, or zero otherwise.
 */
static int drm_cpu_valid(void)
{
#if defined(__sparc__) && !defined(__sparc_v9__)
	/* No cmpxchg before v9 sparc. */
	return 0;
#else
	return 1;
#endif
}

D
Daniel Vetter 已提交
170
/*
 * Called whenever a process opens /dev/drm.
 *
 * \param filp file pointer.
 * \param minor acquired minor-object.
 * \return zero on success or a negative number on failure.
 *
 * Creates and initializes a drm_file structure for the file private data in \p
 * filp and add it into the double linked list in \p dev.
 */
static int drm_open_helper(struct file *filp, struct drm_minor *minor)
{
	struct drm_device *dev = minor->dev;
	struct drm_file *priv;
	int ret;

	if (filp->f_flags & O_EXCL)
		return -EBUSY;	/* No exclusive opens */
	if (!drm_cpu_valid())
		return -EINVAL;
	/* Refuse opens while the device is switched away by vga_switcheroo. */
	if (dev->switch_power_state != DRM_SWITCH_POWER_ON && dev->switch_power_state != DRM_SWITCH_POWER_DYNAMIC_OFF)
		return -EINVAL;

	DRM_DEBUG("pid = %d, minor = %d\n", task_pid_nr(current), minor->index);

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	filp->private_data = priv;
	priv->filp = filp;
	priv->uid = current_euid();
	priv->pid = get_pid(task_pid(current));
	priv->minor = minor;

	/* for compatibility root is always authenticated */
	priv->authenticated = capable(CAP_SYS_ADMIN);
	priv->lock_count = 0;

	INIT_LIST_HEAD(&priv->lhead);
	INIT_LIST_HEAD(&priv->fbs);
	mutex_init(&priv->fbs_lock);
	INIT_LIST_HEAD(&priv->blobs);
	INIT_LIST_HEAD(&priv->pending_event_list);
	INIT_LIST_HEAD(&priv->event_list);
	init_waitqueue_head(&priv->event_wait);
	priv->event_space = 4096; /* set aside 4k for event buffer */

	mutex_init(&priv->event_read_lock);

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_open(dev, priv);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_init_file_private(&priv->prime);

	/* Give the driver a chance to set up its own per-file state. */
	if (dev->driver->open) {
		ret = dev->driver->open(dev, priv);
		if (ret < 0)
			goto out_prime_destroy;
	}

	/* if there is no current master make this fd it, but do not create
	 * any master object for render clients */
	mutex_lock(&dev->master_mutex);
	if (drm_is_primary_client(priv) && !priv->minor->master) {
		/* create a new master */
		ret = drm_new_set_master(dev, priv);
		if (ret)
			goto out_close;
	} else if (drm_is_primary_client(priv)) {
		/* get a reference to the master */
		priv->master = drm_master_get(priv->minor->master);
	}
	mutex_unlock(&dev->master_mutex);

	mutex_lock(&dev->filelist_mutex);
	list_add(&priv->lhead, &dev->filelist);
	mutex_unlock(&dev->filelist_mutex);

#ifdef __alpha__
	/*
	 * Default the hose
	 */
	if (!dev->hose) {
		struct pci_dev *pci_dev;
		pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
		if (pci_dev) {
			dev->hose = pci_dev->sysdata;
			pci_dev_put(pci_dev);
		}
		if (!dev->hose) {
			struct pci_bus *b = list_entry(pci_root_buses.next,
				struct pci_bus, node);
			if (b)
				dev->hose = b->sysdata;
		}
	}
#endif

	return 0;

	/* Error unwind: undo everything done above in reverse order. */
out_close:
	mutex_unlock(&dev->master_mutex);
	if (dev->driver->postclose)
		dev->driver->postclose(dev, priv);
out_prime_destroy:
	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&priv->prime);
	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, priv);
	put_pid(priv->pid);
	kfree(priv);
	filp->private_data = NULL;
	return ret;
}

287 288 289 290 291 292 293 294
static void drm_events_release(struct drm_file *file_priv)
{
	struct drm_device *dev = file_priv->minor->dev;
	struct drm_pending_event *e, *et;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

295 296 297 298 299 300 301
	/* Unlink pending events */
	list_for_each_entry_safe(e, et, &file_priv->pending_event_list,
				 pending_link) {
		list_del(&e->pending_link);
		e->file_priv = NULL;
	}

302
	/* Remove unconsumed events */
303 304
	list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
		list_del(&e->link);
305
		kfree(e);
306
	}
307 308 309 310

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

D
Daniel Vetter 已提交
311
/*
 * drm_legacy_dev_reinit
 *
 * Reinitializes a legacy/ums drm device in its lastclose function.
 */
static void drm_legacy_dev_reinit(struct drm_device *dev)
{
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);

	/* Tear down legacy mappings and DMA state under struct_mutex. */
	drm_legacy_agp_clear(dev);

	drm_legacy_sg_cleanup(dev);
	drm_legacy_vma_flush(dev);
	drm_legacy_dma_takedown(dev);

	mutex_unlock(&dev->struct_mutex);

	/* Reset the per-device soft state so the next open starts clean. */
	dev->sigdata.lock = NULL;

	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	DRM_DEBUG("lastclose completed\n");
}

D
Daniel Vetter 已提交
340
/*
 * Take down the DRM device.
 *
 * \param dev DRM device structure.
 *
 * Frees every resource in \p dev.
 *
 * \sa drm_device
 */
void drm_lastclose(struct drm_device * dev)
{
	DRM_DEBUG("\n");

	/* Let the driver clean up its own state first. */
	if (dev->driver->lastclose)
		dev->driver->lastclose(dev);
	DRM_DEBUG("driver lastclose completed\n");

	/* Legacy (UMS) devices additionally need a full core reinit. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_legacy_dev_reinit(dev);
}

L
Linus Torvalds 已提交
361
/**
 * drm_release - release method for DRM file
 * @inode: device inode
 * @filp: file pointer.
 *
 * This function must be used by drivers as their .release() #file_operations
 * method. It frees any resources associated with the open file, and if this is
 * the last open file for the DRM device also proceeds to call drm_lastclose().
 *
 * RETURNS:
 *
 * Always succeeds and returns 0.
 */
int drm_release(struct inode *inode, struct file *filp)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_minor *minor = file_priv->minor;
	struct drm_device *dev = minor->dev;

	mutex_lock(&drm_global_mutex);

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Unlink this file from the per-device open-file list. */
	mutex_lock(&dev->filelist_mutex);
	list_del(&file_priv->lhead);
	mutex_unlock(&dev->filelist_mutex);

	/* Drop any authentication magic still registered for this file. */
	mutex_lock(&dev->struct_mutex);
	if (file_priv->magic)
		idr_remove(&file_priv->master->magic_map, file_priv->magic);
	mutex_unlock(&dev->struct_mutex);

	if (dev->driver->preclose)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
		  task_pid_nr(current),
		  (long)old_encode_dev(file_priv->minor->kdev->devt),
		  dev->open_count);

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		drm_legacy_lock_release(dev, filp);

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		drm_legacy_reclaim_buffers(dev, file_priv);

	/* Disarm and free all events still tied to this file. */
	drm_events_release(file_priv);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		drm_fb_release(file_priv);
		drm_property_destroy_user_blobs(dev, file_priv);
	}

	if (drm_core_check_feature(dev, DRIVER_GEM))
		drm_gem_release(dev, file_priv);

	drm_legacy_ctxbitmap_flush(dev, file_priv);

	mutex_lock(&dev->master_mutex);

	if (file_priv->is_master) {
		struct drm_master *master = file_priv->master;

		/*
		 * Since the master is disappearing, so is the
		 * possibility to lock.
		 */
		mutex_lock(&dev->struct_mutex);
		if (master->lock.hw_lock) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		mutex_unlock(&dev->struct_mutex);

		if (file_priv->minor->master == file_priv->master) {
			/* drop the reference held by the minor */
			if (dev->driver->master_drop)
				dev->driver->master_drop(dev, file_priv, true);
			drm_master_put(&file_priv->minor->master);
		}
	}

	/* drop the master reference held by the file priv */
	if (file_priv->master)
		drm_master_put(&file_priv->master);
	file_priv->is_master = 0;
	mutex_unlock(&dev->master_mutex);

	if (dev->driver->postclose)
		dev->driver->postclose(dev, file_priv);


	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_prime_destroy_file_private(&file_priv->prime);

	/* drm_events_release() above should have emptied this list. */
	WARN_ON(!list_empty(&file_priv->event_list));

	put_pid(file_priv->pid);
	kfree(file_priv);

	/* ========================================================
	 * End inline drm_release
	 */

	/* Last close of the device: run lastclose and finish any unplug. */
	if (!--dev->open_count) {
		drm_lastclose(dev);
		if (drm_device_is_unplugged(dev))
			drm_put_dev(dev);
	}
	mutex_unlock(&drm_global_mutex);

	drm_minor_release(minor);

	return 0;
}
EXPORT_SYMBOL(drm_release);

D
Daniel Vetter 已提交
485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510
/**
 * drm_read - read method for DRM file
 * @filp: file pointer
 * @buffer: userspace destination pointer for the read
 * @count: count in bytes to read
 * @offset: offset to read
 *
 * This function must be used by drivers as their .read() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * @offset is ignored, DRM events are read like a pipe. Therefore drivers also
 * must set the .llseek() #file_operation to no_llseek(). Polling support is
 * provided by drm_poll().
 *
 * This function will only ever read a full event. Therefore userspace must
 * supply a big enough buffer to fit any event to ensure forward progress. Since
 * the maximum event space is currently 4K it's recommended to just use that for
 * safety.
 *
 * RETURNS:
 *
 * Number of bytes read (always aligned to full events, and can be 0) or a
 * negative error code on failure.
 */
ssize_t drm_read(struct file *filp, char __user *buffer,
		 size_t count, loff_t *offset)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	ssize_t ret;

	if (!access_ok(VERIFY_WRITE, buffer, count))
		return -EFAULT;

	/* Serialize readers so events are not interleaved between them. */
	ret = mutex_lock_interruptible(&file_priv->event_read_lock);
	if (ret)
		return ret;

	for (;;) {
		struct drm_pending_event *e = NULL;

		/* Pop the oldest event (if any) and reclaim its space. */
		spin_lock_irq(&dev->event_lock);
		if (!list_empty(&file_priv->event_list)) {
			e = list_first_entry(&file_priv->event_list,
					struct drm_pending_event, link);
			file_priv->event_space += e->event->length;
			list_del(&e->link);
		}
		spin_unlock_irq(&dev->event_lock);

		if (e == NULL) {
			/* Already copied something out: return that count. */
			if (ret)
				break;

			if (filp->f_flags & O_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}

			/* Drop the read lock while sleeping so other readers
			 * and the close path are not blocked. */
			mutex_unlock(&file_priv->event_read_lock);
			ret = wait_event_interruptible(file_priv->event_wait,
						       !list_empty(&file_priv->event_list));
			if (ret >= 0)
				ret = mutex_lock_interruptible(&file_priv->event_read_lock);
			if (ret)
				return ret;
		} else {
			unsigned length = e->event->length;

			/* Only full events are copied; if it doesn't fit, put
			 * it back at the head and stop. */
			if (length > count - ret) {
put_back_event:
				spin_lock_irq(&dev->event_lock);
				file_priv->event_space -= length;
				list_add(&e->link, &file_priv->event_list);
				spin_unlock_irq(&dev->event_lock);
				break;
			}

			if (copy_to_user(buffer + ret, e->event, length)) {
				if (ret == 0)
					ret = -EFAULT;
				goto put_back_event;
			}

			ret += length;
			kfree(e);
		}
	}
	mutex_unlock(&file_priv->event_read_lock);

	return ret;
}
EXPORT_SYMBOL(drm_read);

D
Daniel Vetter 已提交
581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596
/**
 * drm_poll - poll method for DRM file
 * @filp: file pointer
 * @wait: poll waiter table
 *
 * This function must be used by drivers as their .read() #file_operations
 * method iff they use DRM events for asynchronous signalling to userspace.
 * Since events are used by the KMS API for vblank and page flip completion this
 * means all modern display drivers must use it.
 *
 * See also drm_read().
 *
 * RETURNS:
 *
 * Mask of POLL flags indicating the current status of the file.
 */
L
Linus Torvalds 已提交
597 598
unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
{
599 600 601 602 603 604 605 606 607
	struct drm_file *file_priv = filp->private_data;
	unsigned int mask = 0;

	poll_wait(filp, &file_priv->event_wait, wait);

	if (!list_empty(&file_priv->event_list))
		mask |= POLLIN | POLLRDNORM;

	return mask;
L
Linus Torvalds 已提交
608
}
D
Dave Airlie 已提交
609
EXPORT_SYMBOL(drm_poll);
610 611

/**
 * drm_event_reserve_init_locked - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * This is the locked version of drm_event_reserve_init() for callers which
 * already hold dev->event_lock.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init_locked(struct drm_device *dev,
				  struct drm_file *file_priv,
				  struct drm_pending_event *p,
				  struct drm_event *e)
{
	/* Cap the amount of unread event data a file can accumulate. */
	if (file_priv->event_space < e->length)
		return -ENOMEM;

	file_priv->event_space -= e->length;

	p->event = e;
	/* Track as pending so close can disarm it before it fires. */
	list_add(&p->pending_link, &file_priv->pending_event_list);
	p->file_priv = file_priv;

	return 0;
}
EXPORT_SYMBOL(drm_event_reserve_init_locked);

/**
 * drm_event_reserve_init - init a DRM event and reserve space for it
 * @dev: DRM device
 * @file_priv: DRM file private data
 * @p: tracking structure for the pending event
 * @e: actual event data to deliver to userspace
 *
 * This function prepares the passed in event for eventual delivery. If the event
 * doesn't get delivered (because the IOCTL fails later on, before queuing up
 * anything) then the event must be cancelled and freed using
 * drm_event_cancel_free(). Successfully initialized events should be sent out
 * using drm_send_event() or drm_send_event_locked() to signal completion of the
 * asynchronous event to userspace.
 *
 * If callers embedded @p into a larger structure it must be allocated with
 * kmalloc and @p must be the first member element.
 *
 * Callers which already hold dev->event_lock should use
 * drm_event_reserve_init_locked() instead.
 *
 * RETURNS:
 *
 * 0 on success or a negative error code on failure.
 */
int drm_event_reserve_init(struct drm_device *dev,
			   struct drm_file *file_priv,
			   struct drm_pending_event *p,
			   struct drm_event *e)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, flags);
	ret = drm_event_reserve_init_locked(dev, file_priv, p, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return ret;
}
EXPORT_SYMBOL(drm_event_reserve_init);

/**
 * drm_event_cancel_free - free a DRM event and release its space
 * @dev: DRM device
 * @p: tracking structure for the pending event
 *
 * This function frees the event @p initialized with drm_event_reserve_init()
 * and releases any allocated space.
 */
void drm_event_cancel_free(struct drm_device *dev,
			   struct drm_pending_event *p)
{
	unsigned long flags;
	spin_lock_irqsave(&dev->event_lock, flags);
	/* ->file_priv is NULL if the file was already closed and the event
	 * disarmed; only then is there no space left to return. */
	if (p->file_priv) {
		p->file_priv->event_space += p->event->length;
		list_del(&p->pending_link);
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);
	kfree(p);
}
EXPORT_SYMBOL(drm_event_cancel_free);
714 715 716 717 718 719 720 721 722

/**
 * drm_send_event_locked - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. Callers must already hold
 * dev->event_lock, see drm_send_event() for the unlocked version.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
{
	assert_spin_locked(&dev->event_lock);

	if (e->completion) {
		/* ->completion might disappear as soon as it signalled. */
		complete_all(e->completion);
		e->completion = NULL;
	}

	if (e->fence) {
		fence_signal(e->fence);
		fence_put(e->fence);
	}

	/* The file was closed and the event disarmed: nobody can read it,
	 * so just free it. */
	if (!e->file_priv) {
		kfree(e);
		return;
	}

	/* Move from the pending list to the readable event list and wake
	 * any sleeping reader/poller. */
	list_del(&e->pending_link);
	list_add_tail(&e->link,
		      &e->file_priv->event_list);
	wake_up_interruptible(&e->file_priv->event_wait);
}
EXPORT_SYMBOL(drm_send_event_locked);

/**
 * drm_send_event - send DRM event to file descriptor
 * @dev: DRM device
 * @e: DRM event to deliver
 *
 * This function sends the event @e, initialized with drm_event_reserve_init(),
 * to its associated userspace DRM file. This function acquires dev->event_lock,
 * see drm_send_event_locked() for callers which already hold this lock.
 *
 * Note that the core will take care of unlinking and disarming events when the
 * corresponding DRM file is closed. Drivers need not worry about whether the
 * DRM file for this event still exists and can call this function upon
 * completion of the asynchronous work unconditionally.
 */
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	drm_send_event_locked(dev, e);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
EXPORT_SYMBOL(drm_send_event);