/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning a POLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
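/*
 * Illustrative sketch, not part of the original file: a kernel-side producer
 * that already holds an eventfd_ctx (for instance one resolved earlier with
 * eventfd_ctx_fdget()) can notify userspace from atomic context.  The names
 * my_dev and my_irq_handler below are hypothetical.
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		eventfd_signal(dev->done_ctx, 1);
 *		return IRQ_HANDLED;
 *	}
 */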

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * Returns: In case of success, returns a pointer to the eventfd context.
 */
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
{
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_get);

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_get() or eventfd_ctx_fdget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);
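/*
 * Illustrative sketch, not part of the original file: a consumer that caches
 * a context takes its own reference with eventfd_ctx_get() and releases it
 * with eventfd_ctx_put() on teardown, so the context may outlive the file
 * descriptor.  The struct my_consumer fields below are hypothetical.
 *
 *	static void my_consumer_attach(struct my_consumer *c, struct eventfd_ctx *ctx)
 *	{
 *		c->ctx = eventfd_ctx_get(ctx);
 *	}
 *
 *	static void my_consumer_detach(struct my_consumer *c)
 *	{
 *		eventfd_ctx_put(c->ctx);
 *		c->ctx = NULL;
 *	}
 */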

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, POLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	unsigned int events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= POLLIN;
	if (count == ULLONG_MAX)
		events |= POLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= POLLOUT;

	return events;
}

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, POLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
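/*
 * Illustrative sketch, not part of the original file: a consumer that added
 * its own wait queue entry to ctx->wqh (typically from a poll callback, as
 * irqfd-style users do) can detach it and drain any pending count in one
 * atomic step during shutdown.  The my-> fields below are hypothetical.
 *
 *	__u64 cnt;
 *
 *	eventfd_ctx_remove_wait_queue(my->ctx, &my->wait, &cnt);
 *	(a return of -EAGAIN simply means the counter was still zero)
 */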

/**
 * eventfd_ctx_read - Reads the eventfd counter, or waits if it is zero.
 * @ctx: [in] Pointer to eventfd context.
 * @no_wait: [in] Non-zero if the operation should not block.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked but @no_wait was non-zero.
 * -ERESTARTSYS : A signal interrupted the wait operation.
 *
 * If @no_wait is zero, the function might sleep until the eventfd internal
 * counter becomes greater than zero.
 */
ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
	ssize_t res;
	DECLARE_WAITQUEUE(wait, current);

	spin_lock_irq(&ctx->wqh.lock);
	*cnt = 0;
	res = -EAGAIN;
	if (ctx->count > 0)
		res = 0;
	else if (!no_wait) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = 0;
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res == 0)) {
		eventfd_ctx_do_read(ctx, cnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_read);
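/*
 * Illustrative sketch, not part of the original file: kernel code that wants
 * read(2)-like semantics without going through a struct file can consume the
 * counter directly; passing a non-zero no_wait makes the call non-blocking.
 * The my_ctx pointer below is hypothetical.
 *
 *	__u64 cnt;
 *	ssize_t err;
 *
 *	err = eventfd_ctx_read(my_ctx, 1, &cnt);
 *	if (err == -EAGAIN)
 *		... the counter was zero, nothing to consume ...
 */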

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 cnt;

	if (count < sizeof(cnt))
		return -EINVAL;
	res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
	if (res < 0)
		return res;

	return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, POLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
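/*
 * Illustrative sketch, not part of the original file: a driver that receives
 * an eventfd file descriptor from userspace (for example via an ioctl
 * argument) resolves it to a context it can signal later.  The args->eventfd
 * field below is hypothetical.
 *
 *	struct eventfd_ctx *ctx;
 *
 *	ctx = eventfd_ctx_fdget(args->eventfd);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 *	... store ctx, eventfd_signal(ctx, 1) on events,
 *	    eventfd_ctx_put(ctx) on teardown ...
 */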

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	return eventfd_ctx_get(file->private_data);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

/**
 * eventfd_file_create - Creates an eventfd file pointer.
 * @count: Initial eventfd counter value.
 * @flags: Flags for the eventfd file.
 *
 * This function creates an eventfd file pointer, w/out installing it into
 * the fd table. This is useful when the eventfd file is used during the
 * initialization of data structures that require extra setup after the eventfd
 * creation. So the eventfd creation is split into the file pointer creation
 * phase, and the file descriptor installation phase.
 * In this way races with userspace closing the newly installed file descriptor
 * can be avoided.
 * Returns an eventfd file pointer, or a proper error pointer.
 */
struct file *eventfd_file_create(unsigned int count, int flags)
{
	struct file *file;
	struct eventfd_ctx *ctx;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return ERR_PTR(-EINVAL);

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx,
				  O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (IS_ERR(file))
		eventfd_free_ctx(ctx);

	return file;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	int fd, error;
	struct file *file;

	error = get_unused_fd_flags(flags & EFD_SHARED_FCNTL_FLAGS);
	if (error < 0)
		return error;
	fd = error;

	file = eventfd_file_create(count, flags);
	if (IS_ERR(file)) {
		error = PTR_ERR(file);
		goto err_put_unused_fd;
	}
	fd_install(fd, file);

	return fd;

err_put_unused_fd:
	put_unused_fd(fd);

	return error;
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return sys_eventfd2(count, 0);
}
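/*
 * Illustrative userspace sketch of the syscall semantics above, not part of
 * the original file: writes add to the counter, reads return and reset it
 * (or decrement it by one when EFD_SEMAPHORE is set).  Error handling is
 * omitted for brevity.
 *
 *	#include <sys/eventfd.h>
 *	#include <unistd.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	(counter becomes 3, POLLIN readers wake)
 *	read(efd, &v, sizeof(v));	(v == 3, counter reset to 0)
 */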