/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};
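
/*
 * Usage sketch (illustrative, not part of this file): the "count"
 * semantics described above as seen from user space. Two write(2)s of
 * 3 and 4 accumulate to 7; one read(2) then drains the counter to 0.
 * Assumes a hosted build against <sys/eventfd.h>.
 */
#if 0
#include <sys/eventfd.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t v = 3, r;
	int efd = eventfd(0, 0);

	write(efd, &v, sizeof(v));	/* count = 3 */
	v = 4;
	write(efd, &v, sizeof(v));	/* count = 7 */
	read(efd, &r, sizeof(r));	/* r = 7, count reset to 0 */
	printf("read %llu\n", (unsigned long long)r);
	close(efd);
	return 0;
}
#endif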

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
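
/*
 * Usage sketch (illustrative, not part of this file): since
 * eventfd_signal() only takes ctx->wqh.lock with interrupts disabled,
 * a driver may call it from an interrupt handler to notify user space.
 * "struct my_dev" and its "trigger" context are hypothetical.
 */
#if 0
static irqreturn_t my_dev_irq(int irq, void *data)
{
	struct my_dev *dev = data;

	/* Safe in atomic context; adds 1 and wakes any poll(2) waiters. */
	if (dev->trigger)
		eventfd_signal(dev->trigger, 1);
	return IRQ_HANDLED;
}
#endif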

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static struct wait_queue_head *
eventfd_get_poll_head(struct file *file, __poll_t events)
{
	struct eventfd_ctx *ctx = file->private_data;

	return &ctx->wqh;
}

static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read be ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll_mask returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll_mask returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= (EPOLLIN & eventmask);
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= (EPOLLOUT & eventmask);

	return events;
}

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
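
/*
 * Usage sketch (illustrative, not part of this file): with
 * EFD_SEMAPHORE, the branch above makes every read(2) return 1 and
 * decrement "count" by 1 instead of draining it to zero. User-space
 * sketch, assuming <sys/eventfd.h>.
 */
#if 0
#include <sys/eventfd.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t r;
	int efd = eventfd(3, EFD_SEMAPHORE);	/* count starts at 3 */

	read(efd, &r, sizeof(r));	/* r = 1, count = 2 */
	read(efd, &r, sizeof(r));	/* r = 1, count = 1 */
	close(efd);
	return 0;
}
#endif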

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
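
/*
 * Usage sketch (illustrative, not part of this file): a consumer that
 * added its own wait queue entry to the eventfd (KVM's irqfd teardown
 * does something similar) can detach and drain atomically. The
 * "struct consumer" and its fields are hypothetical.
 */
#if 0
static void consumer_shutdown(struct consumer *c)
{
	__u64 cnt;

	/* Detach c->wait from the eventfd and collect any pending count. */
	eventfd_ctx_remove_wait_queue(c->ctx, &c->wait, &cnt);
	eventfd_ctx_put(c->ctx);
}
#endif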

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}
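
/*
 * Usage sketch (illustrative, not part of this file): the O_NONBLOCK
 * branch above is what turns an empty counter into -EAGAIN instead of
 * a sleep. User-space sketch, assuming <sys/eventfd.h>.
 */
#if 0
#include <sys/eventfd.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t r;
	int efd = eventfd(0, EFD_NONBLOCK);

	if (read(efd, &r, sizeof(r)) < 0 && errno == EAGAIN)
		;	/* count was 0: no blocking, just EAGAIN */
	close(efd);
	return 0;
}
#endif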

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}
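
/*
 * Usage sketch (illustrative, not part of this file): per the check
 * above, a write only succeeds while the added value still fits below
 * ULLONG_MAX; a non-blocking write that would overflow fails with
 * EAGAIN. Assumes <sys/eventfd.h>.
 */
#if 0
#include <sys/eventfd.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	uint64_t max = UINT64_MAX - 1;	/* largest value a write may add */
	int efd = eventfd(0, EFD_NONBLOCK);

	write(efd, &max, sizeof(max));	/* ok: count = ULLONG_MAX - 1 */
	if (write(efd, &max, sizeof(max)) < 0 && errno == EAGAIN)
		;	/* would overflow: EAGAIN instead of blocking */
	close(efd);
	return 0;
}
#endif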

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.get_poll_head	= eventfd_get_poll_head,
	.poll_mask	= eventfd_poll_mask,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointers returned by the following functions:
 *
 * eventfd_fget
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
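
/*
 * Usage sketch (illustrative, not part of this file): typical
 * kernel-side lifetime pairing. A driver resolves a user-supplied fd
 * once at setup and drops the reference at teardown; "struct my_dev"
 * and these helpers are hypothetical.
 */
#if 0
static long my_dev_set_eventfd(struct my_dev *dev, int fd)
{
	struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);

	if (IS_ERR(ctx))
		return PTR_ERR(ctx);
	dev->trigger = ctx;	/* hold the reference across use */
	return 0;
}

static void my_dev_teardown(struct my_dev *dev)
{
	if (dev->trigger)
		eventfd_ctx_put(dev->trigger);	/* balance eventfd_ctx_fdget() */
}
#endif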

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
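
/*
 * Usage sketch (illustrative, not part of this file): user space
 * reaches do_eventfd() through either syscall; glibc's eventfd(2)
 * wrapper uses eventfd2, so flags such as EFD_CLOEXEC and EFD_NONBLOCK
 * work as shown.
 */
#if 0
#include <sys/eventfd.h>

int make_event_channel(void)
{
	/* initial count 0; close-on-exec and non-blocking, per EFD_* flags */
	return eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
}
#endif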