/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};
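
/*
 * An illustrative (hypothetical) userspace sketch of the semantics described
 * above: writes accumulate into the counter, a plain (non-semaphore) read
 * returns the whole value and resets it to zero. Names and values are only
 * an example, not part of this file.
 *
 *	int efd = eventfd(0, 0);
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	// count == 3
 *	write(efd, &v, sizeof(v));	// count == 6, waiters woken
 *	read(efd, &v, sizeof(v));	// v == 6, count reset to 0
 */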

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value of the counter to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning an EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented.  This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
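
/*
 * A minimal, hypothetical sketch of a caller that cannot sleep (an interrupt
 * handler) signaling an eventfd. "dev->trigger" is assumed to be a
 * struct eventfd_ctx pointer obtained earlier, e.g. with eventfd_ctx_fdget();
 * the foo_* names are illustrative and not part of this file.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_dev *dev = data;
 *
 *		eventfd_signal(dev->trigger, 1);	// adds 1, wakes readers/pollers
 *		return IRQ_HANDLED;
 *	}
 */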

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock.  This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it!  add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read is ordered properly
	 * against the writes.  The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}
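
/*
 * What the mask above means to userspace, as an illustrative (hypothetical)
 * poll(2) snippet: EPOLLIN when the counter is non-zero, EPOLLOUT when a
 * value of at least 1 can be written without blocking, EPOLLERR on overflow.
 *
 *	struct pollfd pfd = { .fd = efd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		uint64_t v;
 *
 *		read(efd, &v, sizeof(v));	// drains the counter
 *	}
 */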

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error codes:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
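
/*
 * A rough, hypothetical sketch of the intended caller: a consumer that hooked
 * its own wait queue entry into the eventfd's wait queue (typically through
 * the file's ->poll() path) and now wants to detach while atomically
 * collecting any pending count. All names below are illustrative only.
 *
 *	static void consumer_shutdown(struct consumer *c)
 *	{
 *		__u64 cnt;
 *
 *		// Detaches c->wait from the eventfd's wait queue and
 *		// reads/resets the counter in one step; -EAGAIN means
 *		// nothing was pending.
 *		eventfd_ctx_remove_wait_queue(c->eventfd, &c->wait, &cnt);
 *		eventfd_ctx_put(c->eventfd);
 *	}
 */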

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};

/**
 * eventfd_fget - Acquire a reference of an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise one of the
 * following error pointers:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
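
/*
 * A hypothetical sketch of the usual fd-to-context lifecycle around
 * eventfd_ctx_fdget()/eventfd_ctx_put(): resolve a userspace-supplied fd once
 * at setup time, keep only the context, and drop the reference at teardown.
 * The foo_* names are illustrative only.
 *
 *	static int foo_setup(struct foo_dev *dev, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		dev->trigger = ctx;		// keeps a kref on the context
 *		return 0;
 *	}
 *
 *	static void foo_teardown(struct foo_dev *dev)
 *	{
 *		eventfd_ctx_put(dev->trigger);	// balances eventfd_ctx_fdget()
 *		dev->trigger = NULL;
 *	}
 */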

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
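
/*
 * An illustrative (hypothetical) userspace sketch of the EFD_SEMAPHORE mode
 * handled by eventfd_ctx_do_read() above: each read() returns 1 and
 * decrements the counter by 1 instead of draining it, so the eventfd behaves
 * like a counting semaphore.
 *
 *	int efd = eventfd(0, EFD_SEMAPHORE | EFD_NONBLOCK);
 *	uint64_t v = 3;
 *
 *	write(efd, &v, sizeof(v));	// post three "tokens"
 *	read(efd, &v, sizeof(v));	// v == 1, two tokens remain
 *	read(efd, &v, sizeof(v));	// v == 1, one token remains
 */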