/*
 * fs/eventfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/module.h>
#include <linux/kref.h>
#include <linux/eventfd.h>

struct eventfd_ctx {
    struct kref kref;
    wait_queue_head_t wqh;
    /*
     * Every time that a write(2) is performed on an eventfd, the
     * value of the __u64 being written is added to "count" and a
     * wakeup is performed on "wqh". A read(2) will return the "count"
     * value to userspace, and will reset "count" to zero. The kernel
     * side eventfd_signal() also adds to the "count" counter and
     * issues a wakeup.
     */
    __u64 count;
    unsigned int flags;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the
 * ULLONG_MAX value, and we signal this as an overflow condition by returning
 * POLLERR to poll(2).
 *
 * Returns @n in case of success, a non-negative number lower than @n in case
 * of overflow, or the following error codes:
 *
 * -EINVAL : The value of @n is negative.
 */
int eventfd_signal(struct eventfd_ctx *ctx, int n)
{
    unsigned long flags;

    if (n < 0)
        return -EINVAL;
    spin_lock_irqsave(&ctx->wqh.lock, flags);
    if (ULLONG_MAX - ctx->count < n)
        n = (int) (ULLONG_MAX - ctx->count);
    ctx->count += n;
    if (waitqueue_active(&ctx->wqh))
        wake_up_locked_poll(&ctx->wqh, POLLIN);
    spin_unlock_irqrestore(&ctx->wqh.lock, flags);

    return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);

static void eventfd_free(struct kref *kref)
{
    struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

    kfree(ctx);
}

/**
 * eventfd_ctx_get - Acquires a reference to the internal eventfd context.
 * @ctx: [in] Pointer to the eventfd context.
 *
 * Returns: A pointer to the eventfd context.
 */
struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx)
{
    kref_get(&ctx->kref);
    return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_get);

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_get() or eventfd_ctx_fdget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
    kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

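/*
 * Called when the last reference to the eventfd file is dropped: wake any
 * remaining poll(2) waiters with POLLHUP and release the file's reference
 * on the context.
 */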
static int eventfd_release(struct inode *inode, struct file *file)
{
    struct eventfd_ctx *ctx = file->private_data;

    wake_up_poll(&ctx->wqh, POLLHUP);
    eventfd_ctx_put(ctx);
    return 0;
}

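/*
 * Report readiness: POLLIN while the counter is non-zero, POLLERR once it
 * has reached ULLONG_MAX (overflow), and POLLOUT while a write of at least
 * one unit could still complete without blocking.
 */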
static unsigned int eventfd_poll(struct file *file, poll_table *wait)
{
    struct eventfd_ctx *ctx = file->private_data;
    unsigned int events = 0;
    unsigned long flags;

    poll_wait(file, &ctx->wqh, wait);

    spin_lock_irqsave(&ctx->wqh.lock, flags);
    if (ctx->count > 0)
        events |= POLLIN;
    if (ctx->count == ULLONG_MAX)
        events |= POLLERR;
    if (ULLONG_MAX - 1 > ctx->count)
        events |= POLLOUT;
    spin_unlock_irqrestore(&ctx->wqh.lock, flags);

    return events;
}

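/*
 * read(2): return the current counter value (or 1 in EFD_SEMAPHORE mode)
 * to userspace and subtract it from the counter, sleeping until the counter
 * becomes non-zero unless O_NONBLOCK is set.
 */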
static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
                loff_t *ppos)
{
    struct eventfd_ctx *ctx = file->private_data;
    ssize_t res;
    __u64 ucnt = 0;
    DECLARE_WAITQUEUE(wait, current);

    if (count < sizeof(ucnt))
        return -EINVAL;
    spin_lock_irq(&ctx->wqh.lock);
    res = -EAGAIN;
    if (ctx->count > 0)
        res = sizeof(ucnt);
    else if (!(file->f_flags & O_NONBLOCK)) {
        __add_wait_queue(&ctx->wqh, &wait);
        for (res = 0;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (ctx->count > 0) {
                res = sizeof(ucnt);
                break;
            }
            if (signal_pending(current)) {
                res = -ERESTARTSYS;
                break;
            }
            spin_unlock_irq(&ctx->wqh.lock);
            schedule();
            spin_lock_irq(&ctx->wqh.lock);
        }
        __remove_wait_queue(&ctx->wqh, &wait);
        __set_current_state(TASK_RUNNING);
    }
    if (likely(res > 0)) {
        ucnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
        ctx->count -= ucnt;
        if (waitqueue_active(&ctx->wqh))
            wake_up_locked_poll(&ctx->wqh, POLLOUT);
    }
    spin_unlock_irq(&ctx->wqh.lock);
    if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
        return -EFAULT;

    return res;
}

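/*
 * write(2): add the 8-byte value supplied by userspace to the counter,
 * sleeping until the addition no longer overflows unless O_NONBLOCK is set.
 * A value of ULLONG_MAX is rejected with -EINVAL.
 */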
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
                 loff_t *ppos)
{
    struct eventfd_ctx *ctx = file->private_data;
    ssize_t res;
    __u64 ucnt;
    DECLARE_WAITQUEUE(wait, current);

    if (count < sizeof(ucnt))
        return -EINVAL;
    if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
        return -EFAULT;
    if (ucnt == ULLONG_MAX)
        return -EINVAL;
    spin_lock_irq(&ctx->wqh.lock);
    res = -EAGAIN;
    if (ULLONG_MAX - ctx->count > ucnt)
        res = sizeof(ucnt);
    else if (!(file->f_flags & O_NONBLOCK)) {
        __add_wait_queue(&ctx->wqh, &wait);
        for (res = 0;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (ULLONG_MAX - ctx->count > ucnt) {
                res = sizeof(ucnt);
                break;
            }
            if (signal_pending(current)) {
                res = -ERESTARTSYS;
                break;
            }
            spin_unlock_irq(&ctx->wqh.lock);
            schedule();
            spin_lock_irq(&ctx->wqh.lock);
        }
        __remove_wait_queue(&ctx->wqh, &wait);
        __set_current_state(TASK_RUNNING);
    }
    if (likely(res > 0)) {
        ctx->count += ucnt;
        if (waitqueue_active(&ctx->wqh))
            wake_up_locked_poll(&ctx->wqh, POLLIN);
    }
    spin_unlock_irq(&ctx->wqh.lock);

    return res;
}

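/* File operations backing the eventfd anonymous inode. */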
static const struct file_operations eventfd_fops = {
    .release = eventfd_release,
    .poll = eventfd_poll,
    .read = eventfd_read,
    .write = eventfd_write,
};

/**
 * eventfd_fget - Acquire a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or one
 * of the following error pointers:
 *
 * -EBADF : Invalid @fd file descriptor.
 * -EINVAL : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
    struct file *file;

    file = fget(fd);
    if (!file)
        return ERR_PTR(-EBADF);
    if (file->f_op != &eventfd_fops) {
        fput(file);
        return ERR_PTR(-EINVAL);
    }

    return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise one of the
 * error pointers returned by eventfd_fget().
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
    struct file *file;
    struct eventfd_ctx *ctx;

    file = eventfd_fget(fd);
    if (IS_ERR(file))
        return (struct eventfd_ctx *) file;
    ctx = eventfd_ctx_get(file->private_data);
    fput(file);

    return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
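
/*
 * Illustrative only (not part of this file): a typical kernel-side consumer
 * pins the context once at setup time, signals it from a path that cannot
 * sleep, and drops the reference at teardown. The structure and function
 * names below are hypothetical; only the eventfd_* helpers come from this
 * file:
 *
 *    int example_setup(struct example_dev *dev, int fd)
 *    {
 *        struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *        if (IS_ERR(ctx))
 *            return PTR_ERR(ctx);
 *        dev->trigger = ctx;
 *        return 0;
 *    }
 *
 *    eventfd_signal(dev->trigger, 1);    (e.g. from an interrupt handler)
 *
 *    eventfd_ctx_put(dev->trigger);      (at teardown)
 */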

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
    if (file->f_op != &eventfd_fops)
        return ERR_PTR(-EINVAL);

    return eventfd_ctx_get(file->private_data);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

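/*
 * eventfd2(2): allocate an eventfd context holding the initial counter value
 * and wire it to a new file descriptor backed by an anonymous inode.
 */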
SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
    int fd;
    struct eventfd_ctx *ctx;

    /* Check the EFD_* constants for consistency. */
    BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
    BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

    if (flags & ~EFD_FLAGS_SET)
        return -EINVAL;

    ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
    if (!ctx)
        return -ENOMEM;

    kref_init(&ctx->kref);
    init_waitqueue_head(&ctx->wqh);
    ctx->count = count;
    ctx->flags = flags;

    /*
     * When we call this, the initialization must be complete, since
     * anon_inode_getfd() will install the fd.
     */
    fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
                  flags & EFD_SHARED_FCNTL_FLAGS);
    if (fd < 0)
        kfree(ctx);
    return fd;
}

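/* Legacy eventfd(2): equivalent to eventfd2(2) with no flags. */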
SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
    return sys_eventfd2(count, 0);
}
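
/*
 * Illustrative userspace view (not part of this file), assuming the libc
 * wrapper from <sys/eventfd.h>: a writer adds to the counter and a reader
 * drains it.
 *
 *    int efd = eventfd(0, EFD_CLOEXEC);
 *    uint64_t v = 1;
 *
 *    write(efd, &v, sizeof(v));    counter becomes 1, poll(2) reports POLLIN
 *    read(efd, &v, sizeof(v));     v == 1 and the counter is reset to zero
 */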