/*
 * An async IO implementation for Linux
 * Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 * Implements an efficient asynchronous io interface.
 *
 * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
 *
 * See ../COPYING for licensing terms.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/module.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#define DEBUG 0

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#if DEBUG > 1
#define dprintk printk
#else
#define dprintk(x...) do { ; } while (0)
#endif

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr; /* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache *kiocb_cachep;
static struct kmem_cache *kioctx_cachep;

static struct workqueue_struct *aio_wq;

/* Used for rare fput completion. */
static void aio_fput_routine(struct work_struct *);
static DECLARE_WORK(fput_work, aio_fput_routine);

static DEFINE_SPINLOCK(fput_lock);
static LIST_HEAD(fput_head);

static void aio_kick_handler(struct work_struct *);
static void aio_queue_work(struct kioctx *);

/* aio_setup
 * Creates the slab caches used by the aio routines, panic on
 * failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
    kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
    kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

    aio_wq = alloc_workqueue("aio", 0, 1); /* used to limit concurrency */
    BUG_ON(!aio_wq);

    pr_debug("aio_setup: sizeof(struct page) = %d\n", (int)sizeof(struct page));

    return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
    struct aio_ring_info *info = &ctx->ring_info;
    long i;

    for (i=0; i<info->nr_pages; i++)
        put_page(info->ring_pages[i]);

    if (info->mmap_size) {
        down_write(&ctx->mm->mmap_sem);
        do_munmap(ctx->mm, info->mmap_base, info->mmap_size);
        up_write(&ctx->mm->mmap_sem);
    }

    if (info->ring_pages && info->ring_pages != info->internal_pages)
        kfree(info->ring_pages);
    info->ring_pages = NULL;
    info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
    struct aio_ring *ring;
    struct aio_ring_info *info = &ctx->ring_info;
    unsigned nr_events = ctx->max_reqs;
    unsigned long size;
    int nr_pages;

    /* Compensate for the ring buffer's head/tail overlap entry */
    nr_events += 2; /* 1 is required, 2 for good luck */

    size = sizeof(struct aio_ring);
    size += sizeof(struct io_event) * nr_events;
    nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

    if (nr_pages < 0)
        return -EINVAL;

    nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

    info->nr = 0;
    info->ring_pages = info->internal_pages;
    if (nr_pages > AIO_RING_PAGES) {
        info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!info->ring_pages)
            return -ENOMEM;
    }

    info->mmap_size = nr_pages * PAGE_SIZE;
    dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
    down_write(&ctx->mm->mmap_sem);
    info->mmap_base = do_mmap(NULL, 0, info->mmap_size,
                  PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE,
                  0);
    if (IS_ERR((void *)info->mmap_base)) {
        up_write(&ctx->mm->mmap_sem);
        info->mmap_size = 0;
        aio_free_ring(ctx);
        return -EAGAIN;
    }

    dprintk("mmap address: 0x%08lx\n", info->mmap_base);
    info->nr_pages = get_user_pages(current, ctx->mm,
                    info->mmap_base, nr_pages,
                    1, 0, info->ring_pages, NULL);
    up_write(&ctx->mm->mmap_sem);

    if (unlikely(info->nr_pages != nr_pages)) {
        aio_free_ring(ctx);
        return -EAGAIN;
    }

    ctx->user_id = info->mmap_base;

    info->nr = nr_events; /* trusted copy */

    ring = kmap_atomic(info->ring_pages[0], KM_USER0);
    ring->nr = nr_events; /* user copy */
    ring->id = ctx->user_id;
    ring->head = ring->tail = 0;
    ring->magic = AIO_RING_MAGIC;
    ring->compat_features = AIO_RING_COMPAT_FEATURES;
    ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
    ring->header_length = sizeof(struct aio_ring);
    kunmap_atomic(ring, KM_USER0);

    return 0;
}


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr, km) ({ \
    unsigned pos = (nr) + AIO_EVENTS_OFFSET; \
    struct io_event *__event; \
    __event = kmap_atomic( \
            (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
    __event += pos % AIO_EVENTS_PER_PAGE; \
    __event; \
})

#define put_aio_ring_event(event, km) do { \
    struct io_event *__event = (event); \
    (void)__event; \
    kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
} while(0)
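
/*
 * Worked example of the index math above (illustrative only): with
 * 4 KiB pages, struct io_event and the aio_ring header are both 32
 * bytes in this ABI, so
 *
 *    AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
 *    AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127
 *    AIO_EVENTS_OFFSET     = 128 - 127        = 1
 *
 * Event 0 then maps to pos 1, i.e. page 0, slot 1 (the header occupies
 * the space of slot 0); event 126 maps to pos 127, the last slot of
 * page 0; and event 127 maps to pos 128, i.e. page 1, slot 0.
 */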

static void ctx_rcu_free(struct rcu_head *head)
{
    struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
    unsigned nr_events = ctx->max_reqs;

    kmem_cache_free(kioctx_cachep, ctx);

    if (nr_events) {
        spin_lock(&aio_nr_lock);
        BUG_ON(aio_nr - nr_events > aio_nr);
        aio_nr -= nr_events;
        spin_unlock(&aio_nr_lock);
    }
}

/* __put_ioctx
 * Called when the last user of an aio context has gone away,
 * and the struct needs to be freed.
 */
static void __put_ioctx(struct kioctx *ctx)
{
    BUG_ON(ctx->reqs_active);

    cancel_delayed_work(&ctx->wq);
    cancel_work_sync(&ctx->wq.work);
    aio_free_ring(ctx);
    mmdrop(ctx->mm);
    ctx->mm = NULL;
    pr_debug("__put_ioctx: freeing %p\n", ctx);
    call_rcu(&ctx->rcu_head, ctx_rcu_free);
}

static inline void get_ioctx(struct kioctx *kioctx)
{
    BUG_ON(atomic_read(&kioctx->users) <= 0);
    atomic_inc(&kioctx->users);
}

static inline int try_get_ioctx(struct kioctx *kioctx)
{
    return atomic_inc_not_zero(&kioctx->users);
}

static inline void put_ioctx(struct kioctx *kioctx)
{
    BUG_ON(atomic_read(&kioctx->users) <= 0);
    if (unlikely(atomic_dec_and_test(&kioctx->users)))
        __put_ioctx(kioctx);
}

/* ioctx_alloc
 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
    struct mm_struct *mm;
    struct kioctx *ctx;
    int did_sync = 0;

    /* Prevent overflows */
    if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
        (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
        pr_debug("ENOMEM: nr_events too high\n");
        return ERR_PTR(-EINVAL);
    }

    if ((unsigned long)nr_events > aio_max_nr)
        return ERR_PTR(-EAGAIN);

    ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
    if (!ctx)
        return ERR_PTR(-ENOMEM);

    ctx->max_reqs = nr_events;
    mm = ctx->mm = current->mm;
    atomic_inc(&mm->mm_count);

    atomic_set(&ctx->users, 1);
    spin_lock_init(&ctx->ctx_lock);
    spin_lock_init(&ctx->ring_info.ring_lock);
    init_waitqueue_head(&ctx->wait);

    INIT_LIST_HEAD(&ctx->active_reqs);
    INIT_LIST_HEAD(&ctx->run_list);
    INIT_DELAYED_WORK(&ctx->wq, aio_kick_handler);

    if (aio_setup_ring(ctx) < 0)
        goto out_freectx;

    /* limit the number of system wide aios */
    do {
        spin_lock_bh(&aio_nr_lock);
        if (aio_nr + nr_events > aio_max_nr ||
            aio_nr + nr_events < aio_nr)
            ctx->max_reqs = 0;
        else
            aio_nr += ctx->max_reqs;
        spin_unlock_bh(&aio_nr_lock);
        if (ctx->max_reqs || did_sync)
            break;

        /* wait for rcu callbacks to have completed before giving up */
        synchronize_rcu();
        did_sync = 1;
        ctx->max_reqs = nr_events;
    } while (1);

    if (ctx->max_reqs == 0)
        goto out_cleanup;

    /* now link into global list. */
    spin_lock(&mm->ioctx_lock);
    hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
    spin_unlock(&mm->ioctx_lock);

    dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
        ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
    return ctx;

out_cleanup:
    __put_ioctx(ctx);
    return ERR_PTR(-EAGAIN);

out_freectx:
    mmdrop(mm);
    kmem_cache_free(kioctx_cachep, ctx);
    ctx = ERR_PTR(-ENOMEM);

    dprintk("aio: error allocating ioctx %p\n", ctx);
    return ctx;
}

/* aio_cancel_all
 * Cancels all outstanding aio requests on an aio context. Used
 * when the processes owning a context have all exited to encourage
 * the rapid destruction of the kioctx.
 */
static void aio_cancel_all(struct kioctx *ctx)
{
    int (*cancel)(struct kiocb *, struct io_event *);
    struct io_event res;
    spin_lock_irq(&ctx->ctx_lock);
    ctx->dead = 1;
    while (!list_empty(&ctx->active_reqs)) {
        struct list_head *pos = ctx->active_reqs.next;
        struct kiocb *iocb = list_kiocb(pos);
        list_del_init(&iocb->ki_list);
        cancel = iocb->ki_cancel;
        kiocbSetCancelled(iocb);
        if (cancel) {
            iocb->ki_users++;
            spin_unlock_irq(&ctx->ctx_lock);
            cancel(iocb, &res);
            spin_lock_irq(&ctx->ctx_lock);
        }
    }
    spin_unlock_irq(&ctx->ctx_lock);
}

static void wait_for_all_aios(struct kioctx *ctx)
{
    struct task_struct *tsk = current;
    DECLARE_WAITQUEUE(wait, tsk);

    spin_lock_irq(&ctx->ctx_lock);
    if (!ctx->reqs_active)
        goto out;

    add_wait_queue(&ctx->wait, &wait);
    set_task_state(tsk, TASK_UNINTERRUPTIBLE);
    while (ctx->reqs_active) {
        spin_unlock_irq(&ctx->ctx_lock);
        io_schedule();
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        spin_lock_irq(&ctx->ctx_lock);
    }
    __set_task_state(tsk, TASK_RUNNING);
    remove_wait_queue(&ctx->wait, &wait);

out:
    spin_unlock_irq(&ctx->ctx_lock);
}

/* wait_on_sync_kiocb:
 * Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
    while (iocb->ki_users) {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (!iocb->ki_users)
            break;
        io_schedule();
    }
    __set_current_state(TASK_RUNNING);
    return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);
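
/*
 * Illustrative caller pattern (a sketch, not code from this file): the
 * synchronous read/write paths build a blocking call out of the async
 * file method by waiting on a stack kiocb, roughly:
 *
 *    struct kiocb kiocb;
 *    ssize_t ret;
 *
 *    init_sync_kiocb(&kiocb, filp);
 *    kiocb.ki_pos = *ppos;
 *    kiocb.ki_left = len;
 *    ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos);
 *    if (ret == -EIOCBQUEUED)
 *        ret = wait_on_sync_kiocb(&kiocb);
 */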

/* exit_aio: called when the last user of mm goes away. At this point,
 * there is no way for any new requests to be submitted or any of the
 * io_* syscalls to be called on the context. However, there may be
 * outstanding requests which hold references to the context; as they
 * go away, they will call put_ioctx and release any pinned memory
 * associated with the request (held via struct page * references).
 */
void exit_aio(struct mm_struct *mm)
{
    struct kioctx *ctx;

    while (!hlist_empty(&mm->ioctx_list)) {
        ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
        hlist_del_rcu(&ctx->list);

        aio_cancel_all(ctx);

        wait_for_all_aios(ctx);
        /*
         * Ensure we don't leave the ctx on the aio_wq
         */
        cancel_work_sync(&ctx->wq.work);

        if (1 != atomic_read(&ctx->users))
            printk(KERN_DEBUG
                "exit_aio: ioctx still alive: %d %d %d\n",
                atomic_read(&ctx->users), ctx->dead,
                ctx->reqs_active);
        put_ioctx(ctx);
    }
}

/* aio_get_req
 * Allocate a slot for an aio request. Increments the users count
 * of the kioctx so that the kioctx stays around until all requests are
 * complete. Returns NULL if no requests are free.
 *
 * Returns with kiocb->users set to 2. The io submit code path holds
 * an extra reference while submitting the i/o.
 * This prevents races between the aio code path referencing the
 * req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
    struct kiocb *req = NULL;
    struct aio_ring *ring;
    int okay = 0;

    req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
    if (unlikely(!req))
        return NULL;

    req->ki_flags = 0;
    req->ki_users = 2;
    req->ki_key = 0;
    req->ki_ctx = ctx;
    req->ki_cancel = NULL;
    req->ki_retry = NULL;
    req->ki_dtor = NULL;
    req->private = NULL;
    req->ki_iovec = NULL;
    INIT_LIST_HEAD(&req->ki_run_list);
    req->ki_eventfd = NULL;

    /* Check if the completion queue has enough free space to
     * accept an event from this io.
     */
    spin_lock_irq(&ctx->ctx_lock);
    ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
    if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
        list_add(&req->ki_list, &ctx->active_reqs);
        ctx->reqs_active++;
        okay = 1;
    }
    kunmap_atomic(ring, KM_USER0);
    spin_unlock_irq(&ctx->ctx_lock);

    if (!okay) {
        kmem_cache_free(kiocb_cachep, req);
        req = NULL;
    }

    return req;
}
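
/*
 * Illustrative lifetime of those two references (a sketch):
 *
 *    req = aio_get_req(ctx);        ki_users == 2
 *    <submit i/o; it may complete at any point from here on>
 *    aio_complete(req, res, res2);  drops the i/o reference
 *    aio_put_req(req);              drops the submit path's reference;
 *                                   only now can the kiocb be freed
 */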

static inline struct kiocb *aio_get_req(struct kioctx *ctx)
{
    struct kiocb *req;
    /* Handle a potential starvation case -- should be exceedingly rare as
     * requests will be stuck on fput_head only if the aio_fput_routine is
     * delayed and the requests were the last user of the struct file.
     */
    req = __aio_get_req(ctx);
    if (unlikely(NULL == req)) {
        aio_fput_routine(NULL);
        req = __aio_get_req(ctx);
    }
    return req;
}

static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
{
    assert_spin_locked(&ctx->ctx_lock);

    if (req->ki_eventfd != NULL)
        eventfd_ctx_put(req->ki_eventfd);
    if (req->ki_dtor)
        req->ki_dtor(req);
    if (req->ki_iovec != &req->ki_inline_vec)
        kfree(req->ki_iovec);
    kmem_cache_free(kiocb_cachep, req);
    ctx->reqs_active--;

    if (unlikely(!ctx->reqs_active && ctx->dead))
        wake_up_all(&ctx->wait);
}

static void aio_fput_routine(struct work_struct *data)
{
    spin_lock_irq(&fput_lock);
    while (likely(!list_empty(&fput_head))) {
        struct kiocb *req = list_kiocb(fput_head.next);
        struct kioctx *ctx = req->ki_ctx;

        list_del(&req->ki_list);
        spin_unlock_irq(&fput_lock);

        /* Complete the fput(s) */
        if (req->ki_filp != NULL)
            fput(req->ki_filp);

        /* Link the iocb into the context's free list */
        spin_lock_irq(&ctx->ctx_lock);
        really_put_req(ctx, req);
        spin_unlock_irq(&ctx->ctx_lock);

        put_ioctx(ctx);
        spin_lock_irq(&fput_lock);
    }
    spin_unlock_irq(&fput_lock);
}

/* __aio_put_req
 * Returns true if this put was the last user of the request.
 */
static int __aio_put_req(struct kioctx *ctx, struct kiocb *req)
{
    dprintk(KERN_DEBUG "aio_put(%p): f_count=%ld\n",
        req, atomic_long_read(&req->ki_filp->f_count));

    assert_spin_locked(&ctx->ctx_lock);

    req->ki_users--;
    BUG_ON(req->ki_users < 0);
    if (likely(req->ki_users))
        return 0;
    list_del(&req->ki_list); /* remove from active_reqs */
    req->ki_cancel = NULL;
    req->ki_retry = NULL;

    /*
     * Try to optimize the aio and eventfd file* puts, by avoiding the
     * need to schedule work in case it is not final fput() time. In
     * normal cases, we would not be holding the last reference to the
     * file*, so this function will be executed w/out any aio kthread
     * wakeup.
     */
    if (unlikely(!fput_atomic(req->ki_filp))) {
        get_ioctx(ctx);
        spin_lock(&fput_lock);
        list_add(&req->ki_list, &fput_head);
        spin_unlock(&fput_lock);
        schedule_work(&fput_work);
    } else {
        req->ki_filp = NULL;
        really_put_req(ctx, req);
    }
    return 1;
}

/* aio_put_req
 * Returns true if this put was the last user of the kiocb,
 * false if the request is still in use.
 */
int aio_put_req(struct kiocb *req)
{
    struct kioctx *ctx = req->ki_ctx;
    int ret;
    spin_lock_irq(&ctx->ctx_lock);
    ret = __aio_put_req(ctx, req);
    spin_unlock_irq(&ctx->ctx_lock);
    return ret;
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
    struct mm_struct *mm = current->mm;
    struct kioctx *ctx, *ret = NULL;
    struct hlist_node *n;

    rcu_read_lock();

    hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
        /*
         * RCU protects us against accessing freed memory but
         * we have to be careful not to get a reference when the
         * reference count already dropped to 0 (ctx->dead test
         * is unreliable because of races).
         */
        if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)) {
            ret = ctx;
            break;
        }
    }

    rcu_read_unlock();
    return ret;
}
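
/*
 * Why try_get_ioctx() rather than get_ioctx() above -- an illustrative
 * race (a sketch):
 *
 *    CPU 0 (lookup)                 CPU 1 (teardown)
 *    rcu_read_lock()
 *    finds ctx on ioctx_list        put_ioctx() drops the last ref;
 *                                   users reaches 0, call_rcu() queued
 *    atomic_inc_not_zero() fails,
 *    lookup skips this ctx          ctx freed after the grace period
 *
 * A plain atomic_inc() on CPU 0 would have resurrected a context whose
 * teardown had already begun.
 */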

/*
 * Queue up a kiocb to be retried. Assumes that the kiocb
 * has already been marked as kicked, and places it on
 * the retry run list for the corresponding ioctx, if it
 * isn't already queued. Returns 1 if it actually queued
 * the kiocb (to tell the caller to activate the work
 * queue to process it), or 0, if it found that it was
 * already queued.
 */
static inline int __queue_kicked_iocb(struct kiocb *iocb)
{
    struct kioctx *ctx = iocb->ki_ctx;

    assert_spin_locked(&ctx->ctx_lock);

    if (list_empty(&iocb->ki_run_list)) {
        list_add_tail(&iocb->ki_run_list,
            &ctx->run_list);
        return 1;
    }
    return 0;
}

/* aio_run_iocb
 * This is the core aio execution routine. It is
 * invoked both for initial i/o submission and
 * subsequent retries via the aio_kick_handler.
 * Expects to be invoked with iocb->ki_ctx->lock
 * already held. The lock is released and reacquired
 * as needed during processing.
 *
 * Calls the iocb retry method (already setup for the
 * iocb on initial submission) for operation specific
 * handling, but takes care of most of common retry
 * execution details for a given iocb. The retry method
 * needs to be non-blocking as far as possible, to avoid
 * holding up other iocbs waiting to be serviced by the
 * retry kernel thread.
 *
 * The trickier parts in this code have to do with
 * ensuring that only one retry instance is in progress
 * for a given iocb at any time. Providing that guarantee
 * simplifies the coding of individual aio operations as
 * it avoids various potential races.
 */
static ssize_t aio_run_iocb(struct kiocb *iocb)
{
    struct kioctx *ctx = iocb->ki_ctx;
    ssize_t (*retry)(struct kiocb *);
    ssize_t ret;

    if (!(retry = iocb->ki_retry)) {
        printk("aio_run_iocb: iocb->ki_retry = NULL\n");
        return 0;
    }

    /*
     * We don't want the next retry iteration for this
     * operation to start until this one has returned and
     * updated the iocb state. However, wait_queue functions
     * can trigger a kick_iocb from interrupt context in the
     * meantime, indicating that data is available for the next
     * iteration. We want to remember that and enable the
     * next retry iteration _after_ we are through with
     * this one.
     *
     * So, in order to be able to register a "kick", but
     * prevent it from being queued now, we clear the kick
     * flag, but make the kick code *think* that the iocb is
     * still on the run list until we are actually done.
     * When we are done with this iteration, we check if
     * the iocb was kicked in the meantime and if so, queue
     * it up afresh.
     */

    kiocbClearKicked(iocb);

    /*
     * This is so that aio_complete knows it doesn't need to
     * pull the iocb off the run list (We can't just call
     * INIT_LIST_HEAD because we don't want a kick_iocb to
     * queue this on the run list yet)
     */
    iocb->ki_run_list.next = iocb->ki_run_list.prev = NULL;
    spin_unlock_irq(&ctx->ctx_lock);

    /* Quit retrying if the i/o has been cancelled */
    if (kiocbIsCancelled(iocb)) {
        ret = -EINTR;
        aio_complete(iocb, ret, 0);
        /* must not access the iocb after this */
        goto out;
    }

    /*
     * Now we are all set to call the retry method in async
     * context.
     */
    ret = retry(iocb);

    if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) {
        /*
         * There's no easy way to restart the syscall since other AIO's
         * may be already running. Just fail this IO with EINTR.
         */
        if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
                 ret == -ERESTARTNOHAND || ret == -ERESTART_RESTARTBLOCK))
            ret = -EINTR;
        aio_complete(iocb, ret, 0);
    }
out:
    spin_lock_irq(&ctx->ctx_lock);

    if (-EIOCBRETRY == ret) {
        /*
         * OK, now that we are done with this iteration
         * and know that there is more left to go,
         * this is where we let go so that a subsequent
         * "kick" can start the next iteration
         */

        /* will make __queue_kicked_iocb succeed from here on */
        INIT_LIST_HEAD(&iocb->ki_run_list);
        /* we must queue the next iteration ourselves, if it
         * has already been kicked */
        if (kiocbIsKicked(iocb)) {
            __queue_kicked_iocb(iocb);

            /*
             * __queue_kicked_iocb will always return 1 here, because
             * iocb->ki_run_list is empty at this point so it should
             * be safe to unconditionally queue the context into the
             * work queue.
             */
            aio_queue_work(ctx);
        }
    }
    return ret;
}

/*
 * __aio_run_iocbs:
 * Process all pending retries queued on the ioctx
 * run list.
 * Assumes it is operating within the aio issuer's mm
 * context.
 */
static int __aio_run_iocbs(struct kioctx *ctx)
{
    struct kiocb *iocb;
    struct list_head run_list;

    assert_spin_locked(&ctx->ctx_lock);

    list_replace_init(&ctx->run_list, &run_list);
    while (!list_empty(&run_list)) {
        iocb = list_entry(run_list.next, struct kiocb,
            ki_run_list);
        list_del(&iocb->ki_run_list);
        /*
         * Hold an extra reference while retrying i/o.
         */
        iocb->ki_users++; /* grab extra reference */
        aio_run_iocb(iocb);
        __aio_put_req(ctx, iocb);
    }
    if (!list_empty(&ctx->run_list))
        return 1;
    return 0;
}

static void aio_queue_work(struct kioctx * ctx)
{
    unsigned long timeout;
    /*
     * if someone is waiting, get the work started right
     * away, otherwise, use a longer delay
     */
    smp_mb();
    if (waitqueue_active(&ctx->wait))
        timeout = 1;
    else
        timeout = HZ/10;
    queue_delayed_work(aio_wq, &ctx->wq, timeout);
}

/*
 * aio_run_all_iocbs:
 * Process all pending retries queued on the ioctx
 * run list, and keep running them until the list
 * stays empty.
 * Assumes it is operating within the aio issuer's mm context.
 */
static inline void aio_run_all_iocbs(struct kioctx *ctx)
{
    spin_lock_irq(&ctx->ctx_lock);
    while (__aio_run_iocbs(ctx))
        ;
    spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * aio_kick_handler:
 * Work queue handler triggered to process pending
 * retries on an ioctx. Takes on the aio issuer's
 * mm context before running the iocbs, so that
 * copy_xxx_user operates on the issuer's address
 * space.
 * Run on aiod's context.
 */
static void aio_kick_handler(struct work_struct *work)
{
    struct kioctx *ctx = container_of(work, struct kioctx, wq.work);
    mm_segment_t oldfs = get_fs();
    struct mm_struct *mm;
    int requeue;

    set_fs(USER_DS);
    use_mm(ctx->mm);
    spin_lock_irq(&ctx->ctx_lock);
    requeue = __aio_run_iocbs(ctx);
    mm = ctx->mm;
    spin_unlock_irq(&ctx->ctx_lock);
    unuse_mm(mm);
    set_fs(oldfs);
    /*
     * we're in a worker thread already; don't use queue_delayed_work.
     */
    if (requeue)
        queue_delayed_work(aio_wq, &ctx->wq, 0);
}


/*
 * Called by kick_iocb to queue the kiocb for retry
 * and if required activate the aio work queue to process
 * it
 */
static void try_queue_kicked_iocb(struct kiocb *iocb)
{
    struct kioctx *ctx = iocb->ki_ctx;
    unsigned long flags;
    int run = 0;

    spin_lock_irqsave(&ctx->ctx_lock, flags);
    /* set this inside the lock so that we can't race with aio_run_iocb()
     * testing it and putting the iocb on the run list under the lock */
    if (!kiocbTryKick(iocb))
        run = __queue_kicked_iocb(iocb);
    spin_unlock_irqrestore(&ctx->ctx_lock, flags);
    if (run)
        aio_queue_work(ctx);
}

/*
 * kick_iocb:
 * Called typically from a wait queue callback context
 * to trigger a retry of the iocb.
 * The retry is usually executed by aio workqueue
 * threads (See aio_kick_handler).
 */
void kick_iocb(struct kiocb *iocb)
{
    /* sync iocbs are easy: they can only ever be executing from a
     * single context. */
    if (is_sync_kiocb(iocb)) {
        kiocbSetKicked(iocb);
        wake_up_process(iocb->ki_obj.tsk);
        return;
    }

    try_queue_kicked_iocb(iocb);
}
EXPORT_SYMBOL(kick_iocb);

/* aio_complete
 * Called when the io request on the given iocb is complete.
 * Returns true if this is the last user of the request. The
 * only other user of the request can be the cancellation code.
 */
int aio_complete(struct kiocb *iocb, long res, long res2)
{
    struct kioctx *ctx = iocb->ki_ctx;
    struct aio_ring_info *info;
    struct aio_ring *ring;
    struct io_event *event;
    unsigned long flags;
    unsigned long tail;
    int ret;

    /*
     * Special case handling for sync iocbs:
     * - events go directly into the iocb for fast handling
     * - the sync task with the iocb in its stack holds the single iocb
     *   ref, no other paths have a way to get another ref
     * - the sync task helpfully left a reference to itself in the iocb
     */
    if (is_sync_kiocb(iocb)) {
        BUG_ON(iocb->ki_users != 1);
        iocb->ki_user_data = res;
        iocb->ki_users = 0;
        wake_up_process(iocb->ki_obj.tsk);
        return 1;
    }

    info = &ctx->ring_info;

    /* add a completion event to the ring buffer.
     * must be done holding ctx->ctx_lock to prevent
     * other code from messing with the tail
     * pointer since we might be called from irq
     * context.
     */
    spin_lock_irqsave(&ctx->ctx_lock, flags);

    if (iocb->ki_run_list.prev && !list_empty(&iocb->ki_run_list))
        list_del_init(&iocb->ki_run_list);

    /*
     * cancelled requests don't get events, userland was given one
     * when the event got cancelled.
     */
    if (kiocbIsCancelled(iocb))
        goto put_rq;

    ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);

    tail = info->tail;
    event = aio_ring_event(info, tail, KM_IRQ0);
    if (++tail >= info->nr)
        tail = 0;

    event->obj = (u64)(unsigned long)iocb->ki_obj.user;
    event->data = iocb->ki_user_data;
    event->res = res;
    event->res2 = res2;

    dprintk("aio_complete: %p[%lu]: %p: %p %Lx %lx %lx\n",
        ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
        res, res2);

    /* after flagging the request as done, we
     * must never even look at it again
     */
    smp_wmb(); /* make event visible before updating tail */

    info->tail = tail;
    ring->tail = tail;

    put_aio_ring_event(event, KM_IRQ0);
    kunmap_atomic(ring, KM_IRQ1);

    pr_debug("added to ring %p at [%lu]\n", iocb, tail);

    /*
     * Check if the user asked us to deliver the result through an
     * eventfd. The eventfd_signal() function is safe to be called
     * from IRQ context.
     */
    if (iocb->ki_eventfd != NULL)
        eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
    /* everything turned out well, dispose of the aiocb. */
    ret = __aio_put_req(ctx, iocb);

    /*
     * We have to order our ring_info tail store above and test
     * of the wait list below outside the wait lock. This is
     * like in wake_up_bit() where clearing a bit has to be
     * ordered with the unlocked test.
     */
    smp_mb();

    if (waitqueue_active(&ctx->wait))
        wake_up(&ctx->wait);

    spin_unlock_irqrestore(&ctx->ctx_lock, flags);
    return ret;
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 * Pull an event off of the ioctx's event ring. Returns the number of
 * events fetched (0 or 1 ;-)
 * FIXME: make this use cmpxchg.
 * TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
    struct aio_ring_info *info = &ioctx->ring_info;
    struct aio_ring *ring;
    unsigned long head;
    int ret = 0;

    ring = kmap_atomic(info->ring_pages[0], KM_USER0);
    dprintk("in aio_read_evt h%lu t%lu m%lu\n",
         (unsigned long)ring->head, (unsigned long)ring->tail,
         (unsigned long)ring->nr);

    if (ring->head == ring->tail)
        goto out;

    spin_lock(&info->ring_lock);

    head = ring->head % info->nr;
    if (head != ring->tail) {
        struct io_event *evp = aio_ring_event(info, head, KM_USER1);
        *ent = *evp;
        head = (head + 1) % info->nr;
        smp_mb(); /* finish reading the event before updating the head */
        ring->head = head;
        ret = 1;
        put_aio_ring_event(evp, KM_USER1);
    }
    spin_unlock(&info->ring_lock);

out:
    kunmap_atomic(ring, KM_USER0);
    dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
         (unsigned long)ring->head, (unsigned long)ring->tail);
    return ret;
}
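
/*
 * Ring consumption at a glance (illustrative, with nr == 8):
 *
 *    head == tail           -> ring empty, nothing to copy out
 *    head == 2, tail == 5   -> events at slots 2, 3 and 4 are pending;
 *                              each aio_read_evt() copies one out and
 *                              advances head = (head + 1) % nr
 *
 * Only head moves here; aio_complete() is the sole writer of tail.
 */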

struct aio_timeout {
    struct timer_list timer;
    int timed_out;
    struct task_struct *p;
};

static void timeout_func(unsigned long data)
{
    struct aio_timeout *to = (struct aio_timeout *)data;

    to->timed_out = 1;
    wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
    setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
    to->timed_out = 0;
    to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
                   const struct timespec *ts)
{
    to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
    if (time_after(to->timer.expires, jiffies))
        add_timer(&to->timer);
    else
        to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
    del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
            long min_nr, long nr,
            struct io_event __user *event,
            struct timespec __user *timeout)
{
    long start_jiffies = jiffies;
    struct task_struct *tsk = current;
    DECLARE_WAITQUEUE(wait, tsk);
    int ret;
    int i = 0;
    struct io_event ent;
    struct aio_timeout to;
    int retry = 0;

    /* needed to zero any padding within an entry (there shouldn't be
     * any, but C is fun!)
     */
    memset(&ent, 0, sizeof(ent));
retry:
    ret = 0;
    while (likely(i < nr)) {
        ret = aio_read_evt(ctx, &ent);
        if (unlikely(ret <= 0))
            break;

        dprintk("read event: %Lx %Lx %Lx %Lx\n",
            ent.data, ent.obj, ent.res, ent.res2);

        /* Could we split the check in two? */
        ret = -EFAULT;
        if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
            dprintk("aio: lost an event due to EFAULT.\n");
            break;
        }
        ret = 0;

        /* Good, event copied to userland, update counts. */
        event++;
        i++;
    }

    if (min_nr <= i)
        return i;
    if (ret)
        return ret;

    /* End fast path */

    /* racy check, but it gets redone */
    if (!retry && unlikely(!list_empty(&ctx->run_list))) {
        retry = 1;
        aio_run_all_iocbs(ctx);
        goto retry;
    }

    init_timeout(&to);
    if (timeout) {
        struct timespec ts;
        ret = -EFAULT;
        if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
            goto out;

        set_timeout(start_jiffies, &to, &ts);
    }

    while (likely(i < nr)) {
        add_wait_queue_exclusive(&ctx->wait, &wait);
        do {
            set_task_state(tsk, TASK_INTERRUPTIBLE);
            ret = aio_read_evt(ctx, &ent);
            if (ret)
                break;
            if (min_nr <= i)
                break;
            if (unlikely(ctx->dead)) {
                ret = -EINVAL;
                break;
            }
            if (to.timed_out) /* Only check after read evt */
                break;
            /* Try to only show up in io wait if there are ops
             * in flight */
            if (ctx->reqs_active)
                io_schedule();
            else
                schedule();
            if (signal_pending(tsk)) {
                ret = -EINTR;
                break;
            }
            /*ret = aio_read_evt(ctx, &ent);*/
        } while (1);

        set_task_state(tsk, TASK_RUNNING);
        remove_wait_queue(&ctx->wait, &wait);

        if (unlikely(ret <= 0))
            break;

        ret = -EFAULT;
        if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
            dprintk("aio: lost an event due to EFAULT.\n");
            break;
        }

        /* Good, event copied to userland, update counts. */
        event++;
        i++;
    }

    if (timeout)
        clear_timeout(&to);
out:
    destroy_timer_on_stack(&to.timer);
    return i ? i : ret;
}

/* Take an ioctx and remove it from the list of ioctx's. Protects
 * against races with itself via ->dead.
 */
static void io_destroy(struct kioctx *ioctx)
{
    struct mm_struct *mm = current->mm;
    int was_dead;

    /* delete the entry from the list if someone else hasn't already */
    spin_lock(&mm->ioctx_lock);
    was_dead = ioctx->dead;
    ioctx->dead = 1;
    hlist_del_rcu(&ioctx->list);
    spin_unlock(&mm->ioctx_lock);

    dprintk("aio_release(%p)\n", ioctx);
    if (likely(!was_dead))
        put_ioctx(ioctx); /* twice for the list */

    aio_cancel_all(ioctx);
    wait_for_all_aios(ioctx);

    /*
     * Wake up any waiters. The setting of ctx->dead must be seen
     * by other CPUs at this point. Right now, we rely on the
     * locking done by the above calls to ensure this consistency.
     */
    wake_up_all(&ioctx->wait);
    put_ioctx(ioctx); /* once for the lookup */
}

/* sys_io_setup:
 * Create an aio_context capable of receiving at least nr_events.
 * ctxp must not point to an aio_context that already exists, and
 * must be initialized to 0 prior to the call. On successful
 * creation of the aio_context, *ctxp is filled in with the resulting
 * handle. May fail with -EINVAL if *ctxp is not initialized,
 * if the specified nr_events exceeds internal limits. May fail
 * with -EAGAIN if the specified nr_events exceeds the user's limit
 * of available events. May fail with -ENOMEM if insufficient kernel
 * resources are available. May fail with -EFAULT if an invalid
 * pointer is passed for ctxp. Will fail with -ENOSYS if not
 * implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
    struct kioctx *ioctx = NULL;
    unsigned long ctx;
    long ret;

    ret = get_user(ctx, ctxp);
    if (unlikely(ret))
        goto out;

    ret = -EINVAL;
    if (unlikely(ctx || nr_events == 0)) {
        pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
                 ctx, nr_events);
        goto out;
    }

    ioctx = ioctx_alloc(nr_events);
    ret = PTR_ERR(ioctx);
    if (!IS_ERR(ioctx)) {
        ret = put_user(ioctx->user_id, ctxp);
        if (!ret)
            return 0;

        get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
        io_destroy(ioctx);
    }

out:
    return ret;
}
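
/*
 * Userspace view (an illustrative sketch; the raw syscall(2) form is
 * shown, libaio provides nicer wrappers):
 *
 *    aio_context_t ctx = 0;		// must be zeroed before the call
 *
 *    if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *        perror("io_setup");
 */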

/* sys_io_destroy:
 * Destroy the aio_context specified. May cancel any outstanding
 * AIOs and block on completion. Will fail with -ENOSYS if not
 * implemented. May fail with -EINVAL if the context pointed to
 * is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
    struct kioctx *ioctx = lookup_ioctx(ctx);
    if (likely(NULL != ioctx)) {
        io_destroy(ioctx);
        return 0;
    }
    pr_debug("EINVAL: io_destroy: invalid context id\n");
    return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
    struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

    BUG_ON(ret <= 0);

    while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
        ssize_t this = min((ssize_t)iov->iov_len, ret);
        iov->iov_base += this;
        iov->iov_len -= this;
        iocb->ki_left -= this;
        ret -= this;
        if (iov->iov_len == 0) {
            iocb->ki_cur_seg++;
            iov++;
        }
    }

    /* the caller should not have done more io than what fit in
     * the remaining iovecs */
    BUG_ON(ret > 0 && iocb->ki_left == 0);
}
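
/*
 * Worked example (illustrative): two segments of 100 and 50 bytes, and
 * the last transfer returned ret = 120. The loop consumes all of
 * segment 0 (ret 120 -> 20, ki_cur_seg -> 1), then 20 bytes of segment
 * 1, leaving iov_len == 30 and ki_left reduced by 120, so a retried
 * transfer resumes exactly where the previous one stopped.
 */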

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
    struct file *file = iocb->ki_filp;
    struct address_space *mapping = file->f_mapping;
    struct inode *inode = mapping->host;
    ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
             unsigned long, loff_t);
    ssize_t ret = 0;
    unsigned short opcode;

    if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
        (iocb->ki_opcode == IOCB_CMD_PREAD)) {
        rw_op = file->f_op->aio_read;
        opcode = IOCB_CMD_PREADV;
    } else {
        rw_op = file->f_op->aio_write;
        opcode = IOCB_CMD_PWRITEV;
    }

    /* This matches the pread()/pwrite() logic */
    if (iocb->ki_pos < 0)
        return -EINVAL;

    do {
        ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
                iocb->ki_nr_segs - iocb->ki_cur_seg,
                iocb->ki_pos);
        if (ret > 0)
            aio_advance_iovec(iocb, ret);

    /* retry all partial writes. retry partial reads as long as it's a
     * regular file. */
    } while (ret > 0 && iocb->ki_left > 0 &&
         (opcode == IOCB_CMD_PWRITEV ||
          (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));

    /* This means we must have transferred all that we could */
    /* No need to retry anymore */
    if ((ret == 0) || (iocb->ki_left == 0))
        ret = iocb->ki_nbytes - iocb->ki_left;

    /* If we managed to write some out we return that, rather than
     * the eventual error. */
    if (opcode == IOCB_CMD_PWRITEV
        && ret < 0 && ret != -EIOCBQUEUED && ret != -EIOCBRETRY
        && iocb->ki_nbytes - iocb->ki_left)
        ret = iocb->ki_nbytes - iocb->ki_left;

    return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
    struct file *file = iocb->ki_filp;
    ssize_t ret = -EINVAL;

    if (file->f_op->aio_fsync)
        ret = file->f_op->aio_fsync(iocb, 1);
    return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
    struct file *file = iocb->ki_filp;
    ssize_t ret = -EINVAL;

    if (file->f_op->aio_fsync)
        ret = file->f_op->aio_fsync(iocb, 0);
    return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
    ssize_t ret;

#ifdef CONFIG_COMPAT
    if (compat)
        ret = compat_rw_copy_check_uvector(type,
                (struct compat_iovec __user *)kiocb->ki_buf,
                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
                &kiocb->ki_iovec);
    else
#endif
        ret = rw_copy_check_uvector(type,
                (struct iovec __user *)kiocb->ki_buf,
                kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
                &kiocb->ki_iovec);
    if (ret < 0)
        goto out;

    kiocb->ki_nr_segs = kiocb->ki_nbytes;
    kiocb->ki_cur_seg = 0;
    /* ki_nbytes/left now reflect bytes instead of segs */
    kiocb->ki_nbytes = ret;
    kiocb->ki_left = ret;

    ret = 0;
out:
    return ret;
}

static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
{
    kiocb->ki_iovec = &kiocb->ki_inline_vec;
    kiocb->ki_iovec->iov_base = kiocb->ki_buf;
    kiocb->ki_iovec->iov_len = kiocb->ki_left;
    kiocb->ki_nr_segs = 1;
    kiocb->ki_cur_seg = 0;
    return 0;
}

/*
 * aio_setup_iocb:
 * Performs the initial checks and aio retry method
 * setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
    struct file *file = kiocb->ki_filp;
    ssize_t ret = 0;

    switch (kiocb->ki_opcode) {
    case IOCB_CMD_PREAD:
        ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_READ)))
            break;
        ret = -EFAULT;
        if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
            kiocb->ki_left)))
            break;
        ret = security_file_permission(file, MAY_READ);
        if (unlikely(ret))
            break;
        ret = aio_setup_single_vector(kiocb);
        if (ret)
            break;
        ret = -EINVAL;
        if (file->f_op->aio_read)
            kiocb->ki_retry = aio_rw_vect_retry;
        break;
    case IOCB_CMD_PWRITE:
        ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_WRITE)))
            break;
        ret = -EFAULT;
        if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
            kiocb->ki_left)))
            break;
        ret = security_file_permission(file, MAY_WRITE);
        if (unlikely(ret))
            break;
        ret = aio_setup_single_vector(kiocb);
        if (ret)
            break;
        ret = -EINVAL;
        if (file->f_op->aio_write)
            kiocb->ki_retry = aio_rw_vect_retry;
        break;
    case IOCB_CMD_PREADV:
        ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_READ)))
            break;
        ret = security_file_permission(file, MAY_READ);
        if (unlikely(ret))
            break;
        ret = aio_setup_vectored_rw(READ, kiocb, compat);
        if (ret)
            break;
        ret = -EINVAL;
        if (file->f_op->aio_read)
            kiocb->ki_retry = aio_rw_vect_retry;
        break;
    case IOCB_CMD_PWRITEV:
        ret = -EBADF;
        if (unlikely(!(file->f_mode & FMODE_WRITE)))
            break;
        ret = security_file_permission(file, MAY_WRITE);
        if (unlikely(ret))
            break;
        ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
        if (ret)
            break;
        ret = -EINVAL;
        if (file->f_op->aio_write)
            kiocb->ki_retry = aio_rw_vect_retry;
        break;
    case IOCB_CMD_FDSYNC:
        ret = -EINVAL;
        if (file->f_op->aio_fsync)
            kiocb->ki_retry = aio_fdsync;
        break;
    case IOCB_CMD_FSYNC:
        ret = -EINVAL;
        if (file->f_op->aio_fsync)
            kiocb->ki_retry = aio_fsync;
        break;
    default:
        dprintk("EINVAL: io_submit: no operation provided\n");
        ret = -EINVAL;
    }

    if (!kiocb->ki_retry)
        return ret;

    return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
             struct iocb *iocb, bool compat)
{
    struct kiocb *req;
    struct file *file;
    ssize_t ret;

    /* enforce forwards compatibility on users */
    if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
        pr_debug("EINVAL: io_submit: reserve field set\n");
        return -EINVAL;
    }

    /* prevent overflows */
    if (unlikely(
        (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
        (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
        ((ssize_t)iocb->aio_nbytes < 0)
       )) {
        pr_debug("EINVAL: io_submit: overflow check\n");
        return -EINVAL;
    }

    file = fget(iocb->aio_fildes);
    if (unlikely(!file))
        return -EBADF;

    req = aio_get_req(ctx); /* returns with 2 references to req */
    if (unlikely(!req)) {
        fput(file);
        return -EAGAIN;
    }
    req->ki_filp = file;
    if (iocb->aio_flags & IOCB_FLAG_RESFD) {
        /*
         * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
         * instance of the file* now. The file descriptor must be
         * an eventfd() fd, and will be signaled for each completed
         * event using the eventfd_signal() function.
         */
        req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
        if (IS_ERR(req->ki_eventfd)) {
            ret = PTR_ERR(req->ki_eventfd);
            req->ki_eventfd = NULL;
            goto out_put_req;
        }
    }

    ret = put_user(req->ki_key, &user_iocb->aio_key);
    if (unlikely(ret)) {
        dprintk("EFAULT: aio_key\n");
        goto out_put_req;
    }

    req->ki_obj.user = user_iocb;
    req->ki_user_data = iocb->aio_data;
    req->ki_pos = iocb->aio_offset;

    req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
    req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
    req->ki_opcode = iocb->aio_lio_opcode;

    ret = aio_setup_iocb(req, compat);

    if (ret)
        goto out_put_req;

    spin_lock_irq(&ctx->ctx_lock);
    /*
     * We could have raced with io_destroy() and are currently holding a
     * reference to ctx which should be destroyed. We cannot submit IO
     * since ctx gets freed as soon as io_submit() puts its reference. The
     * check here is reliable: io_destroy() sets ctx->dead before waiting
     * for outstanding IO and the barrier between these two is realized by
     * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
     * increment ctx->reqs_active before checking for ctx->dead and the
     * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
     * don't see ctx->dead set here, io_destroy() waits for our IO to
     * finish.
     */
    if (ctx->dead) {
        spin_unlock_irq(&ctx->ctx_lock);
        ret = -EINVAL;
        goto out_put_req;
    }
    aio_run_iocb(req);
    if (!list_empty(&ctx->run_list)) {
        /* drain the run list */
        while (__aio_run_iocbs(ctx))
            ;
    }
    spin_unlock_irq(&ctx->ctx_lock);

    aio_put_req(req); /* drop extra ref to req */
    return 0;

out_put_req:
    aio_put_req(req); /* drop extra ref to req */
    aio_put_req(req); /* drop i/o ref to req */
    return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
          struct iocb __user *__user *iocbpp, bool compat)
{
    struct kioctx *ctx;
    long ret = 0;
    int i;
    struct blk_plug plug;

    if (unlikely(nr < 0))
        return -EINVAL;

    if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
        nr = LONG_MAX/sizeof(*iocbpp);

    if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
        return -EFAULT;

    ctx = lookup_ioctx(ctx_id);
    if (unlikely(!ctx)) {
        pr_debug("EINVAL: io_submit: invalid context id\n");
        return -EINVAL;
    }

    blk_start_plug(&plug);

    /*
     * AKPM: should this return a partial result if some of the IOs were
     * successfully submitted?
     */
    for (i=0; i<nr; i++) {
        struct iocb __user *user_iocb;
        struct iocb tmp;

        if (unlikely(__get_user(user_iocb, iocbpp + i))) {
            ret = -EFAULT;
            break;
        }

        if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
            ret = -EFAULT;
            break;
        }

        ret = io_submit_one(ctx, user_iocb, &tmp, compat);
        if (ret)
            break;
    }
    blk_finish_plug(&plug);

    put_ioctx(ctx);
    return i ? i : ret;
}

/* sys_io_submit:
 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
 * the number of iocbs queued. May return -EINVAL if the aio_context
 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
 * *iocbpp[0] is not properly initialized, if the operation specified
 * is invalid for the file descriptor in the iocb. May fail with
 * -EFAULT if any of the data structures point to invalid data. May
 * fail with -EBADF if the file descriptor specified in the first
 * iocb is invalid. May fail with -EAGAIN if insufficient resources
 * are available to queue any iocbs. Will return 0 if nr is 0. Will
 * fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
        struct iocb __user * __user *, iocbpp)
{
    return do_io_submit(ctx_id, nr, iocbpp, 0);
}
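
/*
 * Userspace view (an illustrative sketch, continuing the io_setup
 * example above; fd is an open descriptor and buf a 4096-byte buffer):
 *
 *    struct iocb cb, *cbs[1] = { &cb };
 *
 *    memset(&cb, 0, sizeof(cb));
 *    cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *    cb.aio_fildes = fd;
 *    cb.aio_buf = (__u64)(unsigned long)buf;
 *    cb.aio_nbytes = 4096;
 *    cb.aio_offset = 0;
 *    if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *        perror("io_submit");
 */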

/* lookup_kiocb
 * Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
                  u32 key)
{
    struct list_head *pos;

    assert_spin_locked(&ctx->ctx_lock);

    /* TODO: use a hash or array, this sucks. */
    list_for_each(pos, &ctx->active_reqs) {
        struct kiocb *kiocb = list_kiocb(pos);
        if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
            return kiocb;
    }
    return NULL;
}

/* sys_io_cancel:
 * Attempts to cancel an iocb previously passed to io_submit. If
 * the operation is successfully cancelled, the resulting event is
 * copied into the memory pointed to by result without being placed
 * into the completion queue and 0 is returned. May fail with
 * -EFAULT if any of the data structures pointed to are invalid.
 * May fail with -EINVAL if aio_context specified by ctx_id is
 * invalid. May fail with -EAGAIN if the iocb specified was not
 * cancelled. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
        struct io_event __user *, result)
{
    int (*cancel)(struct kiocb *iocb, struct io_event *res);
    struct kioctx *ctx;
    struct kiocb *kiocb;
    u32 key;
    int ret;

    ret = get_user(key, &iocb->aio_key);
    if (unlikely(ret))
        return -EFAULT;

    ctx = lookup_ioctx(ctx_id);
    if (unlikely(!ctx))
        return -EINVAL;

    spin_lock_irq(&ctx->ctx_lock);
    ret = -EAGAIN;
    kiocb = lookup_kiocb(ctx, iocb, key);
    if (kiocb && kiocb->ki_cancel) {
        cancel = kiocb->ki_cancel;
        kiocb->ki_users++;
        kiocbSetCancelled(kiocb);
    } else
        cancel = NULL;
    spin_unlock_irq(&ctx->ctx_lock);

    if (NULL != cancel) {
        struct io_event tmp;
        pr_debug("calling cancel\n");
        memset(&tmp, 0, sizeof(tmp));
        tmp.obj = (u64)(unsigned long)kiocb->ki_obj.user;
        tmp.data = kiocb->ki_user_data;
        ret = cancel(kiocb, &tmp);
        if (!ret) {
            /* Cancellation succeeded -- copy the result
             * into the user's buffer.
             */
            if (copy_to_user(result, &tmp, sizeof(tmp)))
                ret = -EFAULT;
        }
    } else
        ret = -EINVAL;

    put_ioctx(ctx);

    return ret;
}
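
/*
 * Userspace view (an illustrative sketch): cancelling the iocb
 * submitted above and collecting its result directly.
 *
 *    struct io_event ev;
 *
 *    if (syscall(__NR_io_cancel, ctx, &cb, &ev) < 0)
 *        perror("io_cancel");	// -EAGAIN: too late to cancel
 */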

/* io_getevents:
 * Attempts to read at least min_nr events and up to nr events from
 * the completion queue for the aio_context specified by ctx_id. If
 * it succeeds, the number of read events is returned. May fail with
 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 * out of range, if timeout is out of range. May fail with -EFAULT
 * if any of the memory specified is invalid. May return 0 or
 * < min_nr if the timeout specified by timeout has elapsed
 * before sufficient events are available, where timeout == NULL
 * specifies an infinite timeout. Note that the timeout pointed to by
 * timeout is relative and will be updated if not NULL and the
 * operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
        long, min_nr,
        long, nr,
        struct io_event __user *, events,
        struct timespec __user *, timeout)
{
    struct kioctx *ioctx = lookup_ioctx(ctx_id);
    long ret = -EINVAL;

    if (likely(ioctx)) {
        if (likely(min_nr <= nr && min_nr >= 0))
            ret = read_events(ioctx, min_nr, nr, events, timeout);
        put_ioctx(ioctx);
    }

    asmlinkage_protect(5, ret, ctx_id, min_nr, nr, events, timeout);
    return ret;
}
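
/*
 * Userspace view (an illustrative sketch, completing the examples
 * above): reap one completion, then tear the context down.
 *
 *    struct io_event ev;
 *    struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *    if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, &ts) == 1)
 *        printf("res=%lld\n", (long long)ev.res);
 *    syscall(__NR_io_destroy, ctx);
 */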