1 | /* |
2 | * An async IO implementation for Linux |
3 | * Written by Benjamin LaHaise <bcrl@kvack.org> |
4 | * |
5 | * Implements an efficient asynchronous io interface. |
6 | * |
7 | * Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved. |
8 | * |
9 | * See ../COPYING for licensing terms. |
10 | */ |
11 | #define pr_fmt(fmt) "%s: " fmt, __func__ |
12 | |
13 | #include <linux/kernel.h> |
14 | #include <linux/init.h> |
15 | #include <linux/errno.h> |
16 | #include <linux/time.h> |
17 | #include <linux/aio_abi.h> |
18 | #include <linux/export.h> |
19 | #include <linux/syscalls.h> |
20 | #include <linux/backing-dev.h> |
21 | #include <linux/uio.h> |
22 | |
23 | #include <linux/sched.h> |
24 | #include <linux/fs.h> |
25 | #include <linux/file.h> |
26 | #include <linux/mm.h> |
27 | #include <linux/mman.h> |
28 | #include <linux/mmu_context.h> |
29 | #include <linux/percpu.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/timer.h> |
32 | #include <linux/aio.h> |
33 | #include <linux/highmem.h> |
34 | #include <linux/workqueue.h> |
35 | #include <linux/security.h> |
36 | #include <linux/eventfd.h> |
37 | #include <linux/blkdev.h> |
38 | #include <linux/compat.h> |
39 | #include <linux/migrate.h> |
40 | #include <linux/ramfs.h> |
41 | #include <linux/percpu-refcount.h> |
42 | #include <linux/mount.h> |
43 | |
44 | #include <asm/kmap_types.h> |
45 | #include <asm/uaccess.h> |
46 | |
47 | #include "internal.h" |
48 | |
49 | #define AIO_RING_MAGIC 0xa10a10a1 |
50 | #define AIO_RING_COMPAT_FEATURES 1 |
51 | #define AIO_RING_INCOMPAT_FEATURES 0 |
52 | struct aio_ring { |
53 | unsigned id; /* kernel internal index number */ |
54 | unsigned nr; /* number of io_events */ |
55 | unsigned head; |
56 | unsigned tail; |
57 | |
58 | unsigned magic; |
59 | unsigned compat_features; |
60 | unsigned incompat_features; |
61 | unsigned header_length; /* size of aio_ring */ |
62 | |
63 | |
64 | struct io_event io_events[0]; |
65 | }; /* 128 bytes + ring size */ |
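A minimal userspace sketch of the layout above, assuming a hypothetical user_aio_ring mirror struct and the raw io_setup() syscall: the aio_context_t handle returned by io_setup() is the mmapped address of this ring (see aio_setup_ring() below), so a program can count pending completions without a syscall. Purely illustrative; events should still be reaped with io_getevents(), which is what replenishes the request counters.

	#include <linux/aio_abi.h>

	struct user_aio_ring {			/* mirrors struct aio_ring above */
		unsigned id, nr, head, tail;
		unsigned magic, compat_features, incompat_features, header_length;
		struct io_event io_events[];
	};

	static unsigned ring_pending(aio_context_t ctx)
	{
		struct user_aio_ring *ring = (struct user_aio_ring *)ctx;

		if (ring->magic != 0xa10a10a1 /* AIO_RING_MAGIC */ ||
		    ring->incompat_features != 0)
			return 0;	/* unknown layout: fall back to io_getevents() */
		return (ring->tail + ring->nr - ring->head) % ring->nr;
	}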
66 | |
67 | #define AIO_RING_PAGES 8 |
68 | |
69 | struct kioctx_table { |
70 | struct rcu_head rcu; |
71 | unsigned nr; |
72 | struct kioctx *table[]; |
73 | }; |
74 | |
75 | struct kioctx_cpu { |
76 | unsigned reqs_available; |
77 | }; |
78 | |
79 | struct kioctx { |
80 | struct percpu_ref users; |
81 | atomic_t dead; |
82 | |
83 | struct percpu_ref reqs; |
84 | |
85 | unsigned long user_id; |
86 | |
87 | struct kioctx_cpu __percpu *cpu; |
88 | |
89 | /* |
90 | * For percpu reqs_available, number of slots we move to/from global |
91 | * counter at a time: |
92 | */ |
93 | unsigned req_batch; |
94 | /* |
95 | * This is what userspace passed to io_setup(), it's not used for |
96 | * anything but counting against the global max_reqs quota. |
97 | * |
98 | * The real limit is nr_events - 1, which will be larger (see |
99 | * aio_setup_ring()) |
100 | */ |
101 | unsigned max_reqs; |
102 | |
103 | /* Size of ringbuffer, in units of struct io_event */ |
104 | unsigned nr_events; |
105 | |
106 | unsigned long mmap_base; |
107 | unsigned long mmap_size; |
108 | |
109 | struct page **ring_pages; |
110 | long nr_pages; |
111 | |
112 | struct work_struct free_work; |
113 | |
114 | struct { |
115 | /* |
116 | * This counts the number of available slots in the ringbuffer, |
117 | * so we avoid overflowing it: it's decremented (if positive) |
118 | * when allocating a kiocb and incremented when the resulting |
119 | * io_event is pulled off the ringbuffer. |
120 | * |
121 | * We batch accesses to it with a percpu version. |
122 | */ |
123 | atomic_t reqs_available; |
124 | } ____cacheline_aligned_in_smp; |
125 | |
126 | struct { |
127 | spinlock_t ctx_lock; |
128 | struct list_head active_reqs; /* used for cancellation */ |
129 | } ____cacheline_aligned_in_smp; |
130 | |
131 | struct { |
132 | struct mutex ring_lock; |
133 | wait_queue_head_t wait; |
134 | } ____cacheline_aligned_in_smp; |
135 | |
136 | struct { |
137 | unsigned tail; |
138 | spinlock_t completion_lock; |
139 | } ____cacheline_aligned_in_smp; |
140 | |
141 | struct page *internal_pages[AIO_RING_PAGES]; |
142 | struct file *aio_ring_file; |
143 | |
144 | unsigned id; |
145 | }; |
146 | |
147 | /*------ sysctl variables----*/ |
148 | static DEFINE_SPINLOCK(aio_nr_lock); |
149 | unsigned long aio_nr; /* current system wide number of aio requests */ |
150 | unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */ |
151 | /*----end sysctl variables---*/ |
152 | |
153 | static struct kmem_cache *kiocb_cachep; |
154 | static struct kmem_cache *kioctx_cachep; |
155 | |
156 | static struct vfsmount *aio_mnt; |
157 | |
158 | static const struct file_operations aio_ring_fops; |
159 | static const struct address_space_operations aio_ctx_aops; |
160 | |
161 | static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) |
162 | { |
163 | struct qstr this = QSTR_INIT("[aio]", 5); |
164 | struct file *file; |
165 | struct path path; |
166 | struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb); |
167 | if (IS_ERR(inode)) |
168 | return ERR_CAST(inode); |
169 | |
170 | inode->i_mapping->a_ops = &aio_ctx_aops; |
171 | inode->i_mapping->private_data = ctx; |
172 | inode->i_size = PAGE_SIZE * nr_pages; |
173 | |
174 | path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); |
175 | if (!path.dentry) { |
176 | iput(inode); |
177 | return ERR_PTR(-ENOMEM); |
178 | } |
179 | path.mnt = mntget(aio_mnt); |
180 | |
181 | d_instantiate(path.dentry, inode); |
182 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &aio_ring_fops); |
183 | if (IS_ERR(file)) { |
184 | path_put(&path); |
185 | return file; |
186 | } |
187 | |
188 | file->f_flags = O_RDWR; |
189 | file->private_data = ctx; |
190 | return file; |
191 | } |
192 | |
193 | static struct dentry *aio_mount(struct file_system_type *fs_type, |
194 | int flags, const char *dev_name, void *data) |
195 | { |
196 | static const struct dentry_operations ops = { |
197 | .d_dname = simple_dname, |
198 | }; |
199 | return mount_pseudo(fs_type, "aio:", NULL, &ops, AIO_RING_MAGIC); |
200 | } |
201 | |
202 | /* aio_setup |
203 | * Creates the slab caches used by the aio routines, panic on |
204 | * failure as this is done early during the boot sequence. |
205 | */ |
206 | static int __init aio_setup(void) |
207 | { |
208 | static struct file_system_type aio_fs = { |
209 | .name = "aio", |
210 | .mount = aio_mount, |
211 | .kill_sb = kill_anon_super, |
212 | }; |
213 | aio_mnt = kern_mount(&aio_fs); |
214 | if (IS_ERR(aio_mnt)) |
215 | panic("Failed to create aio fs mount."); |
216 | |
217 | kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
218 | kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC); |
219 | |
220 | pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page)); |
221 | |
222 | return 0; |
223 | } |
224 | __initcall(aio_setup); |
225 | |
226 | static void put_aio_ring_file(struct kioctx *ctx) |
227 | { |
228 | struct file *aio_ring_file = ctx->aio_ring_file; |
229 | if (aio_ring_file) { |
230 | truncate_setsize(aio_ring_file->f_inode, 0); |
231 | |
232 | /* Prevent further access to the kioctx from migratepages */ |
233 | spin_lock(&aio_ring_file->f_inode->i_mapping->private_lock); |
234 | aio_ring_file->f_inode->i_mapping->private_data = NULL; |
235 | ctx->aio_ring_file = NULL; |
236 | spin_unlock(&aio_ring_file->f_inode->i_mapping->private_lock); |
237 | |
238 | fput(aio_ring_file); |
239 | } |
240 | } |
241 | |
242 | static void aio_free_ring(struct kioctx *ctx) |
243 | { |
244 | int i; |
245 | |
246 | for (i = 0; i < ctx->nr_pages; i++) { |
247 | struct page *page; |
248 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, |
249 | page_count(ctx->ring_pages[i])); |
250 | page = ctx->ring_pages[i]; |
251 | if (!page) |
252 | continue; |
253 | ctx->ring_pages[i] = NULL; |
254 | put_page(page); |
255 | } |
256 | |
257 | put_aio_ring_file(ctx); |
258 | |
259 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { |
260 | kfree(ctx->ring_pages); |
261 | ctx->ring_pages = NULL; |
262 | } |
263 | } |
264 | |
265 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) |
266 | { |
267 | vma->vm_ops = &generic_file_vm_ops; |
268 | return 0; |
269 | } |
270 | |
271 | static const struct file_operations aio_ring_fops = { |
272 | .mmap = aio_ring_mmap, |
273 | }; |
274 | |
275 | static int aio_set_page_dirty(struct page *page) |
276 | { |
277 | return 0; |
278 | } |
279 | |
280 | #if IS_ENABLED(CONFIG_MIGRATION) |
281 | static int aio_migratepage(struct address_space *mapping, struct page *new, |
282 | struct page *old, enum migrate_mode mode) |
283 | { |
284 | struct kioctx *ctx; |
285 | unsigned long flags; |
286 | int rc; |
287 | |
288 | rc = 0; |
289 | |
290 | /* Make sure the old page hasn't already been changed */ |
291 | spin_lock(&mapping->private_lock); |
292 | ctx = mapping->private_data; |
293 | if (ctx) { |
294 | pgoff_t idx; |
295 | spin_lock_irqsave(&ctx->completion_lock, flags); |
296 | idx = old->index; |
297 | if (idx < (pgoff_t)ctx->nr_pages) { |
298 | if (ctx->ring_pages[idx] != old) |
299 | rc = -EAGAIN; |
300 | } else |
301 | rc = -EINVAL; |
302 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
303 | } else |
304 | rc = -EINVAL; |
305 | spin_unlock(&mapping->private_lock); |
306 | |
307 | if (rc != 0) |
308 | return rc; |
309 | |
310 | /* Writeback must be complete */ |
311 | BUG_ON(PageWriteback(old)); |
312 | get_page(new); |
313 | |
314 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1); |
315 | if (rc != MIGRATEPAGE_SUCCESS) { |
316 | put_page(new); |
317 | return rc; |
318 | } |
319 | |
320 | /* We can potentially race against kioctx teardown here. Use the |
321 | * address_space's private data lock to protect the mapping's |
322 | * private_data. |
323 | */ |
324 | spin_lock(&mapping->private_lock); |
325 | ctx = mapping->private_data; |
326 | if (ctx) { |
327 | pgoff_t idx; |
328 | spin_lock_irqsave(&ctx->completion_lock, flags); |
329 | migrate_page_copy(new, old); |
330 | idx = old->index; |
331 | if (idx < (pgoff_t)ctx->nr_pages) { |
332 | /* And only do the move if things haven't changed */ |
333 | if (ctx->ring_pages[idx] == old) |
334 | ctx->ring_pages[idx] = new; |
335 | else |
336 | rc = -EAGAIN; |
337 | } else |
338 | rc = -EINVAL; |
339 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
340 | } else |
341 | rc = -EBUSY; |
342 | spin_unlock(&mapping->private_lock); |
343 | |
344 | if (rc == MIGRATEPAGE_SUCCESS) |
345 | put_page(old); |
346 | else |
347 | put_page(new); |
348 | |
349 | return rc; |
350 | } |
351 | #endif |
352 | |
353 | static const struct address_space_operations aio_ctx_aops = { |
354 | .set_page_dirty = aio_set_page_dirty, |
355 | #if IS_ENABLED(CONFIG_MIGRATION) |
356 | .migratepage = aio_migratepage, |
357 | #endif |
358 | }; |
359 | |
360 | static int aio_setup_ring(struct kioctx *ctx) |
361 | { |
362 | struct aio_ring *ring; |
363 | unsigned nr_events = ctx->max_reqs; |
364 | struct mm_struct *mm = current->mm; |
365 | unsigned long size, unused; |
366 | int nr_pages; |
367 | int i; |
368 | struct file *file; |
369 | |
370 | /* Compensate for the ring buffer's head/tail overlap entry */ |
371 | nr_events += 2; /* 1 is required, 2 for good luck */ |
372 | |
373 | size = sizeof(struct aio_ring); |
374 | size += sizeof(struct io_event) * nr_events; |
375 | |
376 | nr_pages = PFN_UP(size); |
377 | if (nr_pages < 0) |
378 | return -EINVAL; |
379 | |
380 | file = aio_private_file(ctx, nr_pages); |
381 | if (IS_ERR(file)) { |
382 | ctx->aio_ring_file = NULL; |
383 | return -EAGAIN; |
384 | } |
385 | |
386 | ctx->aio_ring_file = file; |
387 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) |
388 | / sizeof(struct io_event); |
389 | |
390 | ctx->ring_pages = ctx->internal_pages; |
391 | if (nr_pages > AIO_RING_PAGES) { |
392 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), |
393 | GFP_KERNEL); |
394 | if (!ctx->ring_pages) { |
395 | put_aio_ring_file(ctx); |
396 | return -ENOMEM; |
397 | } |
398 | } |
399 | |
400 | for (i = 0; i < nr_pages; i++) { |
401 | struct page *page; |
402 | page = find_or_create_page(file->f_inode->i_mapping, |
403 | i, GFP_HIGHUSER | __GFP_ZERO); |
404 | if (!page) |
405 | break; |
406 | pr_debug("pid(%d) page[%d]->count=%d\n", |
407 | current->pid, i, page_count(page)); |
408 | SetPageUptodate(page); |
409 | SetPageDirty(page); |
410 | unlock_page(page); |
411 | |
412 | ctx->ring_pages[i] = page; |
413 | } |
414 | ctx->nr_pages = i; |
415 | |
416 | if (unlikely(i != nr_pages)) { |
417 | aio_free_ring(ctx); |
418 | return -EAGAIN; |
419 | } |
420 | |
421 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
422 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); |
423 | |
424 | down_write(&mm->mmap_sem); |
425 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
426 | PROT_READ | PROT_WRITE, |
427 | MAP_SHARED, 0, &unused); |
428 | up_write(&mm->mmap_sem); |
429 | if (IS_ERR((void *)ctx->mmap_base)) { |
430 | ctx->mmap_size = 0; |
431 | aio_free_ring(ctx); |
432 | return -EAGAIN; |
433 | } |
434 | |
435 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
436 | |
437 | ctx->user_id = ctx->mmap_base; |
438 | ctx->nr_events = nr_events; /* trusted copy */ |
439 | |
440 | ring = kmap_atomic(ctx->ring_pages[0]); |
441 | ring->nr = nr_events; /* user copy */ |
442 | ring->id = ~0U; |
443 | ring->head = ring->tail = 0; |
444 | ring->magic = AIO_RING_MAGIC; |
445 | ring->compat_features = AIO_RING_COMPAT_FEATURES; |
446 | ring->incompat_features = AIO_RING_INCOMPAT_FEATURES; |
447 | ring->header_length = sizeof(struct aio_ring); |
448 | kunmap_atomic(ring); |
449 | flush_dcache_page(ctx->ring_pages[0]); |
450 | |
451 | return 0; |
452 | } |
453 | |
454 | #define AIO_EVENTS_PER_PAGE (PAGE_SIZE / sizeof(struct io_event)) |
455 | #define AIO_EVENTS_FIRST_PAGE ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event)) |
456 | #define AIO_EVENTS_OFFSET (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE) |
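A worked example of these constants, assuming 4 KiB pages and the 32-byte struct io_event and 32-byte struct aio_ring header defined above:

	/*
	 *   AIO_EVENTS_PER_PAGE   = 4096 / 32        = 128
	 *   AIO_EVENTS_FIRST_PAGE = (4096 - 32) / 32 = 127   (the header uses one slot)
	 *   AIO_EVENTS_OFFSET     = 128 - 127        = 1
	 *
	 * Event slot i therefore lives in ring page (i + 1) / 128 at index
	 * (i + 1) % 128, which is the "pos" arithmetic used by aio_complete()
	 * and aio_read_events_ring() below.
	 */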
457 | |
458 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) |
459 | { |
460 | struct kioctx *ctx = req->ki_ctx; |
461 | unsigned long flags; |
462 | |
463 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
464 | |
465 | if (!req->ki_list.next) |
466 | list_add(&req->ki_list, &ctx->active_reqs); |
467 | |
468 | req->ki_cancel = cancel; |
469 | |
470 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
471 | } |
472 | EXPORT_SYMBOL(kiocb_set_cancel_fn); |
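A hedged sketch of how a driver's aio_read method might use kiocb_set_cancel_fn(); my_dev_abort() and the use of iocb->private are assumptions for illustration, not an existing API, and completing the cancelled request directly from the callback is done here only to keep the example short.

	#include <linux/aio.h>
	#include <linux/errno.h>
	#include <linux/fs.h>
	#include <linux/uio.h>

	extern void my_dev_abort(void *cookie);	/* hypothetical device abort */

	static int my_cancel(struct kiocb *iocb)
	{
		my_dev_abort(iocb->private);	/* stop the in-flight hardware I/O */
		aio_complete(iocb, -EINTR, 0);	/* the request must still be completed */
		return 0;
	}

	static ssize_t my_aio_read(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
	{
		/* puts the kiocb on ctx->active_reqs so kiocb_cancel() can find it */
		kiocb_set_cancel_fn(iocb, my_cancel);
		/* ... queue the hardware I/O here ... */
		return -EIOCBQUEUED;	/* completion arrives later via aio_complete() */
	}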
473 | |
474 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) |
475 | { |
476 | kiocb_cancel_fn *old, *cancel; |
477 | |
478 | /* |
479 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it |
480 | * actually has a cancel function, hence the cmpxchg() |
481 | */ |
482 | |
483 | cancel = ACCESS_ONCE(kiocb->ki_cancel); |
484 | do { |
485 | if (!cancel || cancel == KIOCB_CANCELLED) |
486 | return -EINVAL; |
487 | |
488 | old = cancel; |
489 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); |
490 | } while (cancel != old); |
491 | |
492 | return cancel(kiocb); |
493 | } |
494 | |
495 | static void free_ioctx(struct work_struct *work) |
496 | { |
497 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); |
498 | |
499 | pr_debug("freeing %p\n", ctx); |
500 | |
501 | aio_free_ring(ctx); |
502 | free_percpu(ctx->cpu); |
503 | kmem_cache_free(kioctx_cachep, ctx); |
504 | } |
505 | |
506 | static void free_ioctx_reqs(struct percpu_ref *ref) |
507 | { |
508 | struct kioctx *ctx = container_of(ref, struct kioctx, reqs); |
509 | |
510 | INIT_WORK(&ctx->free_work, free_ioctx); |
511 | schedule_work(&ctx->free_work); |
512 | } |
513 | |
514 | /* |
515 | * When this function runs, the kioctx has been removed from the "hash table" |
516 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - |
517 | * now it's safe to cancel any that need to be. |
518 | */ |
519 | static void free_ioctx_users(struct percpu_ref *ref) |
520 | { |
521 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
522 | struct kiocb *req; |
523 | |
524 | spin_lock_irq(&ctx->ctx_lock); |
525 | |
526 | while (!list_empty(&ctx->active_reqs)) { |
527 | req = list_first_entry(&ctx->active_reqs, |
528 | struct kiocb, ki_list); |
529 | |
530 | list_del_init(&req->ki_list); |
531 | kiocb_cancel(ctx, req); |
532 | } |
533 | |
534 | spin_unlock_irq(&ctx->ctx_lock); |
535 | |
536 | percpu_ref_kill(&ctx->reqs); |
537 | percpu_ref_put(&ctx->reqs); |
538 | } |
539 | |
540 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) |
541 | { |
542 | unsigned i, new_nr; |
543 | struct kioctx_table *table, *old; |
544 | struct aio_ring *ring; |
545 | |
546 | spin_lock(&mm->ioctx_lock); |
547 | rcu_read_lock(); |
548 | table = rcu_dereference(mm->ioctx_table); |
549 | |
550 | while (1) { |
551 | if (table) |
552 | for (i = 0; i < table->nr; i++) |
553 | if (!table->table[i]) { |
554 | ctx->id = i; |
555 | table->table[i] = ctx; |
556 | rcu_read_unlock(); |
557 | spin_unlock(&mm->ioctx_lock); |
558 | |
559 | ring = kmap_atomic(ctx->ring_pages[0]); |
560 | ring->id = ctx->id; |
561 | kunmap_atomic(ring); |
562 | return 0; |
563 | } |
564 | |
565 | new_nr = (table ? table->nr : 1) * 4; |
566 | |
567 | rcu_read_unlock(); |
568 | spin_unlock(&mm->ioctx_lock); |
569 | |
570 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * |
571 | new_nr, GFP_KERNEL); |
572 | if (!table) |
573 | return -ENOMEM; |
574 | |
575 | table->nr = new_nr; |
576 | |
577 | spin_lock(&mm->ioctx_lock); |
578 | rcu_read_lock(); |
579 | old = rcu_dereference(mm->ioctx_table); |
580 | |
581 | if (!old) { |
582 | rcu_assign_pointer(mm->ioctx_table, table); |
583 | } else if (table->nr > old->nr) { |
584 | memcpy(table->table, old->table, |
585 | old->nr * sizeof(struct kioctx *)); |
586 | |
587 | rcu_assign_pointer(mm->ioctx_table, table); |
588 | kfree_rcu(old, rcu); |
589 | } else { |
590 | kfree(table); |
591 | table = old; |
592 | } |
593 | } |
594 | } |
595 | |
596 | static void aio_nr_sub(unsigned nr) |
597 | { |
598 | spin_lock(&aio_nr_lock); |
599 | if (WARN_ON(aio_nr - nr > aio_nr)) |
600 | aio_nr = 0; |
601 | else |
602 | aio_nr -= nr; |
603 | spin_unlock(&aio_nr_lock); |
604 | } |
605 | |
606 | /* ioctx_alloc |
607 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
608 | */ |
609 | static struct kioctx *ioctx_alloc(unsigned nr_events) |
610 | { |
611 | struct mm_struct *mm = current->mm; |
612 | struct kioctx *ctx; |
613 | int err = -ENOMEM; |
614 | |
615 | /* |
616 | * We keep track of the number of available ringbuffer slots, to prevent |
617 | * overflow (reqs_available), and we also use percpu counters for this. |
618 | * |
619 | * So since up to half the slots might be on other cpu's percpu counters |
620 | * and unavailable, double nr_events so userspace sees what they |
621 | * expected: additionally, we move req_batch slots to/from percpu |
622 | * counters at a time, so make sure that isn't 0: |
623 | */ |
624 | nr_events = max(nr_events, num_possible_cpus() * 4); |
625 | nr_events *= 2; |
626 | |
627 | /* Prevent overflows */ |
628 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || |
629 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { |
630 | pr_debug("ENOMEM: nr_events too high\n"); |
631 | return ERR_PTR(-EINVAL); |
632 | } |
633 | |
634 | if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) |
635 | return ERR_PTR(-EAGAIN); |
636 | |
637 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
638 | if (!ctx) |
639 | return ERR_PTR(-ENOMEM); |
640 | |
641 | ctx->max_reqs = nr_events; |
642 | |
643 | if (percpu_ref_init(&ctx->users, free_ioctx_users)) |
644 | goto err; |
645 | |
646 | if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs)) |
647 | goto err; |
648 | |
649 | spin_lock_init(&ctx->ctx_lock); |
650 | spin_lock_init(&ctx->completion_lock); |
651 | mutex_init(&ctx->ring_lock); |
652 | init_waitqueue_head(&ctx->wait); |
653 | |
654 | INIT_LIST_HEAD(&ctx->active_reqs); |
655 | |
656 | ctx->cpu = alloc_percpu(struct kioctx_cpu); |
657 | if (!ctx->cpu) |
658 | goto err; |
659 | |
660 | if (aio_setup_ring(ctx) < 0) |
661 | goto err; |
662 | |
663 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); |
664 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); |
665 | if (ctx->req_batch < 1) |
666 | ctx->req_batch = 1; |
667 | |
668 | /* limit the number of system wide aios */ |
669 | spin_lock(&aio_nr_lock); |
670 | if (aio_nr + nr_events > (aio_max_nr * 2UL) || |
671 | aio_nr + nr_events < aio_nr) { |
672 | spin_unlock(&aio_nr_lock); |
673 | err = -EAGAIN; |
674 | goto err_ctx; |
675 | } |
676 | aio_nr += ctx->max_reqs; |
677 | spin_unlock(&aio_nr_lock); |
678 | |
679 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
680 | percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ |
681 | |
682 | err = ioctx_add_table(ctx, mm); |
683 | if (err) |
684 | goto err_cleanup; |
685 | |
686 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
687 | ctx, ctx->user_id, mm, ctx->nr_events); |
688 | return ctx; |
689 | |
690 | err_cleanup: |
691 | aio_nr_sub(ctx->max_reqs); |
692 | err_ctx: |
693 | aio_free_ring(ctx); |
694 | err: |
695 | free_percpu(ctx->cpu); |
696 | free_percpu(ctx->reqs.pcpu_count); |
697 | free_percpu(ctx->users.pcpu_count); |
698 | kmem_cache_free(kioctx_cachep, ctx); |
699 | pr_debug("error allocating ioctx %d\n", err); |
700 | return ERR_PTR(err); |
701 | } |
702 | |
703 | /* kill_ioctx |
704 | * Cancels all outstanding aio requests on an aio context. Used |
705 | * when the processes owning a context have all exited to encourage |
706 | * the rapid destruction of the kioctx. |
707 | */ |
708 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) |
709 | { |
710 | if (!atomic_xchg(&ctx->dead, 1)) { |
711 | struct kioctx_table *table; |
712 | |
713 | spin_lock(&mm->ioctx_lock); |
714 | rcu_read_lock(); |
715 | table = rcu_dereference(mm->ioctx_table); |
716 | |
717 | WARN_ON(ctx != table->table[ctx->id]); |
718 | table->table[ctx->id] = NULL; |
719 | rcu_read_unlock(); |
720 | spin_unlock(&mm->ioctx_lock); |
721 | |
722 | /* percpu_ref_kill() will do the necessary call_rcu() */ |
723 | wake_up_all(&ctx->wait); |
724 | |
725 | /* |
726 | * It'd be more correct to do this in free_ioctx(), after all |
727 | * the outstanding kiocbs have finished - but by then io_destroy |
728 | * has already returned, so io_setup() could potentially return |
729 | * -EAGAIN with no ioctxs actually in use (as far as userspace |
730 | * could tell). |
731 | */ |
732 | aio_nr_sub(ctx->max_reqs); |
733 | |
734 | if (ctx->mmap_size) |
735 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
736 | |
737 | percpu_ref_kill(&ctx->users); |
738 | } |
739 | } |
740 | |
741 | /* wait_on_sync_kiocb: |
742 | * Waits on the given sync kiocb to complete. |
743 | */ |
744 | ssize_t wait_on_sync_kiocb(struct kiocb *req) |
745 | { |
746 | while (!req->ki_ctx) { |
747 | set_current_state(TASK_UNINTERRUPTIBLE); |
748 | if (req->ki_ctx) |
749 | break; |
750 | io_schedule(); |
751 | } |
752 | __set_current_state(TASK_RUNNING); |
753 | return req->ki_user_data; |
754 | } |
755 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
756 | |
757 | /* |
758 | * exit_aio: called when the last user of mm goes away. At this point, there is |
759 | * no way for any new requests to be submitted or any of the io_* syscalls to be |
760 | * called on the context. |
761 | * |
762 | * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on |
763 | * them. |
764 | */ |
765 | void exit_aio(struct mm_struct *mm) |
766 | { |
767 | struct kioctx_table *table; |
768 | struct kioctx *ctx; |
769 | unsigned i = 0; |
770 | |
771 | while (1) { |
772 | rcu_read_lock(); |
773 | table = rcu_dereference(mm->ioctx_table); |
774 | |
775 | do { |
776 | if (!table || i >= table->nr) { |
777 | rcu_read_unlock(); |
778 | rcu_assign_pointer(mm->ioctx_table, NULL); |
779 | if (table) |
780 | kfree(table); |
781 | return; |
782 | } |
783 | |
784 | ctx = table->table[i++]; |
785 | } while (!ctx); |
786 | |
787 | rcu_read_unlock(); |
788 | |
789 | /* |
790 | * We don't need to bother with munmap() here - |
791 | * exit_mmap(mm) is coming and it'll unmap everything. |
792 | * Since aio_free_ring() uses non-zero ->mmap_size |
793 | * as indicator that it needs to unmap the area, |
794 | * just set it to 0; aio_free_ring() is the only |
795 | * place that uses ->mmap_size, so it's safe. |
796 | */ |
797 | ctx->mmap_size = 0; |
798 | |
799 | kill_ioctx(mm, ctx); |
800 | } |
801 | } |
802 | |
803 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) |
804 | { |
805 | struct kioctx_cpu *kcpu; |
806 | |
807 | preempt_disable(); |
808 | kcpu = this_cpu_ptr(ctx->cpu); |
809 | |
810 | kcpu->reqs_available += nr; |
811 | while (kcpu->reqs_available >= ctx->req_batch * 2) { |
812 | kcpu->reqs_available -= ctx->req_batch; |
813 | atomic_add(ctx->req_batch, &ctx->reqs_available); |
814 | } |
815 | |
816 | preempt_enable(); |
817 | } |
818 | |
819 | static bool get_reqs_available(struct kioctx *ctx) |
820 | { |
821 | struct kioctx_cpu *kcpu; |
822 | bool ret = false; |
823 | |
824 | preempt_disable(); |
825 | kcpu = this_cpu_ptr(ctx->cpu); |
826 | |
827 | if (!kcpu->reqs_available) { |
828 | int old, avail = atomic_read(&ctx->reqs_available); |
829 | |
830 | do { |
831 | if (avail < ctx->req_batch) |
832 | goto out; |
833 | |
834 | old = avail; |
835 | avail = atomic_cmpxchg(&ctx->reqs_available, |
836 | avail, avail - ctx->req_batch); |
837 | } while (avail != old); |
838 | |
839 | kcpu->reqs_available += ctx->req_batch; |
840 | } |
841 | |
842 | ret = true; |
843 | kcpu->reqs_available--; |
844 | out: |
845 | preempt_enable(); |
846 | return ret; |
847 | } |
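Illustrative arithmetic for the batching above, assuming ctx->nr_events = 383 and four possible CPUs (numbers chosen only as an example):

	/*
	 *   req_batch = (383 - 1) / (4 * 4) = 23
	 *
	 * get_reqs_available() moves 23 slots from ctx->reqs_available into the
	 * local cache at a time and refuses once fewer than 23 remain globally;
	 * put_reqs_available() hands 23 back whenever the local cache reaches
	 * 2 * req_batch = 46.  Up to roughly 45 slots per CPU can therefore be
	 * parked in percpu caches, which is why ioctx_alloc() doubles nr_events.
	 */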
848 | |
849 | /* aio_get_req |
850 | * Allocate a slot for an aio request. |
851 | * Returns NULL if no requests are free. |
852 | */ |
853 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) |
854 | { |
855 | struct kiocb *req; |
856 | |
857 | if (!get_reqs_available(ctx)) |
858 | return NULL; |
859 | |
860 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
861 | if (unlikely(!req)) |
862 | goto out_put; |
863 | |
864 | percpu_ref_get(&ctx->reqs); |
865 | |
866 | req->ki_ctx = ctx; |
867 | return req; |
868 | out_put: |
869 | put_reqs_available(ctx, 1); |
870 | return NULL; |
871 | } |
872 | |
873 | static void kiocb_free(struct kiocb *req) |
874 | { |
875 | if (req->ki_filp) |
876 | fput(req->ki_filp); |
877 | if (req->ki_eventfd != NULL) |
878 | eventfd_ctx_put(req->ki_eventfd); |
879 | kmem_cache_free(kiocb_cachep, req); |
880 | } |
881 | |
882 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
883 | { |
884 | struct aio_ring __user *ring = (void __user *)ctx_id; |
885 | struct mm_struct *mm = current->mm; |
886 | struct kioctx *ctx, *ret = NULL; |
887 | struct kioctx_table *table; |
888 | unsigned id; |
889 | |
890 | if (get_user(id, &ring->id)) |
891 | return NULL; |
892 | |
893 | rcu_read_lock(); |
894 | table = rcu_dereference(mm->ioctx_table); |
895 | |
896 | if (!table || id >= table->nr) |
897 | goto out; |
898 | |
899 | ctx = table->table[id]; |
900 | if (ctx && ctx->user_id == ctx_id) { |
901 | percpu_ref_get(&ctx->users); |
902 | ret = ctx; |
903 | } |
904 | out: |
905 | rcu_read_unlock(); |
906 | return ret; |
907 | } |
908 | |
909 | /* aio_complete |
910 | * Called when the io request on the given iocb is complete. |
911 | */ |
912 | void aio_complete(struct kiocb *iocb, long res, long res2) |
913 | { |
914 | struct kioctx *ctx = iocb->ki_ctx; |
915 | struct aio_ring *ring; |
916 | struct io_event *ev_page, *event; |
917 | unsigned long flags; |
918 | unsigned tail, pos; |
919 | |
920 | /* |
921 | * Special case handling for sync iocbs: |
922 | * - events go directly into the iocb for fast handling |
923 | * - the sync task with the iocb in its stack holds the single iocb |
924 | * ref, no other paths have a way to get another ref |
925 | * - the sync task helpfully left a reference to itself in the iocb |
926 | */ |
927 | if (is_sync_kiocb(iocb)) { |
928 | iocb->ki_user_data = res; |
929 | smp_wmb(); |
930 | iocb->ki_ctx = ERR_PTR(-EXDEV); |
931 | wake_up_process(iocb->ki_obj.tsk); |
932 | return; |
933 | } |
934 | |
935 | if (iocb->ki_list.next) { |
936 | unsigned long flags; |
937 | |
938 | spin_lock_irqsave(&ctx->ctx_lock, flags); |
939 | list_del(&iocb->ki_list); |
940 | spin_unlock_irqrestore(&ctx->ctx_lock, flags); |
941 | } |
942 | |
943 | /* |
944 | * Add a completion event to the ring buffer. Must be done holding |
945 | * ctx->completion_lock to prevent other code from messing with the tail |
946 | * pointer since we might be called from irq context. |
947 | */ |
948 | spin_lock_irqsave(&ctx->completion_lock, flags); |
949 | |
950 | tail = ctx->tail; |
951 | pos = tail + AIO_EVENTS_OFFSET; |
952 | |
953 | if (++tail >= ctx->nr_events) |
954 | tail = 0; |
955 | |
956 | ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
957 | event = ev_page + pos % AIO_EVENTS_PER_PAGE; |
958 | |
959 | event->obj = (u64)(unsigned long)iocb->ki_obj.user; |
960 | event->data = iocb->ki_user_data; |
961 | event->res = res; |
962 | event->res2 = res2; |
963 | |
964 | kunmap_atomic(ev_page); |
965 | flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); |
966 | |
967 | pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", |
968 | ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data, |
969 | res, res2); |
970 | |
971 | /* after flagging the request as done, we |
972 | * must never even look at it again |
973 | */ |
974 | smp_wmb(); /* make event visible before updating tail */ |
975 | |
976 | ctx->tail = tail; |
977 | |
978 | ring = kmap_atomic(ctx->ring_pages[0]); |
979 | ring->tail = tail; |
980 | kunmap_atomic(ring); |
981 | flush_dcache_page(ctx->ring_pages[0]); |
982 | |
983 | spin_unlock_irqrestore(&ctx->completion_lock, flags); |
984 | |
985 | pr_debug("added to ring %p at [%u]\n", iocb, tail); |
986 | |
987 | /* |
988 | * Check if the user asked us to deliver the result through an |
989 | * eventfd. The eventfd_signal() function is safe to be called |
990 | * from IRQ context. |
991 | */ |
992 | if (iocb->ki_eventfd != NULL) |
993 | eventfd_signal(iocb->ki_eventfd, 1); |
994 | |
995 | /* everything turned out well, dispose of the aiocb. */ |
996 | kiocb_free(iocb); |
997 | |
998 | /* |
999 | * We have to order our ring_info tail store above and test |
1000 | * of the wait list below outside the wait lock. This is |
1001 | * like in wake_up_bit() where clearing a bit has to be |
1002 | * ordered with the unlocked test. |
1003 | */ |
1004 | smp_mb(); |
1005 | |
1006 | if (waitqueue_active(&ctx->wait)) |
1007 | wake_up(&ctx->wait); |
1008 | |
1009 | percpu_ref_put(&ctx->reqs); |
1010 | } |
1011 | EXPORT_SYMBOL(aio_complete); |
1012 | |
1013 | /* aio_read_events |
1014 | * Pull an event off of the ioctx's event ring. Returns the number of |
1015 | * events fetched |
1016 | */ |
1017 | static long aio_read_events_ring(struct kioctx *ctx, |
1018 | struct io_event __user *event, long nr) |
1019 | { |
1020 | struct aio_ring *ring; |
1021 | unsigned head, tail, pos; |
1022 | long ret = 0; |
1023 | int copy_ret; |
1024 | |
1025 | mutex_lock(&ctx->ring_lock); |
1026 | |
1027 | ring = kmap_atomic(ctx->ring_pages[0]); |
1028 | head = ring->head; |
1029 | tail = ring->tail; |
1030 | kunmap_atomic(ring); |
1031 | |
1032 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1033 | |
1034 | if (head == tail) |
1035 | goto out; |
1036 | |
1037 | while (ret < nr) { |
1038 | long avail; |
1039 | struct io_event *ev; |
1040 | struct page *page; |
1041 | |
1042 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
1043 | if (head == tail) |
1044 | break; |
1045 | |
1046 | avail = min(avail, nr - ret); |
1047 | avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - |
1048 | ((head + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE)); |
1049 | |
1050 | pos = head + AIO_EVENTS_OFFSET; |
1051 | page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; |
1052 | pos %= AIO_EVENTS_PER_PAGE; |
1053 | |
1054 | ev = kmap(page); |
1055 | copy_ret = copy_to_user(event + ret, ev + pos, |
1056 | sizeof(*ev) * avail); |
1057 | kunmap(page); |
1058 | |
1059 | if (unlikely(copy_ret)) { |
1060 | ret = -EFAULT; |
1061 | goto out; |
1062 | } |
1063 | |
1064 | ret += avail; |
1065 | head += avail; |
1066 | head %= ctx->nr_events; |
1067 | } |
1068 | |
1069 | ring = kmap_atomic(ctx->ring_pages[0]); |
1070 | ring->head = head; |
1071 | kunmap_atomic(ring); |
1072 | flush_dcache_page(ctx->ring_pages[0]); |
1073 | |
1074 | pr_debug("%li h%u t%u\n", ret, head, tail); |
1075 | |
1076 | put_reqs_available(ctx, ret); |
1077 | out: |
1078 | mutex_unlock(&ctx->ring_lock); |
1079 | |
1080 | return ret; |
1081 | } |
1082 | |
1083 | static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, |
1084 | struct io_event __user *event, long *i) |
1085 | { |
1086 | long ret = aio_read_events_ring(ctx, event + *i, nr - *i); |
1087 | |
1088 | if (ret > 0) |
1089 | *i += ret; |
1090 | |
1091 | if (unlikely(atomic_read(&ctx->dead))) |
1092 | ret = -EINVAL; |
1093 | |
1094 | if (!*i) |
1095 | *i = ret; |
1096 | |
1097 | return ret < 0 || *i >= min_nr; |
1098 | } |
1099 | |
1100 | static long read_events(struct kioctx *ctx, long min_nr, long nr, |
1101 | struct io_event __user *event, |
1102 | struct timespec __user *timeout) |
1103 | { |
1104 | ktime_t until = { .tv64 = KTIME_MAX }; |
1105 | long ret = 0; |
1106 | |
1107 | if (timeout) { |
1108 | struct timespec ts; |
1109 | |
1110 | if (unlikely(copy_from_user(&ts, timeout, sizeof(ts)))) |
1111 | return -EFAULT; |
1112 | |
1113 | until = timespec_to_ktime(ts); |
1114 | } |
1115 | |
1116 | /* |
1117 | * Note that aio_read_events() is being called as the conditional - i.e. |
1118 | * we're calling it after prepare_to_wait() has set task state to |
1119 | * TASK_INTERRUPTIBLE. |
1120 | * |
1121 | * But aio_read_events() can block, and if it blocks it's going to flip |
1122 | * the task state back to TASK_RUNNING. |
1123 | * |
1124 | * This should be ok, provided it doesn't flip the state back to |
1125 | * TASK_RUNNING and return 0 too much - that causes us to spin. That |
1126 | * will only happen if the mutex_lock() call blocks, and we then find |
1127 | * the ringbuffer empty. So in practice we should be ok, but it's |
1128 | * something to be aware of when touching this code. |
1129 | */ |
1130 | wait_event_interruptible_hrtimeout(ctx->wait, |
1131 | aio_read_events(ctx, min_nr, nr, event, &ret), until); |
1132 | |
1133 | if (!ret && signal_pending(current)) |
1134 | ret = -EINTR; |
1135 | |
1136 | return ret; |
1137 | } |
1138 | |
1139 | /* sys_io_setup: |
1140 | * Create an aio_context capable of receiving at least nr_events. |
1141 | * ctxp must not point to an aio_context that already exists, and |
1142 | * must be initialized to 0 prior to the call. On successful |
1143 | * creation of the aio_context, *ctxp is filled in with the resulting |
1144 | * handle. May fail with -EINVAL if *ctxp is not initialized, or |
1145 | * if the specified nr_events exceeds internal limits. May fail |
1146 | * with -EAGAIN if the specified nr_events exceeds the user's limit |
1147 | * of available events. May fail with -ENOMEM if insufficient kernel |
1148 | * resources are available. May fail with -EFAULT if an invalid |
1149 | * pointer is passed for ctxp. Will fail with -ENOSYS if not |
1150 | * implemented. |
1151 | */ |
1152 | SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) |
1153 | { |
1154 | struct kioctx *ioctx = NULL; |
1155 | unsigned long ctx; |
1156 | long ret; |
1157 | |
1158 | ret = get_user(ctx, ctxp); |
1159 | if (unlikely(ret)) |
1160 | goto out; |
1161 | |
1162 | ret = -EINVAL; |
1163 | if (unlikely(ctx || nr_events == 0)) { |
1164 | pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", |
1165 | ctx, nr_events); |
1166 | goto out; |
1167 | } |
1168 | |
1169 | ioctx = ioctx_alloc(nr_events); |
1170 | ret = PTR_ERR(ioctx); |
1171 | if (!IS_ERR(ioctx)) { |
1172 | ret = put_user(ioctx->user_id, ctxp); |
1173 | if (ret) |
1174 | kill_ioctx(current->mm, ioctx); |
1175 | percpu_ref_put(&ioctx->users); |
1176 | } |
1177 | |
1178 | out: |
1179 | return ret; |
1180 | } |
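A hedged userspace sketch of the calling convention documented above, using the raw syscall numbers (no libaio); the context word must start out zeroed exactly as required:

	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdio.h>

	int example_setup(void)
	{
		aio_context_t ctx = 0;			/* must be zero before the call */

		if (syscall(__NR_io_setup, 128, &ctx) < 0) {
			perror("io_setup");
			return -1;
		}
		/* ... io_submit() / io_getevents() go here ... */
		syscall(__NR_io_destroy, ctx);
		return 0;
	}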
1181 | |
1182 | /* sys_io_destroy: |
1183 | * Destroy the aio_context specified. May cancel any outstanding |
1184 | * AIOs and block on completion. Will fail with -ENOSYS if not |
1185 | * implemented. May fail with -EINVAL if the context pointed to |
1186 | * is invalid. |
1187 | */ |
1188 | SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) |
1189 | { |
1190 | struct kioctx *ioctx = lookup_ioctx(ctx); |
1191 | if (likely(NULL != ioctx)) { |
1192 | kill_ioctx(current->mm, ioctx); |
1193 | percpu_ref_put(&ioctx->users); |
1194 | return 0; |
1195 | } |
1196 | pr_debug("EINVAL: io_destroy: invalid context id\n"); |
1197 | return -EINVAL; |
1198 | } |
1199 | |
1200 | typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, |
1201 | unsigned long, loff_t); |
1202 | |
1203 | static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, |
1204 | int rw, char __user *buf, |
1205 | unsigned long *nr_segs, |
1206 | struct iovec **iovec, |
1207 | bool compat) |
1208 | { |
1209 | ssize_t ret; |
1210 | |
1211 | *nr_segs = kiocb->ki_nbytes; |
1212 | |
1213 | #ifdef CONFIG_COMPAT |
1214 | if (compat) |
1215 | ret = compat_rw_copy_check_uvector(rw, |
1216 | (struct compat_iovec __user *)buf, |
1217 | *nr_segs, 1, *iovec, iovec); |
1218 | else |
1219 | #endif |
1220 | ret = rw_copy_check_uvector(rw, |
1221 | (struct iovec __user *)buf, |
1222 | *nr_segs, 1, *iovec, iovec); |
1223 | if (ret < 0) |
1224 | return ret; |
1225 | |
1226 | /* ki_nbytes now reflect bytes instead of segs */ |
1227 | kiocb->ki_nbytes = ret; |
1228 | return 0; |
1229 | } |
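A hedged userspace sketch of the convention handled above: for the vectored opcodes, aio_buf carries the iovec array and aio_nbytes carries the segment count, which aio_setup_vectored_rw() then converts back to bytes. example_fill_preadv() is an illustrative helper, not an existing API.

	#include <linux/aio_abi.h>
	#include <sys/uio.h>
	#include <string.h>

	static void example_fill_preadv(struct iocb *cb, int fd,
					const struct iovec *iov, unsigned long nr_segs)
	{
		memset(cb, 0, sizeof(*cb));
		cb->aio_lio_opcode = IOCB_CMD_PREADV;
		cb->aio_fildes = fd;
		cb->aio_buf = (unsigned long)iov;	/* pointer to the iovec array */
		cb->aio_nbytes = nr_segs;		/* number of segments, not bytes */
		cb->aio_offset = 0;
	}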
1230 | |
1231 | static ssize_t aio_setup_single_vector(struct kiocb *kiocb, |
1232 | int rw, char __user *buf, |
1233 | unsigned long *nr_segs, |
1234 | struct iovec *iovec) |
1235 | { |
1236 | if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) |
1237 | return -EFAULT; |
1238 | |
1239 | iovec->iov_base = buf; |
1240 | iovec->iov_len = kiocb->ki_nbytes; |
1241 | *nr_segs = 1; |
1242 | return 0; |
1243 | } |
1244 | |
1245 | /* |
1246 | * aio_run_iocb: |
1247 | * Performs the initial checks and dispatches the I/O for the |
1248 | * kiocb at the time of io submission. |
1249 | */ |
1250 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, |
1251 | char __user *buf, bool compat) |
1252 | { |
1253 | struct file *file = req->ki_filp; |
1254 | ssize_t ret; |
1255 | unsigned long nr_segs; |
1256 | int rw; |
1257 | fmode_t mode; |
1258 | aio_rw_op *rw_op; |
1259 | struct iovec inline_vec, *iovec = &inline_vec; |
1260 | |
1261 | switch (opcode) { |
1262 | case IOCB_CMD_PREAD: |
1263 | case IOCB_CMD_PREADV: |
1264 | mode = FMODE_READ; |
1265 | rw = READ; |
1266 | rw_op = file->f_op->aio_read; |
1267 | goto rw_common; |
1268 | |
1269 | case IOCB_CMD_PWRITE: |
1270 | case IOCB_CMD_PWRITEV: |
1271 | mode = FMODE_WRITE; |
1272 | rw = WRITE; |
1273 | rw_op = file->f_op->aio_write; |
1274 | goto rw_common; |
1275 | rw_common: |
1276 | if (unlikely(!(file->f_mode & mode))) |
1277 | return -EBADF; |
1278 | |
1279 | if (!rw_op) |
1280 | return -EINVAL; |
1281 | |
1282 | ret = (opcode == IOCB_CMD_PREADV || |
1283 | opcode == IOCB_CMD_PWRITEV) |
1284 | ? aio_setup_vectored_rw(req, rw, buf, &nr_segs, |
1285 | &iovec, compat) |
1286 | : aio_setup_single_vector(req, rw, buf, &nr_segs, |
1287 | iovec); |
1288 | if (ret) |
1289 | return ret; |
1290 | |
1291 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); |
1292 | if (ret < 0) { |
1293 | if (iovec != &inline_vec) |
1294 | kfree(iovec); |
1295 | return ret; |
1296 | } |
1297 | |
1298 | req->ki_nbytes = ret; |
1299 | |
1300 | /* XXX: move/kill - rw_verify_area()? */ |
1301 | /* This matches the pread()/pwrite() logic */ |
1302 | if (req->ki_pos < 0) { |
1303 | ret = -EINVAL; |
1304 | break; |
1305 | } |
1306 | |
1307 | if (rw == WRITE) |
1308 | file_start_write(file); |
1309 | |
1310 | ret = rw_op(req, iovec, nr_segs, req->ki_pos); |
1311 | |
1312 | if (rw == WRITE) |
1313 | file_end_write(file); |
1314 | break; |
1315 | |
1316 | case IOCB_CMD_FDSYNC: |
1317 | if (!file->f_op->aio_fsync) |
1318 | return -EINVAL; |
1319 | |
1320 | ret = file->f_op->aio_fsync(req, 1); |
1321 | break; |
1322 | |
1323 | case IOCB_CMD_FSYNC: |
1324 | if (!file->f_op->aio_fsync) |
1325 | return -EINVAL; |
1326 | |
1327 | ret = file->f_op->aio_fsync(req, 0); |
1328 | break; |
1329 | |
1330 | default: |
1331 | pr_debug("EINVAL: no operation provided\n"); |
1332 | return -EINVAL; |
1333 | } |
1334 | |
1335 | if (iovec != &inline_vec) |
1336 | kfree(iovec); |
1337 | |
1338 | if (ret != -EIOCBQUEUED) { |
1339 | /* |
1340 | * There's no easy way to restart the syscall since other AIO's |
1341 | * may be already running. Just fail this IO with EINTR. |
1342 | */ |
1343 | if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR || |
1344 | ret == -ERESTARTNOHAND || |
1345 | ret == -ERESTART_RESTARTBLOCK)) |
1346 | ret = -EINTR; |
1347 | aio_complete(req, ret, 0); |
1348 | } |
1349 | |
1350 | return 0; |
1351 | } |
1352 | |
1353 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
1354 | struct iocb *iocb, bool compat) |
1355 | { |
1356 | struct kiocb *req; |
1357 | ssize_t ret; |
1358 | |
1359 | /* enforce forwards compatibility on users */ |
1360 | if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) { |
1361 | pr_debug("EINVAL: reserve field set\n"); |
1362 | return -EINVAL; |
1363 | } |
1364 | |
1365 | /* prevent overflows */ |
1366 | if (unlikely( |
1367 | (iocb->aio_buf != (unsigned long)iocb->aio_buf) || |
1368 | (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || |
1369 | ((ssize_t)iocb->aio_nbytes < 0) |
1370 | )) { |
1371 | pr_debug("EINVAL: io_submit: overflow check\n"); |
1372 | return -EINVAL; |
1373 | } |
1374 | |
1375 | req = aio_get_req(ctx); |
1376 | if (unlikely(!req)) |
1377 | return -EAGAIN; |
1378 | |
1379 | req->ki_filp = fget(iocb->aio_fildes); |
1380 | if (unlikely(!req->ki_filp)) { |
1381 | ret = -EBADF; |
1382 | goto out_put_req; |
1383 | } |
1384 | |
1385 | if (iocb->aio_flags & IOCB_FLAG_RESFD) { |
1386 | /* |
1387 | * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an |
1388 | * instance of the file* now. The file descriptor must be |
1389 | * an eventfd() fd, and will be signaled for each completed |
1390 | * event using the eventfd_signal() function. |
1391 | */ |
1392 | req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); |
1393 | if (IS_ERR(req->ki_eventfd)) { |
1394 | ret = PTR_ERR(req->ki_eventfd); |
1395 | req->ki_eventfd = NULL; |
1396 | goto out_put_req; |
1397 | } |
1398 | } |
1399 | |
1400 | ret = put_user(KIOCB_KEY, &user_iocb->aio_key); |
1401 | if (unlikely(ret)) { |
1402 | pr_debug("EFAULT: aio_key\n"); |
1403 | goto out_put_req; |
1404 | } |
1405 | |
1406 | req->ki_obj.user = user_iocb; |
1407 | req->ki_user_data = iocb->aio_data; |
1408 | req->ki_pos = iocb->aio_offset; |
1409 | req->ki_nbytes = iocb->aio_nbytes; |
1410 | |
1411 | ret = aio_run_iocb(req, iocb->aio_lio_opcode, |
1412 | (char __user *)(unsigned long)iocb->aio_buf, |
1413 | compat); |
1414 | if (ret) |
1415 | goto out_put_req; |
1416 | |
1417 | return 0; |
1418 | out_put_req: |
1419 | put_reqs_available(ctx, 1); |
1420 | percpu_ref_put(&ctx->reqs); |
1421 | kiocb_free(req); |
1422 | return ret; |
1423 | } |
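A hedged userspace sketch of the IOCB_FLAG_RESFD path handled above; ctx and fd are assumed to come from io_setup() and open(), and example_resfd_submit() is illustrative only:

	#include <linux/aio_abi.h>
	#include <sys/eventfd.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	static int example_resfd_submit(aio_context_t ctx, int fd, char *buf, size_t len)
	{
		struct iocb cb;
		struct iocb *cbs[1] = { &cb };
		int efd = eventfd(0, 0);

		if (efd < 0)
			return -1;

		memset(&cb, 0, sizeof(cb));
		cb.aio_lio_opcode = IOCB_CMD_PREAD;
		cb.aio_fildes = fd;
		cb.aio_buf = (unsigned long)buf;
		cb.aio_nbytes = len;
		cb.aio_flags = IOCB_FLAG_RESFD;
		cb.aio_resfd = efd;		/* signalled once per completed event */

		if (syscall(__NR_io_submit, ctx, 1, cbs) != 1) {
			close(efd);
			return -1;
		}
		return efd;	/* poll()/read() this fd, then reap with io_getevents() */
	}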
1424 | |
1425 | long do_io_submit(aio_context_t ctx_id, long nr, |
1426 | struct iocb __user *__user *iocbpp, bool compat) |
1427 | { |
1428 | struct kioctx *ctx; |
1429 | long ret = 0; |
1430 | int i = 0; |
1431 | struct blk_plug plug; |
1432 | |
1433 | if (unlikely(nr < 0)) |
1434 | return -EINVAL; |
1435 | |
1436 | if (unlikely(nr > LONG_MAX/sizeof(*iocbpp))) |
1437 | nr = LONG_MAX/sizeof(*iocbpp); |
1438 | |
1439 | if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp))))) |
1440 | return -EFAULT; |
1441 | |
1442 | ctx = lookup_ioctx(ctx_id); |
1443 | if (unlikely(!ctx)) { |
1444 | pr_debug("EINVAL: invalid context id\n"); |
1445 | return -EINVAL; |
1446 | } |
1447 | |
1448 | blk_start_plug(&plug); |
1449 | |
1450 | /* |
1451 | * AKPM: should this return a partial result if some of the IOs were |
1452 | * successfully submitted? |
1453 | */ |
1454 | for (i=0; i<nr; i++) { |
1455 | struct iocb __user *user_iocb; |
1456 | struct iocb tmp; |
1457 | |
1458 | if (unlikely(__get_user(user_iocb, iocbpp + i))) { |
1459 | ret = -EFAULT; |
1460 | break; |
1461 | } |
1462 | |
1463 | if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) { |
1464 | ret = -EFAULT; |
1465 | break; |
1466 | } |
1467 | |
1468 | ret = io_submit_one(ctx, user_iocb, &tmp, compat); |
1469 | if (ret) |
1470 | break; |
1471 | } |
1472 | blk_finish_plug(&plug); |
1473 | |
1474 | percpu_ref_put(&ctx->users); |
1475 | return i ? i : ret; |
1476 | } |
1477 | |
1478 | /* sys_io_submit: |
1479 | * Queue the nr iocbs pointed to by iocbpp for processing. Returns |
1480 | * the number of iocbs queued. May return -EINVAL if the aio_context |
1481 | * specified by ctx_id is invalid, if nr is < 0, if the iocb at |
1482 | * *iocbpp[0] is not properly initialized, if the operation specified |
1483 | * is invalid for the file descriptor in the iocb. May fail with |
1484 | * -EFAULT if any of the data structures point to invalid data. May |
1485 | * fail with -EBADF if the file descriptor specified in the first |
1486 | * iocb is invalid. May fail with -EAGAIN if insufficient resources |
1487 | * are available to queue any iocbs. Will return 0 if nr is 0. Will |
1488 | * fail with -ENOSYS if not implemented. |
1489 | */ |
1490 | SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
1491 | struct iocb __user * __user *, iocbpp) |
1492 | { |
1493 | return do_io_submit(ctx_id, nr, iocbpp, 0); |
1494 | } |
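A hedged end-to-end userspace sketch of io_submit()/io_getevents() for a single IOCB_CMD_PREAD; ctx and fd are assumed to come from io_setup() and open():

	#include <linux/aio_abi.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <string.h>

	static long example_pread(aio_context_t ctx, int fd, char *buf, size_t len)
	{
		struct iocb cb;
		struct iocb *cbs[1] = { &cb };
		struct io_event ev;

		memset(&cb, 0, sizeof(cb));
		cb.aio_lio_opcode = IOCB_CMD_PREAD;
		cb.aio_fildes = fd;
		cb.aio_buf = (unsigned long)buf;
		cb.aio_nbytes = len;
		cb.aio_offset = 0;

		if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
			return -1;
		if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, NULL) != 1)
			return -1;
		return ev.res;		/* bytes read, or a negative errno */
	}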
1495 | |
1496 | /* lookup_kiocb |
1497 | * Finds a given iocb for cancellation. |
1498 | */ |
1499 | static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, |
1500 | u32 key) |
1501 | { |
1502 | struct list_head *pos; |
1503 | |
1504 | assert_spin_locked(&ctx->ctx_lock); |
1505 | |
1506 | if (key != KIOCB_KEY) |
1507 | return NULL; |
1508 | |
1509 | /* TODO: use a hash or array, this sucks. */ |
1510 | list_for_each(pos, &ctx->active_reqs) { |
1511 | struct kiocb *kiocb = list_kiocb(pos); |
1512 | if (kiocb->ki_obj.user == iocb) |
1513 | return kiocb; |
1514 | } |
1515 | return NULL; |
1516 | } |
1517 | |
1518 | /* sys_io_cancel: |
1519 | * Attempts to cancel an iocb previously passed to io_submit. If |
1520 | * the operation is successfully cancelled, the resulting event is |
1521 | * copied into the memory pointed to by result without being placed |
1522 | * into the completion queue and 0 is returned. May fail with |
1523 | * -EFAULT if any of the data structures pointed to are invalid. |
1524 | * May fail with -EINVAL if aio_context specified by ctx_id is |
1525 | * invalid. May fail with -EAGAIN if the iocb specified was not |
1526 | * cancelled. Will fail with -ENOSYS if not implemented. |
1527 | */ |
1528 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1529 | struct io_event __user *, result) |
1530 | { |
1531 | struct kioctx *ctx; |
1532 | struct kiocb *kiocb; |
1533 | u32 key; |
1534 | int ret; |
1535 | |
1536 | ret = get_user(key, &iocb->aio_key); |
1537 | if (unlikely(ret)) |
1538 | return -EFAULT; |
1539 | |
1540 | ctx = lookup_ioctx(ctx_id); |
1541 | if (unlikely(!ctx)) |
1542 | return -EINVAL; |
1543 | |
1544 | spin_lock_irq(&ctx->ctx_lock); |
1545 | |
1546 | kiocb = lookup_kiocb(ctx, iocb, key); |
1547 | if (kiocb) |
1548 | ret = kiocb_cancel(ctx, kiocb); |
1549 | else |
1550 | ret = -EINVAL; |
1551 | |
1552 | spin_unlock_irq(&ctx->ctx_lock); |
1553 | |
1554 | if (!ret) { |
1555 | /* |
1556 | * The result argument is no longer used - the io_event is |
1557 | * always delivered via the ring buffer. -EINPROGRESS indicates |
1558 | * cancellation is in progress: |
1559 | */ |
1560 | ret = -EINPROGRESS; |
1561 | } |
1562 | |
1563 | percpu_ref_put(&ctx->users); |
1564 | |
1565 | return ret; |
1566 | } |
1567 | |
1568 | /* io_getevents: |
1569 | * Attempts to read at least min_nr events and up to nr events from |
1570 | * the completion queue for the aio_context specified by ctx_id. If |
1571 | * it succeeds, the number of read events is returned. May fail with |
1572 | * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is |
1573 | * out of range, if timeout is out of range. May fail with -EFAULT |
1574 | * if any of the memory specified is invalid. May return 0 or |
1575 | * < min_nr if the timeout specified by timeout has elapsed |
1576 | * before sufficient events are available, where timeout == NULL |
1577 | * specifies an infinite timeout. Note that the timeout pointed to by |
1578 | * timeout is relative. Will fail with -ENOSYS if not implemented. |
1579 | */ |
1580 | SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, |
1581 | long, min_nr, |
1582 | long, nr, |
1583 | struct io_event __user *, events, |
1584 | struct timespec __user *, timeout) |
1585 | { |
1586 | struct kioctx *ioctx = lookup_ioctx(ctx_id); |
1587 | long ret = -EINVAL; |
1588 | |
1589 | if (likely(ioctx)) { |
1590 | if (likely(min_nr <= nr && min_nr >= 0)) |
1591 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
1592 | percpu_ref_put(&ioctx->users); |
1593 | } |
1594 | return ret; |
1595 | } |
1596 |