/*
 * linux/fs/file.c
 *
 * Copyright (C) 1998-1999, Stephen Tweedie and Bill Hawes
 *
 * Manage the dynamic fd arrays in the process files_struct.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct fdtable_defer {
	spinlock_t lock;
	struct work_struct wq;
	struct fdtable *next;
};

int sysctl_nr_open __read_mostly = 1024*1024;
int sysctl_nr_open_min = BITS_PER_LONG;
int sysctl_nr_open_max = 1024 * 1024; /* raised later */

/*
 * We use this list to defer freeing fdtables that have vmalloc'ed
 * sets/arrays.  By keeping a per-cpu list, we avoid having to embed
 * the work_struct in the fdtable itself, which would mean a 64-byte
 * (i386) increase in this per-task structure.
 */
static DEFINE_PER_CPU(struct fdtable_defer, fdtable_defer_list);

static void *alloc_fdmem(unsigned int size)
{
	/*
	 * Very large allocations can stress page reclaim, so fall back to
	 * vmalloc() if the allocation size will be considered "large" by the VM.
	 */
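	/*
	 * Note: PAGE_ALLOC_COSTLY_ORDER is typically 3, so with 4 KiB pages
	 * requests up to 32 KiB are tried with kmalloc() first; __GFP_NOWARN
	 * keeps an expected failure quiet before the vmalloc() fallback.
	 */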
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
		if (data != NULL)
			return data;
	}
	return vmalloc(size);
}

static void free_fdmem(void *ptr)
{
	is_vmalloc_addr(ptr) ? vfree(ptr) : kfree(ptr);
}

static void __free_fdtable(struct fdtable *fdt)
{
	free_fdmem(fdt->fd);
	free_fdmem(fdt->open_fds);
	kfree(fdt);
}

static void free_fdtable_work(struct work_struct *work)
{
	struct fdtable_defer *f =
		container_of(work, struct fdtable_defer, wq);
	struct fdtable *fdt;

	spin_lock_bh(&f->lock);
	fdt = f->next;
	f->next = NULL;
	spin_unlock_bh(&f->lock);
	while (fdt) {
		struct fdtable *next = fdt->next;

		__free_fdtable(fdt);
		fdt = next;
	}
}

void free_fdtable_rcu(struct rcu_head *rcu)
{
	struct fdtable *fdt = container_of(rcu, struct fdtable, rcu);
	struct fdtable_defer *fddef;

	BUG_ON(!fdt);

	if (fdt->max_fds <= NR_OPEN_DEFAULT) {
		/*
		 * This fdtable is embedded in the files structure and that
		 * structure itself is getting destroyed.
		 */
		kmem_cache_free(files_cachep,
				container_of(fdt, struct files_struct, fdtab));
		return;
	}
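	/*
	 * This RCU callback runs in softirq context: kfree() is safe here,
	 * but vfree() may not be called from interrupt context, so
	 * vmalloc'ed tables are handed off to a workqueue below.
	 */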
	if (!is_vmalloc_addr(fdt->fd) && !is_vmalloc_addr(fdt->open_fds)) {
		kfree(fdt->fd);
		kfree(fdt->open_fds);
		kfree(fdt);
	} else {
		fddef = &get_cpu_var(fdtable_defer_list);
		spin_lock(&fddef->lock);
		fdt->next = fddef->next;
		fddef->next = fdt;
		/* vmallocs are handled from the workqueue context */
		schedule_work(&fddef->wq);
		spin_unlock(&fddef->lock);
		put_cpu_var(fdtable_defer_list);
	}
}

/*
 * Expand the fdset in the files_struct. Called with the files spinlock
 * held for write.
 */
static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
{
	unsigned int cpy, set;

	BUG_ON(nfdt->max_fds < ofdt->max_fds);

	cpy = ofdt->max_fds * sizeof(struct file *);
	set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
	memcpy(nfdt->fd, ofdt->fd, cpy);
	memset((char *)(nfdt->fd) + cpy, 0, set);

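	/*
	 * The open_fds and close_on_exec bitmaps are sized in bits, so the
	 * copy/clear lengths below are fd counts converted to bytes; both
	 * max_fds values are multiples of BITS_PER_LONG, so no bits are lost.
	 */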
	cpy = ofdt->max_fds / BITS_PER_BYTE;
	set = (nfdt->max_fds - ofdt->max_fds) / BITS_PER_BYTE;
	memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
	memset((char *)(nfdt->open_fds) + cpy, 0, set);
	memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
	memset((char *)(nfdt->close_on_exec) + cpy, 0, set);
}

static struct fdtable *alloc_fdtable(unsigned int nr)
{
	struct fdtable *fdt;
	char *data;

	/*
	 * Figure out how many fds we actually want to support in this fdtable.
	 * Allocation steps are keyed to the size of the fdarray, since it
	 * grows far faster than any of the other dynamic data. We try to fit
	 * the fdarray into comfortable page-tuned chunks: starting at 1024B
	 * and growing in powers of two from there on.
	 */
	nr /= (1024 / sizeof(struct file *));
	nr = roundup_pow_of_two(nr + 1);
	nr *= (1024 / sizeof(struct file *));
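	/*
	 * Worked example on 64-bit (sizeof(struct file *) == 8, so 128 fds
	 * per 1024B step): nr = 300 gives 300/128 = 2, rounded up to 4 steps,
	 * hence 4 * 128 = 512 slots, i.e. a 4096-byte fd array.
	 */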
	/*
	 * Note that this can drive nr *below* what we had passed if sysctl_nr_open
	 * had been set lower between the check in expand_files() and here.  Deal
	 * with that in caller, it's cheaper that way.
	 *
	 * We make sure that nr remains a multiple of BITS_PER_LONG - otherwise
	 * bitmaps handling below becomes unpleasant, to put it mildly...
	 */
	if (unlikely(nr > sysctl_nr_open))
		nr = ((sysctl_nr_open - 1) | (BITS_PER_LONG - 1)) + 1;
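	/*
	 * The expression above rounds sysctl_nr_open up to the next multiple
	 * of BITS_PER_LONG, e.g. with BITS_PER_LONG == 64 a limit of 100
	 * becomes ((99 | 63) + 1) == 128.
	 */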

	fdt = kmalloc(sizeof(struct fdtable), GFP_KERNEL);
	if (!fdt)
		goto out;
	fdt->max_fds = nr;
	data = alloc_fdmem(nr * sizeof(struct file *));
	if (!data)
		goto out_fdt;
	fdt->fd = (struct file **)data;
	data = alloc_fdmem(max_t(unsigned int,
				 2 * nr / BITS_PER_BYTE, L1_CACHE_BYTES));
	if (!data)
		goto out_arr;
	fdt->open_fds = (fd_set *)data;
	data += nr / BITS_PER_BYTE;
	fdt->close_on_exec = (fd_set *)data;
	fdt->next = NULL;

	return fdt;

out_arr:
	free_fdmem(fdt->fd);
out_fdt:
	kfree(fdt);
out:
	return NULL;
}

/*
 * Expand the file descriptor table.
 * This function will allocate a new fdtable and both fd array and fdset, of
 * the given size.
 * Return <0 error code on error; 1 on successful completion.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
static int expand_fdtable(struct files_struct *files, int nr)
	__releases(files->file_lock)
	__acquires(files->file_lock)
{
	struct fdtable *new_fdt, *cur_fdt;

	spin_unlock(&files->file_lock);
	new_fdt = alloc_fdtable(nr);
	spin_lock(&files->file_lock);
	if (!new_fdt)
		return -ENOMEM;
	/*
	 * extremely unlikely race - sysctl_nr_open decreased between the check in
	 * caller and alloc_fdtable().  Cheaper to catch it here...
	 */
	if (unlikely(new_fdt->max_fds <= nr)) {
		__free_fdtable(new_fdt);
		return -EMFILE;
	}
	/*
	 * Check again since another task may have expanded the fd table while
	 * we dropped the lock
	 */
	cur_fdt = files_fdtable(files);
	if (nr >= cur_fdt->max_fds) {
		/* Continue as planned */
		copy_fdtable(new_fdt, cur_fdt);
		rcu_assign_pointer(files->fdt, new_fdt);
		if (cur_fdt->max_fds > NR_OPEN_DEFAULT)
			free_fdtable(cur_fdt);
	} else {
		/* Somebody else expanded, so undo our attempt */
		__free_fdtable(new_fdt);
	}
	return 1;
}

/*
 * Expand files.
 * This function will expand the file structures, if the requested size exceeds
 * the current capacity and there is room for expansion.
 * Return <0 error code on error; 0 when nothing done; 1 when files were
 * expanded and execution may have blocked.
 * The files->file_lock should be held on entry, and will be held on exit.
 */
int expand_files(struct files_struct *files, int nr)
{
	struct fdtable *fdt;

	fdt = files_fdtable(files);

	/*
	 * N.B. For clone tasks sharing a files structure, this test
	 * will limit the total number of files that can be opened.
	 */
	if (nr >= rlimit(RLIMIT_NOFILE))
		return -EMFILE;

	/* Do we need to expand? */
	if (nr < fdt->max_fds)
		return 0;

	/* Can we expand? */
	if (nr >= sysctl_nr_open)
		return -EMFILE;

	/* All good, so we try */
	return expand_fdtable(files, nr);
}

static int count_open_files(struct fdtable *fdt)
{
	int size = fdt->max_fds;
	int i;

	/* Find the last open fd */
	for (i = size/(8*sizeof(long)); i > 0; ) {
		if (fdt->open_fds->fds_bits[--i])
			break;
	}
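	/*
	 * The result is rounded up to a whole bitmap word (BITS_PER_LONG
	 * bits), not the exact highest open fd; dup_fd() only needs an
	 * upper bound that covers every set bit.
	 */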
	i = (i+1) * 8 * sizeof(long);
	return i;
}

/*
 * Allocate a new files structure and copy contents from the
 * passed in files structure.
 * errorp will be valid only when the returned files_struct is NULL.
 */
struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
{
	struct files_struct *newf;
	struct file **old_fds, **new_fds;
	int open_files, size, i;
	struct fdtable *old_fdt, *new_fdt;

	*errorp = -ENOMEM;
	newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
	if (!newf)
		goto out;

	atomic_set(&newf->count, 1);

	spin_lock_init(&newf->file_lock);
	newf->next_fd = 0;
	new_fdt = &newf->fdtab;
	new_fdt->max_fds = NR_OPEN_DEFAULT;
	new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
	new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
	new_fdt->fd = &newf->fd_array[0];
	new_fdt->next = NULL;

	spin_lock(&oldf->file_lock);
	old_fdt = files_fdtable(oldf);
	open_files = count_open_files(old_fdt);

	/*
	 * Check whether we need to allocate a larger fd array and fd set.
	 */
	while (unlikely(open_files > new_fdt->max_fds)) {
		spin_unlock(&oldf->file_lock);

		if (new_fdt != &newf->fdtab)
			__free_fdtable(new_fdt);

		new_fdt = alloc_fdtable(open_files - 1);
		if (!new_fdt) {
			*errorp = -ENOMEM;
			goto out_release;
		}

		/* beyond sysctl_nr_open; nothing to do */
		if (unlikely(new_fdt->max_fds < open_files)) {
			__free_fdtable(new_fdt);
			*errorp = -EMFILE;
			goto out_release;
		}

		/*
		 * Reacquire the oldf lock and a pointer to its fd table;
		 * oldf may have grown a new, bigger fd table in the
		 * meantime, and we need the latest pointer.
		 */
		spin_lock(&oldf->file_lock);
		old_fdt = files_fdtable(oldf);
		open_files = count_open_files(old_fdt);
	}

	old_fds = old_fdt->fd;
	new_fds = new_fdt->fd;

	memcpy(new_fdt->open_fds->fds_bits,
	       old_fdt->open_fds->fds_bits, open_files/8);
	memcpy(new_fdt->close_on_exec->fds_bits,
	       old_fdt->close_on_exec->fds_bits, open_files/8);

	for (i = open_files; i != 0; i--) {
		struct file *f = *old_fds++;
		if (f) {
			get_file(f);
		} else {
			/*
			 * The fd may be claimed in the fd bitmap but not yet
			 * instantiated in the files array if a sibling thread
			 * is partway through open().  So make sure that this
			 * fd is available to the new process.
			 */
			FD_CLR(open_files - i, new_fdt->open_fds);
		}
		rcu_assign_pointer(*new_fds++, f);
	}
	spin_unlock(&oldf->file_lock);

	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long-word aligned, thus could use an optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds-open_files)/8;
		int start = open_files / (8 * sizeof(unsigned long));

		memset(&new_fdt->open_fds->fds_bits[start], 0, left);
		memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
	}

	rcu_assign_pointer(newf->fdt, new_fdt);

	return newf;

out_release:
	kmem_cache_free(files_cachep, newf);
out:
	return NULL;
}

static void __devinit fdtable_defer_list_init(int cpu)
{
	struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
	spin_lock_init(&fddef->lock);
	INIT_WORK(&fddef->wq, free_fdtable_work);
	fddef->next = NULL;
}

void __init files_defer_init(void)
{
	int i;
	for_each_possible_cpu(i)
		fdtable_defer_list_init(i);
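	/*
	 * Cap nr_open at INT_MAX and at the largest count whose fd array
	 * (nr * sizeof(void *) bytes) still fits in size_t, rounded down
	 * to a multiple of BITS_PER_LONG (-BITS_PER_LONG masks off the
	 * low bits).
	 */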
	sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
			     -BITS_PER_LONG;
}

struct files_struct init_files = {
	.count		= ATOMIC_INIT(1),
	.fdt		= &init_files.fdtab,
	.fdtab		= {
		.max_fds	= NR_OPEN_DEFAULT,
		.fd		= &init_files.fd_array[0],
		.close_on_exec	= (fd_set *)&init_files.close_on_exec_init,
		.open_fds	= (fd_set *)&init_files.open_fds_init,
	},
	.file_lock	= __SPIN_LOCK_UNLOCKED(init_task.file_lock),
};

/*
 * allocate a file descriptor, mark it busy.
 */
int alloc_fd(unsigned start, unsigned flags)
{
	struct files_struct *files = current->files;
	unsigned int fd;
	int error;
	struct fdtable *fdt;

	spin_lock(&files->file_lock);
repeat:
	fdt = files_fdtable(files);
	fd = start;
	if (fd < files->next_fd)
		fd = files->next_fd;

	if (fd < fdt->max_fds)
		fd = find_next_zero_bit(fdt->open_fds->fds_bits,
					fdt->max_fds, fd);

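	/*
	 * If no free slot was found, find_next_zero_bit() returns max_fds,
	 * and expand_files() will grow the table rather than fail.
	 */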
	error = expand_files(files, fd);
	if (error < 0)
		goto out;

	/*
	 * If we needed to expand the fd array we
	 * might have blocked - try again.
	 */
	if (error)
		goto repeat;

	if (start <= files->next_fd)
		files->next_fd = fd + 1;

	FD_SET(fd, fdt->open_fds);
	if (flags & O_CLOEXEC)
		FD_SET(fd, fdt->close_on_exec);
	else
		FD_CLR(fd, fdt->close_on_exec);
	error = fd;
#if 1
	/* Sanity check */
	if (rcu_dereference_raw(fdt->fd[fd]) != NULL) {
		printk(KERN_WARNING "alloc_fd: slot %d not NULL!\n", fd);
		rcu_assign_pointer(fdt->fd[fd], NULL);
	}
#endif

out:
	spin_unlock(&files->file_lock);
	return error;
}

int get_unused_fd(void)
{
	return alloc_fd(0, 0);
}
EXPORT_SYMBOL(get_unused_fd);
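
/*
 * Typical usage (sketch): a caller pairs get_unused_fd() with
 * fd_install() once its struct file is ready, and releases the slot
 * with put_unused_fd() on failure paths:
 *
 *	fd = get_unused_fd();
 *	if (fd < 0)
 *		return fd;
 *	file = some_file_constructor();	// hypothetical helper
 *	if (IS_ERR(file)) {
 *		put_unused_fd(fd);
 *		return PTR_ERR(file);
 *	}
 *	fd_install(fd, file);
 */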