fs/file_table.c, source at commit b05a5adf03613de371c77c3235f7d970d7cd0c71 (Lars-Peter Clausen, "NAND: Optimize reading the eec data for the JZ4740 (evil hack)")
/*
 *  linux/fs/file_table.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *  Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/ima.h>

#include <asm/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

DECLARE_LGLOCK(files_lglock);
DEFINE_LGLOCK(files_lglock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	file_check_state(f);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

/* Find an unused file structure and return a pointer to it.
 * Returns NULL if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will unbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (f == NULL)
		goto fail;

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	if (security_file_alloc(f))
		goto fail_sec;

	INIT_LIST_HEAD(&f->f_u.fu_list);
	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	goto fail;

fail_sec:
	file_free(f);
fail:
	return NULL;
}

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (vfsmount, dentry) pair the new file will point at
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of calling get_empty_filp() and filling in the
 * fields yourself, so that the initialization pitfalls described
 * above get_empty_filp() are handled in one place.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (!file)
		return NULL;

	file->f_path = *path;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	file->f_mode = mode;
	file->f_op = fop;

	/*
	 * These mounts don't really matter in practice
	 * for r/o bind mounts.  They aren't userspace-
	 * visible.  We do this for consistency, and so
	 * that we can do debugging checks at __fput()
	 */
	if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) {
		file_take_write(file);
		WARN_ON(mnt_clone_write(path->mnt));
	}
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

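/*
 * Hedged usage sketch (illustrative only, not part of file_table.c):
 * building a struct file around an existing dentry/vfsmount pair with
 * alloc_file().  'example_make_file', 'my_mnt', 'my_dentry' and 'my_fops'
 * are hypothetical names.  On success the path references belong to the
 * new file and are dropped again at __fput() time.
 */
static struct file *example_make_file(struct vfsmount *my_mnt,
				      struct dentry *my_dentry,
				      const struct file_operations *my_fops)
{
	struct path path;
	struct file *file;

	path.mnt = mntget(my_mnt);
	path.dentry = dget(my_dentry);

	file = alloc_file(&path, FMODE_READ, my_fops);
	if (!file)
		/* allocation failed; the references are still ours to drop */
		path_put(&path);
	return file;
}
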
/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
	struct vfsmount *mnt = file->f_path.mnt;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;

	put_write_access(inode);

	if (special_file(inode->i_mode))
		return;
	if (file_check_writeable(file) != 0)
		return;
	mnt_drop_write(mnt);
	file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* the real guts of fput() - releasing the last reference to the file */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = dentry->d_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_flock(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op && file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	if (file->f_op && file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	ima_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	file_sb_list_del(file);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITE)
		drop_file_write_access(file);
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count))
		__fput(file);
}

EXPORT_SYMBOL(fput);

struct file *fget(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (file->f_mode & FMODE_PATH ||
		    !atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget);

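/*
 * Hedged usage sketch (illustrative only, not part of file_table.c): fget()
 * takes a long-lived reference that may be stashed past the syscall and must
 * eventually be dropped with fput().  'example_pin_fd' is a hypothetical
 * helper name.
 */
static struct file *example_pin_fd(unsigned int fd)
{
	struct file *file = fget(fd);	/* NULL for a bad fd or an O_PATH file */

	/* caller owns the reference; it must call fput(file) when done */
	return file;
}
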
struct file *fget_raw(unsigned int fd)
{
	struct file *file;
	struct files_struct *files = current->files;

	rcu_read_lock();
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken */
		if (!atomic_long_inc_not_zero(&file->f_count))
			file = NULL;
	}
	rcu_read_unlock();

	return file;
}

EXPORT_SYMBOL(fget_raw);

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 *
 * You can use this instead of fget if you satisfy all of the following
 * conditions:
 * 1) You must call fput_light before exiting the syscall and returning control
 *    to userspace (i.e. you cannot remember the returned struct file * after
 *    returning to userspace).
 * 2) You must not call filp_close on the returned struct file * in between
 *    calls to fget_light and fput_light.
 * 3) You must not clone the current task in between the calls to fget_light
 *    and fput_light.
 *
 * The fput_needed flag returned by fget_light should be passed to the
 * corresponding fput_light.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
		if (file && (file->f_mode & FMODE_PATH))
			file = NULL;
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (!(file->f_mode & FMODE_PATH) &&
			    atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

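/*
 * Hedged usage sketch (illustrative only, not part of file_table.c): the
 * fget_light()/fput_light() pattern inside a syscall, obeying the rules
 * listed above fget_light().  'example_with_fd' is a hypothetical name.
 */
static int example_with_fd(unsigned int fd)
{
	int fput_needed;
	int err = -EBADF;
	struct file *file = fget_light(fd, &fput_needed);

	if (file) {
		/* use 'file' only here; it must not outlive the syscall */
		err = 0;
		fput_light(file, fput_needed);
	}
	return err;
}
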
struct file *fget_raw_light(unsigned int fd, int *fput_needed)
{
	struct file *file;
	struct files_struct *files = current->files;

	*fput_needed = 0;
	if (atomic_read(&files->count) == 1) {
		file = fcheck_files(files, fd);
	} else {
		rcu_read_lock();
		file = fcheck_files(files, fd);
		if (file) {
			if (atomic_long_inc_not_zero(&file->f_count))
				*fput_needed = 1;
			else
				/* Didn't get the reference, someone's freed */
				file = NULL;
		}
		rcu_read_unlock();
	}

	return file;
}

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_sb_list_del(file);
		file_free(file);
	}
}

static inline int file_list_cpu(struct file *file)
{
#ifdef CONFIG_SMP
	return file->f_sb_list_cpu;
#else
	return smp_processor_id();
#endif
}

/* helper for file_sb_list_add to reduce ifdefs */
static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
{
	struct list_head *list;
#ifdef CONFIG_SMP
	int cpu;
	cpu = smp_processor_id();
	file->f_sb_list_cpu = cpu;
	list = per_cpu_ptr(sb->s_files, cpu);
#else
	list = &sb->s_files;
#endif
	list_add(&file->f_u.fu_list, list);
}

/**
 * file_sb_list_add - add a file to the sb's file list
 * @file: file to add
 * @sb: sb to add it to
 *
 * Use this function to associate a file with the superblock of the inode it
 * refers to.
 */
void file_sb_list_add(struct file *file, struct super_block *sb)
{
	lg_local_lock(files_lglock);
	__file_sb_list_add(file, sb);
	lg_local_unlock(files_lglock);
}

/**
 * file_sb_list_del - remove a file from the sb's file list
 * @file: file to remove
 *
 * Use this function to remove a file from its superblock's file list.
 */
void file_sb_list_del(struct file *file)
{
	if (!list_empty(&file->f_u.fu_list)) {
		lg_local_lock_cpu(files_lglock, file_list_cpu(file));
		list_del_init(&file->f_u.fu_list);
		lg_local_unlock_cpu(files_lglock, file_list_cpu(file));
	}
}

#ifdef CONFIG_SMP

/*
 * These macros iterate all files on all CPUs for a given superblock.
 * files_lglock must be held globally.
 */
#define do_file_list_for_each_entry(__sb, __file)	\
{							\
	int i;						\
	for_each_possible_cpu(i) {			\
		struct list_head *list;			\
		list = per_cpu_ptr((__sb)->s_files, i);	\
		list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry			\
	}						\
}

#else

#define do_file_list_for_each_entry(__sb, __file)	\
{							\
	struct list_head *list;				\
	list = &(__sb)->s_files;			\
	list_for_each_entry((__file), list, f_u.fu_list)

#define while_file_list_for_each_entry			\
}

#endif

int fs_may_remount_ro(struct super_block *sb)
{
	struct file *file;
	/* Check that no files are currently opened for writing. */
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, file) {
		struct inode *inode = file->f_path.dentry->d_inode;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			goto too_bad;

		/* Writeable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			goto too_bad;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
	return 1; /* Tis' cool bro. */
too_bad:
	lg_global_unlock(files_lglock);
	return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only.  We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
	struct file *f;

retry:
	lg_global_lock(files_lglock);
	do_file_list_for_each_entry(sb, f) {
		struct vfsmount *mnt;
		if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
			continue;
		if (!file_count(f))
			continue;
		if (!(f->f_mode & FMODE_WRITE))
			continue;
		spin_lock(&f->f_lock);
		f->f_mode &= ~FMODE_WRITE;
		spin_unlock(&f->f_lock);
		if (file_check_writeable(f) != 0)
			continue;
		file_release_write(f);
		mnt = mntget(f->f_path.mnt);
		/* This can sleep, so we can't hold the spinlock. */
		lg_global_unlock(files_lglock);
		mnt_drop_write(mnt);
		mntput(mnt);
		goto retry;
	} while_file_list_for_each_entry;
	lg_global_unlock(files_lglock);
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
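	/*
	 * Illustrative arithmetic (assumed figures, not from this file):
	 * with 4 KiB pages and mempages = 262144 (1 GiB of RAM),
	 * n = 262144 * 4 / 10 = 104857, i.e. roughly 100k files before
	 * the max_t() floor against NR_FILE below.
	 */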
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	files_defer_init();
	lg_lock_init(files_lglock);
	percpu_counter_init(&nr_files, 0);
}