/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/lglock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/task_work.h>
#include <linux/ima.h>

#include <linux/atomic.h>

#include "internal.h"

/* sysctl tunables... */
struct files_stat_struct files_stat = {
	.max_files = NR_FILE
};

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static void file_free_rcu(struct rcu_head *head)
{
	struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

	put_cred(f->f_cred);
	kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
	percpu_counter_dec(&nr_files);
	call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

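/*
 * Illustrative sketch (assumption, not code from this file): file_free()
 * defers the actual kmem_cache_free() through call_rcu() because lockless
 * fd-table lookups may still be dereferencing the struct file under
 * rcu_read_lock().  A reader along the lines of __fget() in fs/file.c stays
 * safe against a concurrent free roughly like this:
 *
 *	rcu_read_lock();
 *	file = fcheck_files(files, fd);
 *	if (file && !atomic_long_inc_not_zero(&file->f_count))
 *		file = NULL;	(lost the race with the final fput())
 *	rcu_read_unlock();
 */
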
/*
 * Return the total number of open files in the system
 */
static long get_nr_files(void)
{
	return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
unsigned long get_max_files(void)
{
	return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	files_stat.nr_files = get_nr_files();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#else
int proc_nr_files(struct ctl_table *table, int write,
		  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}
#endif

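/*
 * Illustrative sketch (assumption, not code from this file): proc_nr_files()
 * is wired up in kernel/sysctl.c as the handler for /proc/sys/fs/file-nr, so
 * the nr_files snapshot taken above is what userspace reads back.  A minimal
 * userspace reader for the three exported values (allocated, unused, max)
 * could look like this:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned long allocated, unused, max;
 *		FILE *fp = fopen("/proc/sys/fs/file-nr", "r");
 *
 *		if (!fp || fscanf(fp, "%lu %lu %lu",
 *				  &allocated, &unused, &max) != 3)
 *			return 1;
 *		printf("open files: %lu, limit: %lu\n", allocated, max);
 *		fclose(fp);
 *		return 0;
 *	}
 */
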
/* Find an unused file structure and return a pointer to it.
 * Returns an error pointer if some error happened, e.g. we are over the
 * limit on the number of file structures, run out of memory or the
 * operation is not permitted.
 *
 * Be very careful using this.  You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write.  If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
	const struct cred *cred = current_cred();
	static long old_max;
	struct file *f;
	int error;

	/*
	 * Privileged users can go above max_files
	 */
	if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
		/*
		 * percpu_counters are inaccurate.  Do an expensive check before
		 * we go and fail.
		 */
		if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
			goto over;
	}

	f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
	if (unlikely(!f))
		return ERR_PTR(-ENOMEM);

	percpu_counter_inc(&nr_files);
	f->f_cred = get_cred(cred);
	error = security_file_alloc(f);
	if (unlikely(error)) {
		file_free(f);
		return ERR_PTR(error);
	}

	atomic_long_set(&f->f_count, 1);
	rwlock_init(&f->f_owner.lock);
	spin_lock_init(&f->f_lock);
	mutex_init(&f->f_pos_lock);
	eventpoll_init_file(f);
	/* f->f_version: 0 */
	return f;

over:
	/* Ran out of filps - report that */
	if (get_nr_files() > old_max) {
		pr_info("VFS: file-max limit %lu reached\n", get_max_files());
		old_max = get_nr_files();
	}
	return ERR_PTR(-ENFILE);
}

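/*
 * Illustrative sketch (assumption, not a caller in this file): per the
 * comment above get_empty_filp(), a caller that opens the resulting filp
 * for write has to take write access on the inode and on the mount, so that
 * the FMODE_WRITER teardown in __fput() below stays balanced:
 *
 *	struct file *f = get_empty_filp();
 *
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	error = get_write_access(inode);	(undone by put_write_access())
 *	if (error)
 *		goto fail;
 *	error = __mnt_want_write(mnt);		(undone by __mnt_drop_write())
 *	if (error) {
 *		put_write_access(inode);
 *		goto fail;
 *	}
 *	f->f_mode |= FMODE_WRITER;
 *
 * where the "fail" path drops the filp again with put_filp(f).
 */
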
/**
 * alloc_file - allocate and initialize a 'struct file'
 * @path: the (vfsmount, dentry) pair the new file will reside on
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'.  Do so because of the same initialization
 * pitfalls listed for init_file().  This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct path *path, fmode_t mode,
		const struct file_operations *fop)
{
	struct file *file;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_path = *path;
	file->f_inode = path->dentry->d_inode;
	file->f_mapping = path->dentry->d_inode->i_mapping;
	if ((mode & FMODE_READ) &&
	     likely(fop->read || fop->aio_read || fop->read_iter))
		mode |= FMODE_CAN_READ;
	if ((mode & FMODE_WRITE) &&
	     likely(fop->write || fop->aio_write || fop->write_iter))
		mode |= FMODE_CAN_WRITE;
	file->f_mode = mode;
	file->f_op = fop;
	if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_inc(path->dentry->d_inode);
	return file;
}
EXPORT_SYMBOL(alloc_file);

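/*
 * Illustrative sketch (assumption, not a caller in this file; the some_*
 * names are placeholders): in-kernel users such as pipes, sockets and shmem
 * typically build a struct path from an internal mount and a freshly
 * allocated dentry, then hand it to alloc_file():
 *
 *	struct path path;
 *	struct file *file;
 *
 *	path.mnt = mntget(some_internal_mnt);
 *	path.dentry = some_dentry;		(caller already holds a reference)
 *	file = alloc_file(&path, FMODE_READ | FMODE_WRITE, &some_fops);
 *	if (IS_ERR(file)) {
 *		path_put(&path);
 *		return PTR_ERR(file);
 *	}
 *
 * alloc_file() takes over the path references; __fput() drops them again
 * via dput()/mntput() when the last reference to the file goes away.
 */
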
/* the real guts of fput() - releasing the last reference to file
 */
static void __fput(struct file *file)
{
	struct dentry *dentry = file->f_path.dentry;
	struct vfsmount *mnt = file->f_path.mnt;
	struct inode *inode = file->f_inode;

	might_sleep();

	fsnotify_close(file);
	/*
	 * The function eventpoll_release() should be the first called
	 * in the file cleanup chain.
	 */
	eventpoll_release(file);
	locks_remove_file(file);

	if (unlikely(file->f_flags & FASYNC)) {
		if (file->f_op->fasync)
			file->f_op->fasync(-1, file, 0);
	}
	ima_file_free(file);
	if (file->f_op->release)
		file->f_op->release(inode, file);
	security_file_free(file);
	if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL &&
		     !(file->f_mode & FMODE_PATH))) {
		cdev_put(inode->i_cdev);
	}
	fops_put(file->f_op);
	put_pid(file->f_owner.pid);
	if ((file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
		i_readcount_dec(inode);
	if (file->f_mode & FMODE_WRITER) {
		put_write_access(inode);
		__mnt_drop_write(mnt);
	}
	file->f_path.dentry = NULL;
	file->f_path.mnt = NULL;
	file->f_inode = NULL;
	file_free(file);
	dput(dentry);
	mntput(mnt);
}

static LLIST_HEAD(delayed_fput_list);
static void delayed_fput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_fput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		__fput(llist_entry(node, struct file, f_u.fu_llist));
	}
}

static void ____fput(struct callback_head *work)
{
	__fput(container_of(work, struct file, f_u.fu_rcuhead));
}

254 | |
255 | /* |
256 | * If kernel thread really needs to have the final fput() it has done |
257 | * to complete, call this. The only user right now is the boot - we |
258 | * *do* need to make sure our writes to binaries on initramfs has |
259 | * not left us with opened struct file waiting for __fput() - execve() |
260 | * won't work without that. Please, don't add more callers without |
261 | * very good reasons; in particular, never call that with locks |
262 | * held and never call that from a thread that might need to do |
263 | * some work on any kind of umount. |
264 | */ |
265 | void flush_delayed_fput(void) |
266 | { |
267 | delayed_fput(NULL); |
268 | } |

static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);

void fput(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;

		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
			init_task_work(&file->f_u.fu_rcuhead, ____fput);
			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
				return;
			/*
			 * After this task has run exit_task_work(),
			 * task_work_add() will fail.  Fall through to delayed
			 * fput to avoid leaking *file.
			 */
		}

		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
			schedule_delayed_work(&delayed_fput_work, 1);
	}
}

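/*
 * Illustrative sketch (assumption, not code from this file): fput() is the
 * drop side of the file reference count; a typical in-kernel user pairs it
 * with fget() on a file descriptor:
 *
 *	struct file *f = fget(fd);
 *
 *	if (!f)
 *		return -EBADF;
 *	... use f->f_op, f->f_path, etc. ...
 *	fput(f);
 *
 * When that fput() drops the last reference, the actual cleanup in __fput()
 * runs later, either from task_work on return to userspace or from the
 * delayed workqueue above.
 */
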
/*
 * Synchronous analog of fput(); meant for kernel threads that might be
 * needed during some umount() (and thus can't use flush_delayed_fput()
 * without risking deadlocks), need to wait for completion of __fput(), and
 * know that for this specific struct file it won't involve anything that
 * would need them.  Use only if you really need it - at the very least,
 * don't blindly convert an fput() done by a kernel thread to this.
 */
void __fput_sync(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		struct task_struct *task = current;
		BUG_ON(!(task->flags & PF_KTHREAD));
		__fput(file);
	}
}

EXPORT_SYMBOL(fput);

void put_filp(struct file *file)
{
	if (atomic_long_dec_and_test(&file->f_count)) {
		security_file_free(file);
		file_free(file);
	}
}

void __init files_init(unsigned long mempages)
{
	unsigned long n;

	filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	/*
	 * One file with associated inode and dcache is very roughly 1K.
	 * Per default don't use more than 10% of our memory for files.
	 */

	n = (mempages * (PAGE_SIZE / 1024)) / 10;
	files_stat.max_files = max_t(unsigned long, n, NR_FILE);
	percpu_counter_init(&nr_files, 0);
}
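
/*
 * Worked example (illustration, not code from this file): with 4 KiB pages
 * and 1 GiB of RAM, mempages is 262144, so
 *
 *	n = (262144 * (4096 / 1024)) / 10 = 104857
 *
 * i.e. the default file-max is roughly 100k files, matching the "~1K per
 * file, at most 10% of memory" estimate above; on very small machines the
 * max_t() clamp keeps the limit from dropping below NR_FILE.
 */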