fs/file_table.c

/*
 * linux/fs/file_table.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/string.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/eventpoll.h>
#include <linux/rcupdate.h>
#include <linux/mount.h>
#include <linux/capability.h>
#include <linux/cdev.h>
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>

#include <asm/atomic.h>

/* sysctl tunables... */
struct files_stat_struct files_stat = {
    .max_files = NR_FILE
};

/* public. Not pretty! */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(files_lock);

/* SLAB cache for file structures */
static struct kmem_cache *filp_cachep __read_mostly;

static struct percpu_counter nr_files __cacheline_aligned_in_smp;

static inline void file_free_rcu(struct rcu_head *head)
{
    struct file *f = container_of(head, struct file, f_u.fu_rcuhead);

    put_cred(f->f_cred);
    kmem_cache_free(filp_cachep, f);
}

static inline void file_free(struct file *f)
{
    percpu_counter_dec(&nr_files);
    file_check_state(f);
    call_rcu(&f->f_u.fu_rcuhead, file_free_rcu);
}

/*
 * Return the total number of open files in the system
 */
static int get_nr_files(void)
{
    return percpu_counter_read_positive(&nr_files);
}

/*
 * Return the maximum number of open files in the system
 */
int get_max_files(void)
{
    return files_stat.max_files;
}
EXPORT_SYMBOL_GPL(get_max_files);

/*
 * Handle nr_files sysctl
 */
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
    files_stat.nr_files = get_nr_files();
    return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
#else
int proc_nr_files(ctl_table *table, int write, struct file *filp,
                  void __user *buffer, size_t *lenp, loff_t *ppos)
{
    return -ENOSYS;
}
#endif
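/*
 * Illustrative sketch (not part of the original file): proc_nr_files() is
 * wired up through the fs sysctl table in kernel/sysctl.c, roughly along
 * these lines (exact field values vary by kernel version):
 *
 *    {
 *        .procname     = "file-nr",
 *        .data         = &files_stat,
 *        .maxlen       = sizeof(files_stat),
 *        .mode         = 0444,
 *        .proc_handler = &proc_nr_files,
 *    },
 *
 * Reading /proc/sys/fs/file-nr then reports the allocated, free and maximum
 * file handle counts from files_stat.
 */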

/* Find an unused file structure and return a pointer to it.
 * Returns NULL, if there are no more free file structures or
 * we run out of memory.
 *
 * Be very careful using this. You are responsible for
 * getting write access to any mount that you might assign
 * to this filp, if it is opened for write. If this is not
 * done, you will imbalance the mount's writer count
 * and get a warning at __fput() time.
 */
struct file *get_empty_filp(void)
{
    const struct cred *cred = current_cred();
    static int old_max;
    struct file *f;

    /*
     * Privileged users can go above max_files
     */
    if (get_nr_files() >= files_stat.max_files && !capable(CAP_SYS_ADMIN)) {
        /*
         * percpu_counters are inaccurate. Do an expensive check before
         * we go and fail.
         */
        if (percpu_counter_sum_positive(&nr_files) >= files_stat.max_files)
            goto over;
    }

    f = kmem_cache_zalloc(filp_cachep, GFP_KERNEL);
    if (f == NULL)
        goto fail;

    percpu_counter_inc(&nr_files);
    if (security_file_alloc(f))
        goto fail_sec;

    INIT_LIST_HEAD(&f->f_u.fu_list);
    atomic_long_set(&f->f_count, 1);
    rwlock_init(&f->f_owner.lock);
    f->f_cred = get_cred(cred);
    spin_lock_init(&f->f_lock);
    eventpoll_init_file(f);
    /* f->f_version: 0 */
    return f;

over:
    /* Ran out of filps - report that */
    if (get_nr_files() > old_max) {
        printk(KERN_INFO "VFS: file-max limit %d reached\n",
               get_max_files());
        old_max = get_nr_files();
    }
    goto fail;

fail_sec:
    file_free(f);
fail:
    return NULL;
}

EXPORT_SYMBOL(get_empty_filp);

/**
 * alloc_file - allocate and initialize a 'struct file'
 * @mnt: the vfsmount on which the file will reside
 * @dentry: the dentry representing the new file
 * @mode: the mode with which the new file will be opened
 * @fop: the 'struct file_operations' for the new file
 *
 * Use this instead of get_empty_filp() to get a new
 * 'struct file'. Do so because of the same initialization
 * pitfalls listed for init_file(). This is the
 * preferred interface to init_file().
 *
 * If all the callers of init_file() are eliminated, its
 * code should be moved into this function.
 */
struct file *alloc_file(struct vfsmount *mnt, struct dentry *dentry,
        fmode_t mode, const struct file_operations *fop)
{
    struct file *file;

    file = get_empty_filp();
    if (!file)
        return NULL;

    init_file(file, mnt, dentry, mode, fop);
    return file;
}
EXPORT_SYMBOL(alloc_file);
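/*
 * Illustrative usage sketch (not part of the original file), modelled on
 * callers such as shmem_file_setup() from this era; "my_fops" stands in for
 * whatever struct file_operations the caller provides:
 *
 *    file = alloc_file(mnt, dentry, FMODE_READ | FMODE_WRITE, &my_fops);
 *    if (!file)
 *        goto put_dentry;
 *
 * On success the new file takes over the caller's dentry reference, while
 * init_file() grabs its own reference on the mount via mntget(); on failure
 * the caller still owns the dentry and must dput() it.
 */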

/**
 * init_file - initialize a 'struct file'
 * @file: the already allocated 'struct file' to initialize
 * @mnt: the vfsmount on which the file resides
 * @dentry: the dentry representing this file
 * @mode: the mode the file is opened with
 * @fop: the 'struct file_operations' for this file
 *
 * Use this instead of setting the members directly. Doing so
 * avoids making mistakes like forgetting the mntget() or
 * forgetting to take a write on the mnt.
 *
 * Note: This is a crappy interface. It is here to make
 * merging with the existing users of get_empty_filp()
 * who have complex failure logic easier. All users
 * of this should be moving to alloc_file().
 */
int init_file(struct file *file, struct vfsmount *mnt, struct dentry *dentry,
       fmode_t mode, const struct file_operations *fop)
{
    int error = 0;
    file->f_path.dentry = dentry;
    file->f_path.mnt = mntget(mnt);
    file->f_mapping = dentry->d_inode->i_mapping;
    file->f_mode = mode;
    file->f_op = fop;

    /*
     * These mounts don't really matter in practice
     * for r/o bind mounts. They aren't userspace-
     * visible. We do this for consistency, and so
     * that we can do debugging checks at __fput()
     */
    if ((mode & FMODE_WRITE) && !special_file(dentry->d_inode->i_mode)) {
        file_take_write(file);
        error = mnt_clone_write(mnt);
        WARN_ON(error);
    }
    return error;
}
EXPORT_SYMBOL(init_file);

void fput(struct file *file)
{
    if (atomic_long_dec_and_test(&file->f_count))
        __fput(file);
}

EXPORT_SYMBOL(fput);

/**
 * drop_file_write_access - give up ability to write to a file
 * @file: the file to which we will stop writing
 *
 * This is a central place which will give up the ability
 * to write to @file, along with access to write through
 * its vfsmount.
 */
void drop_file_write_access(struct file *file)
{
    struct vfsmount *mnt = file->f_path.mnt;
    struct dentry *dentry = file->f_path.dentry;
    struct inode *inode = dentry->d_inode;

    put_write_access(inode);

    if (special_file(inode->i_mode))
        return;
    if (file_check_writeable(file) != 0)
        return;
    mnt_drop_write(mnt);
    file_release_write(file);
}
EXPORT_SYMBOL_GPL(drop_file_write_access);

/* __fput is called from task context when aio completion releases the
 * last use of a struct file *. Do not use otherwise.
 */
void __fput(struct file *file)
{
    struct dentry *dentry = file->f_path.dentry;
    struct vfsmount *mnt = file->f_path.mnt;
    struct inode *inode = dentry->d_inode;

    might_sleep();

    fsnotify_close(file);
    /*
     * The function eventpoll_release() should be the first called
     * in the file cleanup chain.
     */
    eventpoll_release(file);
    locks_remove_flock(file);

    if (unlikely(file->f_flags & FASYNC)) {
        if (file->f_op && file->f_op->fasync)
            file->f_op->fasync(-1, file, 0);
    }
    if (file->f_op && file->f_op->release)
        file->f_op->release(inode, file);
    security_file_free(file);
    ima_file_free(file);
    if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
        cdev_put(inode->i_cdev);
    fops_put(file->f_op);
    put_pid(file->f_owner.pid);
    file_kill(file);
    if (file->f_mode & FMODE_WRITE)
        drop_file_write_access(file);
    file->f_path.dentry = NULL;
    file->f_path.mnt = NULL;
    file_free(file);
    dput(dentry);
    mntput(mnt);
}

struct file *fget(unsigned int fd)
{
    struct file *file;
    struct files_struct *files = current->files;

    rcu_read_lock();
    file = fcheck_files(files, fd);
    if (file) {
        if (!atomic_long_inc_not_zero(&file->f_count)) {
            /* File object ref couldn't be taken */
            rcu_read_unlock();
            return NULL;
        }
    }
    rcu_read_unlock();

    return file;
}

EXPORT_SYMBOL(fget);
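/*
 * Illustrative sketch (not part of the original file): the usual
 * fget()/fput() pairing as seen from a system call. example_fd_size() is a
 * hypothetical helper; the reference taken by fget() keeps the file (and so
 * its dentry and vfsmount) alive until the matching fput().
 */
static loff_t example_fd_size(unsigned int fd)
{
    struct file *file = fget(fd);
    loff_t size;

    if (!file)
        return -EBADF;
    size = i_size_read(file->f_path.dentry->d_inode);
    fput(file);
    return size;
}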

/*
 * Lightweight file lookup - no refcnt increment if fd table isn't shared.
 * You can use this only if it is guaranteed that the current task already
 * holds a refcnt to that file. That check has to be done at fget() only
 * and a flag is returned to be passed to the corresponding fput_light().
 * There must not be a cloning between an fget_light/fput_light pair.
 */
struct file *fget_light(unsigned int fd, int *fput_needed)
{
    struct file *file;
    struct files_struct *files = current->files;

    *fput_needed = 0;
    if (likely((atomic_read(&files->count) == 1))) {
        file = fcheck_files(files, fd);
    } else {
        rcu_read_lock();
        file = fcheck_files(files, fd);
        if (file) {
            if (atomic_long_inc_not_zero(&file->f_count))
                *fput_needed = 1;
            else
                /* Didn't get the reference, someone freed it */
                file = NULL;
        }
        rcu_read_unlock();
    }

    return file;
}
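/*
 * Illustrative sketch (not part of the original file): fget_light() callers
 * such as the read/write syscalls hand the returned fput_needed flag straight
 * to fput_light(), which only drops the reference if one was actually taken.
 * example_fd_flags() is a hypothetical helper.
 */
static unsigned int example_fd_flags(unsigned int fd)
{
    struct file *file;
    int fput_needed;
    unsigned int flags = 0;

    file = fget_light(fd, &fput_needed);
    if (file) {
        flags = file->f_flags;
        fput_light(file, fput_needed);
    }
    return flags;
}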

void put_filp(struct file *file)
{
    if (atomic_long_dec_and_test(&file->f_count)) {
        security_file_free(file);
        file_kill(file);
        file_free(file);
    }
}

void file_move(struct file *file, struct list_head *list)
{
    if (!list)
        return;
    file_list_lock();
    list_move(&file->f_u.fu_list, list);
    file_list_unlock();
}

void file_kill(struct file *file)
{
    if (!list_empty(&file->f_u.fu_list)) {
        file_list_lock();
        list_del_init(&file->f_u.fu_list);
        file_list_unlock();
    }
}

int fs_may_remount_ro(struct super_block *sb)
{
    struct file *file;

    /* Check that no files are currently opened for writing. */
    file_list_lock();
    list_for_each_entry(file, &sb->s_files, f_u.fu_list) {
        struct inode *inode = file->f_path.dentry->d_inode;

        /* File with pending delete? */
        if (inode->i_nlink == 0)
            goto too_bad;

        /* Writeable file? */
        if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
            goto too_bad;
    }
    file_list_unlock();
    return 1; /* Tis' cool bro. */
too_bad:
    file_list_unlock();
    return 0;
}

/**
 * mark_files_ro - mark all files read-only
 * @sb: superblock in question
 *
 * All files are marked read-only. We don't care about pending
 * delete files so this should be used in 'force' mode only.
 */
void mark_files_ro(struct super_block *sb)
{
    struct file *f;

retry:
    file_list_lock();
    list_for_each_entry(f, &sb->s_files, f_u.fu_list) {
        struct vfsmount *mnt;
        if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
            continue;
        if (!file_count(f))
            continue;
        if (!(f->f_mode & FMODE_WRITE))
            continue;
        f->f_mode &= ~FMODE_WRITE;
        if (file_check_writeable(f) != 0)
            continue;
        file_release_write(f);
        mnt = mntget(f->f_path.mnt);
        file_list_unlock();
        /*
         * This can sleep, so we can't hold
         * the file_list_lock() spinlock.
         */
        mnt_drop_write(mnt);
        mntput(mnt);
        goto retry;
    }
    file_list_unlock();
}

void __init files_init(unsigned long mempages)
{
    int n;

    filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
            SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

    /*
     * One file with associated inode and dcache is very roughly 1K.
     * By default, don't use more than 10% of our memory for files.
     */
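    /*
     * Worked example (added note, not part of the original source): with
     * 4 KiB pages and 1 GiB of RAM, mempages is about 262144, so
     * n = 262144 * (4096 / 1024) / 10 = 104857, before the NR_FILE floor
     * below is applied.
     */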

    n = (mempages * (PAGE_SIZE / 1024)) / 10;
    files_stat.max_files = n;
    if (files_stat.max_files < NR_FILE)
        files_stat.max_files = NR_FILE;
    files_defer_init();
    percpu_counter_init(&nr_files, 0);
}
