fs/fcntl.c

/*
 * linux/fs/fcntl.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/capability.h>
#include <linux/dnotify.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/security.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>

#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>

void set_close_on_exec(unsigned int fd, int flag)
{
    struct files_struct *files = current->files;
    struct fdtable *fdt;
    spin_lock(&files->file_lock);
    fdt = files_fdtable(files);
    if (flag)
        FD_SET(fd, fdt->close_on_exec);
    else
        FD_CLR(fd, fdt->close_on_exec);
    spin_unlock(&files->file_lock);
}

static int get_close_on_exec(unsigned int fd)
{
    struct files_struct *files = current->files;
    struct fdtable *fdt;
    int res;
    rcu_read_lock();
    fdt = files_fdtable(files);
    res = FD_ISSET(fd, fdt->close_on_exec);
    rcu_read_unlock();
    return res;
}

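/*
 * Example: a minimal userspace sketch (assuming the usual glibc
 * fcntl(2) wrapper) of how these helpers are reached -- F_GETFD and
 * F_SETFD read and write the close-on-exec bit handled above:
 *
 *     int flags = fcntl(fd, F_GETFD);
 *     if (flags != -1)
 *         fcntl(fd, F_SETFD, flags | FD_CLOEXEC);  // close across execve
 */
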
SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
{
    int err = -EBADF;
    struct file * file, *tofree;
    struct files_struct * files = current->files;
    struct fdtable *fdt;

    if ((flags & ~O_CLOEXEC) != 0)
        return -EINVAL;

    if (unlikely(oldfd == newfd))
        return -EINVAL;

    spin_lock(&files->file_lock);
    err = expand_files(files, newfd);
    file = fcheck(oldfd);
    if (unlikely(!file))
        goto Ebadf;
    if (unlikely(err < 0)) {
        if (err == -EMFILE)
            goto Ebadf;
        goto out_unlock;
    }
    /*
     * We need to detect attempts to do dup2() over allocated but still
     * not finished descriptor. NB: OpenBSD avoids that at the price of
     * extra work in their equivalent of fget() - they insert struct
     * file immediately after grabbing descriptor, mark it larval if
     * more work (e.g. actual opening) is needed and make sure that
     * fget() treats larval files as absent. Potentially interesting,
     * but while extra work in fget() is trivial, locking implications
     * and amount of surgery on open()-related paths in VFS are not.
     * FreeBSD fails with -EBADF in the same situation, NetBSD "solution"
     * deadlocks in rather amusing ways, AFAICS. All of that is out of
     * scope of POSIX or SUS, since neither considers shared descriptor
     * tables and this condition does not arise without those.
     */
    err = -EBUSY;
    fdt = files_fdtable(files);
    tofree = fdt->fd[newfd];
    if (!tofree && FD_ISSET(newfd, fdt->open_fds))
        goto out_unlock;
    get_file(file);
    rcu_assign_pointer(fdt->fd[newfd], file);
    FD_SET(newfd, fdt->open_fds);
    if (flags & O_CLOEXEC)
        FD_SET(newfd, fdt->close_on_exec);
    else
        FD_CLR(newfd, fdt->close_on_exec);
    spin_unlock(&files->file_lock);

    if (tofree)
        filp_close(tofree, files);

    return newfd;

Ebadf:
    err = -EBADF;
out_unlock:
    spin_unlock(&files->file_lock);
    return err;
}

SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd)
{
    if (unlikely(newfd == oldfd)) { /* corner case */
        struct files_struct *files = current->files;
        int retval = oldfd;

        rcu_read_lock();
        if (!fcheck_files(files, oldfd))
            retval = -EBADF;
        rcu_read_unlock();
        return retval;
    }
    return sys_dup3(oldfd, newfd, 0);
}

SYSCALL_DEFINE1(dup, unsigned int, fildes)
{
    int ret = -EBADF;
    struct file *file = fget(fildes);

    if (file) {
        ret = get_unused_fd();
        if (ret >= 0)
            fd_install(ret, file);
        else
            fput(file);
    }
    return ret;
}

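/*
 * Example: a minimal userspace sketch of the dup family implemented
 * above (assuming the usual glibc wrappers).  dup3() can set
 * close-on-exec atomically with the duplication, dup2() is dup3()
 * with no flags plus the oldfd == newfd corner case, and dup() takes
 * the lowest free descriptor:
 *
 *     int a = dup(fd);                  // lowest unused descriptor
 *     int b = dup2(fd, 42);             // exactly descriptor 42
 *     int c = dup3(fd, 43, O_CLOEXEC);  // 43, with close-on-exec set
 *     int d = dup2(fd, fd);             // returns fd if fd is open
 *     int e = dup3(fd, fd, 0);          // always fails with EINVAL
 */
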
#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)

static int setfl(int fd, struct file * filp, unsigned long arg)
{
    struct inode * inode = filp->f_path.dentry->d_inode;
    int error = 0;

    /*
     * O_APPEND cannot be cleared if the file is marked as append-only
     * and the file is open for write.
     */
    if (((arg ^ filp->f_flags) & O_APPEND) && IS_APPEND(inode))
        return -EPERM;

    /* O_NOATIME can only be set by the owner or superuser */
    if ((arg & O_NOATIME) && !(filp->f_flags & O_NOATIME))
        if (!is_owner_or_cap(inode))
            return -EPERM;

    /* required for strict SunOS emulation */
    if (O_NONBLOCK != O_NDELAY)
        if (arg & O_NDELAY)
            arg |= O_NONBLOCK;

    if (arg & O_DIRECT) {
        if (!filp->f_mapping || !filp->f_mapping->a_ops ||
            !filp->f_mapping->a_ops->direct_IO)
                return -EINVAL;
    }

    if (filp->f_op && filp->f_op->check_flags)
        error = filp->f_op->check_flags(arg);
    if (error)
        return error;

    /*
     * ->fasync() is responsible for setting the FASYNC bit.
     */
    if (((arg ^ filp->f_flags) & FASYNC) && filp->f_op &&
            filp->f_op->fasync) {
        error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
        if (error < 0)
            goto out;
        if (error > 0)
            error = 0;
    }
    spin_lock(&filp->f_lock);
    filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
    spin_unlock(&filp->f_lock);

 out:
    return error;
}

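/*
 * Example: a minimal userspace sketch of reaching setfl() through
 * F_SETFL (assuming the usual glibc wrapper).  Only the bits in
 * SETFL_MASK are honoured, so the usual pattern is read-modify-write
 * to keep unrelated flags intact:
 *
 *     int fl = fcntl(fd, F_GETFL);
 *     if (fl != -1)
 *         fcntl(fd, F_SETFL, fl | O_NONBLOCK);  // non-blocking I/O
 */
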
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
                     int force)
{
    write_lock_irq(&filp->f_owner.lock);
    if (force || !filp->f_owner.pid) {
        put_pid(filp->f_owner.pid);
        filp->f_owner.pid = get_pid(pid);
        filp->f_owner.pid_type = type;

        if (pid) {
            const struct cred *cred = current_cred();
            filp->f_owner.uid = cred->uid;
            filp->f_owner.euid = cred->euid;
        }
    }
    write_unlock_irq(&filp->f_owner.lock);
}

int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
        int force)
{
    int err;

    err = security_file_set_fowner(filp);
    if (err)
        return err;

    f_modown(filp, pid, type, force);
    return 0;
}
EXPORT_SYMBOL(__f_setown);

int f_setown(struct file *filp, unsigned long arg, int force)
{
    enum pid_type type;
    struct pid *pid;
    int who = arg;
    int result;
    type = PIDTYPE_PID;
    if (who < 0) {
        type = PIDTYPE_PGID;
        who = -who;
    }
    rcu_read_lock();
    pid = find_vpid(who);
    result = __f_setown(filp, pid, type, force);
    rcu_read_unlock();
    return result;
}
EXPORT_SYMBOL(f_setown);

void f_delown(struct file *filp)
{
    f_modown(filp, NULL, PIDTYPE_PID, 1);
}

pid_t f_getown(struct file *filp)
{
    pid_t pid;
    read_lock(&filp->f_owner.lock);
    pid = pid_vnr(filp->f_owner.pid);
    if (filp->f_owner.pid_type == PIDTYPE_PGID)
        pid = -pid;
    read_unlock(&filp->f_owner.lock);
    return pid;
}

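/*
 * Example: a minimal userspace sketch of the F_SETOWN sign convention
 * implemented by f_setown() above -- a positive argument names a
 * process, a negative one a process group (and F_GETOWN mirrors it):
 *
 *     fcntl(fd, F_SETOWN, getpid());    // SIGIO to this process
 *     fcntl(fd, F_SETOWN, -getpgrp());  // SIGIO to this process group
 *     pid_t owner = fcntl(fd, F_GETOWN);
 */
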
static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
        struct file *filp)
{
    long err = -EINVAL;

    switch (cmd) {
    case F_DUPFD:
    case F_DUPFD_CLOEXEC:
        if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
            break;
        err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
        if (err >= 0) {
            get_file(filp);
            fd_install(err, filp);
        }
        break;
    case F_GETFD:
        err = get_close_on_exec(fd) ? FD_CLOEXEC : 0;
        break;
    case F_SETFD:
        err = 0;
        set_close_on_exec(fd, arg & FD_CLOEXEC);
        break;
    case F_GETFL:
        err = filp->f_flags;
        break;
    case F_SETFL:
        err = setfl(fd, filp, arg);
        break;
    case F_GETLK:
        err = fcntl_getlk(filp, (struct flock __user *) arg);
        break;
    case F_SETLK:
    case F_SETLKW:
        err = fcntl_setlk(fd, filp, cmd, (struct flock __user *) arg);
        break;
    case F_GETOWN:
        /*
         * XXX If f_owner is a process group, the
         * negative return value will get converted
         * into an error. Oops. If we keep the
         * current syscall conventions, the only way
         * to fix this will be in libc.
         */
        err = f_getown(filp);
        force_successful_syscall_return();
        break;
    case F_SETOWN:
        err = f_setown(filp, arg, 1);
        break;
    case F_GETSIG:
        err = filp->f_owner.signum;
        break;
    case F_SETSIG:
        /* arg == 0 restores default behaviour. */
        if (!valid_signal(arg)) {
            break;
        }
        err = 0;
        filp->f_owner.signum = arg;
        break;
    case F_GETLEASE:
        err = fcntl_getlease(filp);
        break;
    case F_SETLEASE:
        err = fcntl_setlease(fd, filp, arg);
        break;
    case F_NOTIFY:
        err = fcntl_dirnotify(fd, filp, arg);
        break;
    default:
        break;
    }
    return err;
}

SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
{
    struct file *filp;
    long err = -EBADF;

    filp = fget(fd);
    if (!filp)
        goto out;

    err = security_file_fcntl(filp, cmd, arg);
    if (err) {
        fput(filp);
        return err;
    }

    err = do_fcntl(fd, cmd, arg, filp);

    fput(filp);
out:
    return err;
}

#if BITS_PER_LONG == 32
SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
        unsigned long, arg)
{
    struct file * filp;
    long err;

    err = -EBADF;
    filp = fget(fd);
    if (!filp)
        goto out;

    err = security_file_fcntl(filp, cmd, arg);
    if (err) {
        fput(filp);
        return err;
    }
    err = -EBADF;

    switch (cmd) {
        case F_GETLK64:
            err = fcntl_getlk64(filp, (struct flock64 __user *) arg);
            break;
        case F_SETLK64:
        case F_SETLKW64:
            err = fcntl_setlk64(fd, filp, cmd,
                    (struct flock64 __user *) arg);
            break;
        default:
            err = do_fcntl(fd, cmd, arg, filp);
            break;
    }
    fput(filp);
out:
    return err;
}
#endif

/* Table to convert sigio signal codes into poll band bitmaps */

static const long band_table[NSIGPOLL] = {
    POLLIN | POLLRDNORM,                    /* POLL_IN */
    POLLOUT | POLLWRNORM | POLLWRBAND,      /* POLL_OUT */
    POLLIN | POLLRDNORM | POLLMSG,          /* POLL_MSG */
    POLLERR,                                /* POLL_ERR */
    POLLPRI | POLLRDBAND,                   /* POLL_PRI */
    POLLHUP | POLLERR                       /* POLL_HUP */
};

static inline int sigio_perm(struct task_struct *p,
                             struct fown_struct *fown, int sig)
{
    const struct cred *cred;
    int ret;

    rcu_read_lock();
    cred = __task_cred(p);
    ret = ((fown->euid == 0 ||
        fown->euid == cred->suid || fown->euid == cred->uid ||
        fown->uid == cred->suid || fown->uid == cred->uid) &&
           !security_file_send_sigiotask(p, fown, sig));
    rcu_read_unlock();
    return ret;
}

static void send_sigio_to_task(struct task_struct *p,
                   struct fown_struct *fown,
                   int fd,
                   int reason)
{
    /*
     * F_SETSIG can change ->signum lockless in parallel, make
     * sure we read it once and use the same value throughout.
     */
    int signum = ACCESS_ONCE(fown->signum);

    if (!sigio_perm(p, fown, signum))
        return;

    switch (signum) {
        siginfo_t si;
        default:
            /* Queue a rt signal with the appropriate fd as its
               value. We use SI_SIGIO as the source, not
               SI_KERNEL, since kernel signals always get
               delivered even if we can't queue. Failure to
               queue in this case _should_ be reported; we fall
               back to SIGIO in that case. --sct */
            si.si_signo = signum;
            si.si_errno = 0;
            si.si_code = reason;
            /* Make sure we are called with one of the POLL_*
               reasons, otherwise we could leak kernel stack into
               userspace. */
            BUG_ON((reason & __SI_MASK) != __SI_POLL);
            if (reason - POLL_IN >= NSIGPOLL)
                si.si_band = ~0L;
            else
                si.si_band = band_table[reason - POLL_IN];
            si.si_fd = fd;
            if (!group_send_sig_info(signum, &si, p))
                break;
        /* fall-through: fall back on the old plain SIGIO signal */
        case 0:
            group_send_sig_info(SIGIO, SEND_SIG_PRIV, p);
    }
}

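/*
 * Example: a minimal userspace sketch of consuming the siginfo_t
 * queued above.  With F_SETSIG naming a real-time signal, si_fd and
 * si_band identify the descriptor and the poll band, so one handler
 * (installed with SA_SIGINFO) can service many descriptors:
 *
 *     void on_io(int sig, siginfo_t *si, void *ctx)
 *     {
 *         int fd = si->si_fd;        // which descriptor fired
 *         long band = si->si_band;   // POLLIN, POLLOUT, ... bitmap
 *     }
 *
 *     fcntl(fd, F_SETOWN, getpid());
 *     fcntl(fd, F_SETSIG, SIGRTMIN);  // 0 restores plain SIGIO
 *     fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC);
 */
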
void send_sigio(struct fown_struct *fown, int fd, int band)
{
    struct task_struct *p;
    enum pid_type type;
    struct pid *pid;

    read_lock(&fown->lock);
    type = fown->pid_type;
    pid = fown->pid;
    if (!pid)
        goto out_unlock_fown;

    read_lock(&tasklist_lock);
    do_each_pid_task(pid, type, p) {
        send_sigio_to_task(p, fown, fd, band);
    } while_each_pid_task(pid, type, p);
    read_unlock(&tasklist_lock);
 out_unlock_fown:
    read_unlock(&fown->lock);
}

static void send_sigurg_to_task(struct task_struct *p,
                                struct fown_struct *fown)
{
    if (sigio_perm(p, fown, SIGURG))
        group_send_sig_info(SIGURG, SEND_SIG_PRIV, p);
}

int send_sigurg(struct fown_struct *fown)
{
    struct task_struct *p;
    enum pid_type type;
    struct pid *pid;
    int ret = 0;

    read_lock(&fown->lock);
    type = fown->pid_type;
    pid = fown->pid;
    if (!pid)
        goto out_unlock_fown;

    ret = 1;

    read_lock(&tasklist_lock);
    do_each_pid_task(pid, type, p) {
        send_sigurg_to_task(p, fown);
    } while_each_pid_task(pid, type, p);
    read_unlock(&tasklist_lock);
 out_unlock_fown:
    read_unlock(&fown->lock);
    return ret;
}

static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;

/*
 * fasync_helper() is used by almost all character device drivers
 * to set up the fasync queue. It returns negative on error, 0 if it did
 * no changes and positive if it added/deleted the entry.
 */
int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
{
    struct fasync_struct *fa, **fp;
    struct fasync_struct *new = NULL;
    int result = 0;

    if (on) {
        new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
        if (!new)
            return -ENOMEM;
    }

    /*
     * We need to take f_lock first since it's not an IRQ-safe
     * lock.
     */
    spin_lock(&filp->f_lock);
    write_lock_irq(&fasync_lock);
    for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
        if (fa->fa_file == filp) {
            if (on) {
                fa->fa_fd = fd;
                kmem_cache_free(fasync_cache, new);
            } else {
                *fp = fa->fa_next;
                kmem_cache_free(fasync_cache, fa);
                result = 1;
            }
            goto out;
        }
    }

    if (on) {
        new->magic = FASYNC_MAGIC;
        new->fa_file = filp;
        new->fa_fd = fd;
        new->fa_next = *fapp;
        *fapp = new;
        result = 1;
    }
out:
    if (on)
        filp->f_flags |= FASYNC;
    else
        filp->f_flags &= ~FASYNC;
    write_unlock_irq(&fasync_lock);
    spin_unlock(&filp->f_lock);
    return result;
}

EXPORT_SYMBOL(fasync_helper);

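/*
 * Example: a minimal driver-side sketch (hypothetical "mydev" names,
 * not defined in this file).  A character driver keeps one fasync list
 * per device and forwards its ->fasync file operation straight to
 * fasync_helper():
 *
 *     struct mydev {
 *         struct fasync_struct *async_queue;
 *     };
 *
 *     static int mydev_fasync(int fd, struct file *filp, int on)
 *     {
 *         struct mydev *dev = filp->private_data;
 *         return fasync_helper(fd, filp, on, &dev->async_queue);
 *     }
 */
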
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
{
    while (fa) {
        struct fown_struct * fown;
        if (fa->magic != FASYNC_MAGIC) {
            printk(KERN_ERR "kill_fasync: bad magic number in "
                   "fasync_struct!\n");
            return;
        }
        fown = &fa->fa_file->f_owner;
        /* Don't send SIGURG to processes which have not set a
           queued signum: SIGURG has its own default signalling
           mechanism. */
        if (!(sig == SIGURG && fown->signum == 0))
            send_sigio(fown, fa->fa_fd, band);
        fa = fa->fa_next;
    }
}

EXPORT_SYMBOL(__kill_fasync);

void kill_fasync(struct fasync_struct **fp, int sig, int band)
{
    /* First a quick test without locking: usually
     * the list is empty.
     */
    if (*fp) {
        read_lock(&fasync_lock);
        /* reread *fp after obtaining the lock */
        __kill_fasync(*fp, sig, band);
        read_unlock(&fasync_lock);
    }
}
EXPORT_SYMBOL(kill_fasync);

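/*
 * Example: continuing the hypothetical "mydev" sketch above, the
 * driver fires the queue from the point where data becomes available,
 * often an interrupt handler; the read side of fasync_lock pairs with
 * the write_lock_irq() taken on the update side above:
 *
 *     kill_fasync(&dev->async_queue, SIGIO, POLL_IN);   // now readable
 *     kill_fasync(&dev->async_queue, SIGIO, POLL_OUT);  // now writable
 */
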
static int __init fasync_init(void)
{
    fasync_cache = kmem_cache_create("fasync_cache",
        sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
    return 0;
}

module_init(fasync_init)
