kernel/user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
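
/*
 * For illustration only -- a simplified, hypothetical sketch (modelled
 * on the RLIMIT_NPROC check in kernel/fork.c) of how a fork-time limit
 * test consults this per-user structure:
 *
 *    if (atomic_read(&p->real_cred->user->processes) >=
 *            task_rlimit(p, RLIMIT_NPROC)) {
 *        // over the per-user process limit: fail the fork
 *        // unless the caller is suitably privileged
 *    }
 */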

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>

struct user_namespace init_user_ns = {
    .kref = {
        .refcount = ATOMIC_INIT(2),
    },
    .creator = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) ((((uid) >> UIDHASH_BITS) + (uid)) & UIDHASH_MASK)
#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
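
/*
 * Worked example of the hash (assuming the usual UIDHASH_BITS == 7,
 * i.e. UIDHASH_SZ == 128 and UIDHASH_MASK == 127; the real values come
 * from <linux/sched.h> and shrink under CONFIG_BASE_SMALL):
 *
 *    __uidhashfn(1000) == ((1000 >> 7) + 1000) & 127
 *                      == (7 + 1000) & 127
 *                      == 111
 *
 * Folding the high bits into the low ones means UIDs that share their
 * low seven bits (0, 128, 256, ...) still land in different buckets.
 */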

static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
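
/*
 * Sketch (not real code) of the broken pattern the comment above rules
 * out -- taking uidhash_lock as a _bh lock from a path that already has
 * hard interrupts disabled:
 *
 *    local_irq_save(flags);
 *    spin_lock_bh(&uidhash_lock);
 *    ...
 *    spin_unlock_bh(&uidhash_lock);  // local_bh_enable() with IRQs off:
 *                                    // may run softirqs that re-enable
 *                                    // interrupts behind the caller
 *    local_irq_restore(flags);
 *
 * Hence the code below sticks to spin_lock_irq()/spin_lock_irqsave().
 */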

/* root_user.__count is 2, 1 for init task cred, 1 for init_user_ns->creator */
struct user_struct root_user = {
    .__count = ATOMIC_INIT(2),
    .processes = ATOMIC_INIT(1),
    .files = ATOMIC_INIT(0),
    .sigpending = ATOMIC_INIT(0),
    .locked_shm = 0,
    .user_ns = &init_user_ns,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
    hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
    hlist_del_init(&up->uidhash_node);
    put_user_ns(up->user_ns);
}

static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
    struct user_struct *user;
    struct hlist_node *h;

    hlist_for_each_entry(user, h, hashent, uidhash_node) {
        if (user->uid == uid) {
            atomic_inc(&user->__count);
            return user;
        }
    }

    return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
    __releases(&uidhash_lock)
{
    uid_hash_remove(up);
    spin_unlock_irqrestore(&uidhash_lock, flags);
    key_put(up->uid_keyring);
    key_put(up->session_keyring);
    kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
    struct user_struct *ret;
    unsigned long flags;
    struct user_namespace *ns = current_user_ns();

    spin_lock_irqsave(&uidhash_lock, flags);
    ret = uid_hash_find(uid, uidhashentry(ns, uid));
    spin_unlock_irqrestore(&uidhash_lock, flags);
    return ret;
}
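
/*
 * Hypothetical usage of find_user(), for illustration:
 *
 *    struct user_struct *u = find_user(some_uid);
 *    if (u) {
 *        ... inspect u->processes, u->sigpending, etc. ...
 *        free_uid(u);  // drop the reference find_user() took
 *    }
 */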

void free_uid(struct user_struct *up)
{
    unsigned long flags;

    if (!up)
        return;

    local_irq_save(flags);
    if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
        free_user(up, flags);
    else
        local_irq_restore(flags);
}
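
/*
 * For reference, atomic_dec_and_lock(&up->__count, &uidhash_lock)
 * behaves roughly like this sketch of the generic lib/dec_and_lock.c
 * implementation:
 *
 *    if (atomic_add_unless(&cnt, -1, 1))
 *        return 0;            // count was > 1: decrement, no lock taken
 *    spin_lock(&lock);
 *    if (atomic_dec_and_test(&cnt))
 *        return 1;            // count hit 0: return with the lock held
 *    spin_unlock(&lock);
 *    return 0;
 *
 * so uidhash_lock is only taken when the last reference goes away and
 * the user_struct actually has to be unhashed and freed.
 */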

struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
    struct hlist_head *hashent = uidhashentry(ns, uid);
    struct user_struct *up, *new;

    spin_lock_irq(&uidhash_lock);
    up = uid_hash_find(uid, hashent);
    spin_unlock_irq(&uidhash_lock);

    if (!up) {
        new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
        if (!new)
            goto out;

        new->uid = uid;
        atomic_set(&new->__count, 1);

        new->user_ns = get_user_ns(ns);

        /*
         * Before adding this, check whether we raced
         * on adding the same user already..
         */
        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        if (up) {
            put_user_ns(ns);
            key_put(new->uid_keyring);
            key_put(new->session_keyring);
            kmem_cache_free(uid_cachep, new);
        } else {
            uid_hash_insert(new, hashent);
            up = new;
        }
        spin_unlock_irq(&uidhash_lock);
    }

    return up;

out:
    return NULL;
}
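
/*
 * Hypothetical caller pattern for alloc_uid(), roughly what a
 * setuid()-style path does: look up or create the accounting structure
 * for the target UID, then move the reference into the new credentials.
 *
 *    struct user_struct *new_user;
 *
 *    new_user = alloc_uid(current_user_ns(), new_uid);
 *    if (!new_user)
 *        return -EAGAIN;
 *    free_uid(new_cred->user);   // drop the old user's reference
 *    new_cred->user = new_user;  // alloc_uid()'s reference moves here
 */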

static int __init uid_cache_init(void)
{
    int n;

    uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
            0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

    for (n = 0; n < UIDHASH_SZ; ++n)
        INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

    /* Insert the root user immediately (init already runs as root) */
    spin_lock_irq(&uidhash_lock);
    uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
    spin_unlock_irq(&uidhash_lock);

    return 0;
}

module_init(uid_cache_init);
