kernel/user.c

/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
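
/*
 * Illustrative sketch (not part of this file): fork() enforces one such
 * per-user limit by checking the "processes" counter against
 * RLIMIT_NPROC, roughly:
 *
 *	if (atomic_read(&p->real_cred->user->processes) >=
 *			task_rlimit(p, RLIMIT_NPROC))
 *		... refuse the fork unless suitably privileged ...
 *
 * (cf. copy_process() in kernel/fork.c).
 */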

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
struct user_namespace init_user_ns = {
    .uid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .gid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .projid_map = {
        .nr_extents = 1,
        .extent[0] = {
            .first = 0,
            .lower_first = 0,
            .count = 4294967295U,
        },
    },
    .count = ATOMIC_INIT(3),
    .owner = GLOBAL_ROOT_UID,
    .group = GLOBAL_ROOT_GID,
    .proc_inum = PROC_USER_INIT_INO,
#ifdef CONFIG_PERSISTENT_KEYRINGS
    .persistent_keyring_register_sem =
    __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
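
/*
 * How an extent maps an ID (illustrative): an id in [first, first + count)
 * maps to lower_first + (id - first).  With first == lower_first == 0 and
 * count == 4294967295U, each map above is an identity mapping over the
 * whole 32-bit ID space except (uid_t)-1, which is reserved as the
 * invalid ID.
 */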

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
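
/*
 * Worked example (illustrative): with UIDHASH_BITS == 7, uid 1000 hashes
 * to ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111.  Folding the
 * high bits in keeps uids that differ only above the low UIDHASH_BITS
 * (e.g. 1000 and 1128) from sharing a bucket.
 */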

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
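/*
 * A minimal sketch of that hazard (hypothetical caller, not code from
 * this file):
 *
 *	local_irq_save(flags);
 *	free_uid(up);
 *	local_irq_restore(flags);
 *
 * If free_uid() used spin_lock_bh()/spin_unlock_bh() internally, the
 * final local_bh_enable() could run softirq callbacks and leave
 * interrupts enabled behind this caller's back.  Hence the
 * _irqsave/_irqrestore variants used below.
 */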
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
    .__count = ATOMIC_INIT(1),
    .processes = ATOMIC_INIT(1),
    .files = ATOMIC_INIT(0),
    .sigpending = ATOMIC_INIT(0),
    .locked_shm = 0,
    .uid = GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
    hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
    hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
    struct user_struct *user;

    hlist_for_each_entry(user, hashent, uidhash_node) {
        if (uid_eq(user->uid, uid)) {
            atomic_inc(&user->__count);
            return user;
        }
    }

    return NULL;
}
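
/*
 * Taking the reference in uid_hash_find() is race-free because the
 * last-reference drop in free_uid() uses atomic_dec_and_lock() on the
 * same uidhash_lock: a count observed as nonzero here, under the lock,
 * cannot concurrently fall to zero.
 */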

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
    __releases(&uidhash_lock)
{
    uid_hash_remove(up);
    spin_unlock_irqrestore(&uidhash_lock, flags);
    key_put(up->uid_keyring);
    key_put(up->session_keyring);
    kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
    struct user_struct *ret;
    unsigned long flags;

    spin_lock_irqsave(&uidhash_lock, flags);
    ret = uid_hash_find(uid, uidhashentry(uid));
    spin_unlock_irqrestore(&uidhash_lock, flags);
    return ret;
}
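
/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file):
 *
 *	struct user_struct *user = find_user(uid);
 *	if (user) {
 *		... inspect user->processes, user->sigpending, etc.,
 *		... then drop the reference taken above:
 *		free_uid(user);
 *	}
 */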

void free_uid(struct user_struct *up)
{
    unsigned long flags;

    if (!up)
        return;

    local_irq_save(flags);
    if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
        free_user(up, flags);
    else
        local_irq_restore(flags);
}
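
/*
 * atomic_dec_and_lock() above behaves roughly like the following
 * (an illustrative expansion; the real helper also has a lock-free
 * fast path for counts greater than one):
 *
 *	spin_lock(&uidhash_lock);
 *	if (atomic_dec_and_test(&up->__count))
 *		return 1;	... lock stays held for free_user() ...
 *	spin_unlock(&uidhash_lock);
 *	return 0;
 */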

struct user_struct *alloc_uid(kuid_t uid)
{
    struct hlist_head *hashent = uidhashentry(uid);
    struct user_struct *up, *new;

    spin_lock_irq(&uidhash_lock);
    up = uid_hash_find(uid, hashent);
    spin_unlock_irq(&uidhash_lock);

    if (!up) {
        new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
        if (!new)
            return NULL;

        new->uid = uid;
        atomic_set(&new->__count, 1);

        /*
         * Before adding this, check whether we raced
         * on adding the same user already.
         */
        spin_lock_irq(&uidhash_lock);
        up = uid_hash_find(uid, hashent);
        if (up) {
            key_put(new->uid_keyring);
            key_put(new->session_keyring);
            kmem_cache_free(uid_cachep, new);
        } else {
            uid_hash_insert(new, hashent);
            up = new;
        }
        spin_unlock_irq(&uidhash_lock);
    }

    return up;
}
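
/*
 * Note the lock/drop/relock sequence in alloc_uid(): kmem_cache_zalloc()
 * with GFP_KERNEL may sleep, so it cannot run under the softirq-safe
 * uidhash_lock.  The second uid_hash_find() catches the case where
 * another task inserted the same uid while the lock was dropped.
 */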

static int __init uid_cache_init(void)
{
    int n;

    uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
            0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

    for (n = 0; n < UIDHASH_SZ; ++n)
        INIT_HLIST_HEAD(uidhash_table + n);

    /* Insert the root user immediately (init already runs as root) */
    spin_lock_irq(&uidhash_lock);
    uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
    spin_unlock_irq(&uidhash_lock);

    return 0;
}
subsys_initcall(uid_cache_init);