kernel/pid.c

/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario, when all but one of up to 1 million possible PIDs
 * are already allocated, costs a scan of 32 list entries and at most
 * PAGE_SIZE bytes. The typical fastpath is a single successful setbit.
 * Freeing is O(1).
 *
 * Pid namespaces:
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns) \
    hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS 300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE (PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
        struct pidmap *map, int off)
{
    return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off) \
        find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
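
/*
 * Worked example (assuming 4 KB pages, so BITS_PER_PAGE == 32768):
 * PID 40000 lives in pidmap[1] at bit offset 7232 (40000 / 32768 == 1,
 * 40000 & BITS_PER_PAGE_MASK == 7232), and
 * mk_pid(pid_ns, &pid_ns->pidmap[1], 7232) reconstructs 40000 by
 * inverting that decomposition.
 */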
64
65/*
66 * PID-map pages start out as NULL, they get allocated upon
67 * first use and are never deallocated. This way a low pid_max
68 * value does not cause lots of bitmaps to be allocated, but
69 * the scheme scales to up to 4 million PIDs, runtime.
70 */
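/*
 * Sizing sketch (assuming 4 KB pages and the 64-bit PID_MAX_LIMIT of
 * 4 million): one pidmap page covers 32768 PIDs, so the default pid_max
 * of 32768 fits in a single lazily allocated page, while the 4-million
 * limit needs at most 128 pages.
 */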
struct pid_namespace init_pid_ns = {
    .kref = {
        .refcount = ATOMIC_INIT(2),
    },
    .pidmap = {
        [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
    },
    .last_pid = 0,
    .level = 0,
    .child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

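/*
 * A task is a "container init" if it is pid 1 in its own (deepest) pid
 * namespace; pid->numbers[pid->level] is the upid in exactly that
 * namespace.
 */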
int is_container_init(struct task_struct *tsk)
{
    int ret = 0;
    struct pid *pid;

    rcu_read_lock();
    pid = task_pid(tsk);
    if (pid != NULL && pid->numbers[pid->level].nr == 1)
        ret = 1;
    rcu_read_unlock();

    return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */
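
/*
 * Concretely, a sketch of the interleaving being guarded against:
 *
 *   CPU0                                  CPU1
 *   write_lock_irq(&tasklist_lock);
 *                                         spin_lock(&pidmap_lock);
 *   detach_pid() -> free_pid()
 *     -> spins on pidmap_lock
 *                                         <interrupt>
 *                                           read_lock(&tasklist_lock);
 *                                             -> spins behind the writer
 *
 * Neither cpu can make progress; taking pidmap_lock only with
 * interrupts disabled breaks the cycle.
 */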

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
    int nr = upid->nr;
    struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
    int offset = nr & BITS_PER_PAGE_MASK;

    clear_bit(offset, map->page);
    atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
    /*
     * This is the same as saying
     *
     * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
     * and that mapping orders 'a' and 'b' with respect to 'base'.
     */
    return (unsigned)(a - base) < (unsigned)(b - base);
}
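
/*
 * Worked example: starting a walk at base == 300, pid_before(300, 301, 5)
 * is 1: (unsigned)(301 - 300) == 1, while (unsigned)(5 - 300) wraps to a
 * huge value, so 301 is seen first. After a pid_max rollover the "small"
 * pid really is the later one, which a plain '<' comparison would get
 * wrong.
 */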

/*
 * We might be racing with someone else trying to set pid_ns->last_pid.
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids rollover, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
    int prev;
    int last_write = base;
    do {
        prev = last_write;
        last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
    } while ((prev != last_write) && (pid_before(base, last_write, pid)));
}
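
/*
 * Example race: two allocators both observe base == 100; one finds pid
 * 101, the other 102. Whichever cmpxchg() lands second only retries (and
 * overwrites last_pid) if its own pid is still "later" relative to base,
 * so last_pid converges on 102 regardless of completion order.
 */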

static int alloc_pidmap(struct pid_namespace *pid_ns)
{
    int i, offset, max_scan, pid, last = pid_ns->last_pid;
    struct pidmap *map;

    pid = last + 1;
    if (pid >= pid_max)
        pid = RESERVED_PIDS;
    offset = pid & BITS_PER_PAGE_MASK;
    map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
    /*
     * If last_pid points into the middle of the map->page we
     * want to scan this bitmap block twice, the second time
     * we start with offset == 0 (or RESERVED_PIDS).
     */
    max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
    for (i = 0; i <= max_scan; ++i) {
        if (unlikely(!map->page)) {
            void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
            /*
             * Free the page if someone raced with us
             * installing it:
             */
            spin_lock_irq(&pidmap_lock);
            if (!map->page) {
                map->page = page;
                page = NULL;
            }
            spin_unlock_irq(&pidmap_lock);
            kfree(page);
            if (unlikely(!map->page))
                break;
        }
        if (likely(atomic_read(&map->nr_free))) {
            do {
                if (!test_and_set_bit(offset, map->page)) {
                    atomic_dec(&map->nr_free);
                    set_last_pid(pid_ns, last, pid);
                    return pid;
                }
                offset = find_next_offset(map, offset);
                pid = mk_pid(pid_ns, map, offset);
            } while (offset < BITS_PER_PAGE && pid < pid_max);
        }
        if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
            ++map;
            offset = 0;
        } else {
            map = &pid_ns->pidmap[0];
            offset = RESERVED_PIDS;
            if (unlikely(last == offset))
                break;
        }
        pid = mk_pid(pid_ns, map, offset);
    }
    return -1;
}

int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
    int offset;
    struct pidmap *map, *end;

    /* Guard against a huge 'last' indexing past the pidmap array. */
    if (last >= PID_MAX_LIMIT)
        return -1;

    offset = (last + 1) & BITS_PER_PAGE_MASK;
    map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
    end = &pid_ns->pidmap[PIDMAP_ENTRIES];
    for (; map < end; map++, offset = 0) {
        if (unlikely(!map->page))
            continue;
        offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
        if (offset < BITS_PER_PAGE)
            return mk_pid(pid_ns, map, offset);
    }
    return -1;
}

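/*
 * The atomic_read(&pid->count) == 1 test below is a fast path: if the
 * caller holds the only remaining reference, nobody else can take a new
 * one concurrently, so the atomic decrement can be skipped and the pid
 * freed immediately.
 */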
void put_pid(struct pid *pid)
{
    struct pid_namespace *ns;

    if (!pid)
        return;

    ns = pid->numbers[pid->level].ns;
    if ((atomic_read(&pid->count) == 1) ||
         atomic_dec_and_test(&pid->count)) {
        kmem_cache_free(ns->pid_cachep, pid);
        put_pid_ns(ns);
    }
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
    struct pid *pid = container_of(rhp, struct pid, rcu);
    put_pid(pid);
}

void free_pid(struct pid *pid)
{
    /* We can be called with write_lock_irq(&tasklist_lock) held */
    int i;
    unsigned long flags;

    spin_lock_irqsave(&pidmap_lock, flags);
    for (i = 0; i <= pid->level; i++)
        hlist_del_rcu(&pid->numbers[i].pid_chain);
    spin_unlock_irqrestore(&pidmap_lock, flags);

    for (i = 0; i <= pid->level; i++)
        free_pidmap(pid->numbers + i);

    call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
    struct pid *pid;
    enum pid_type type;
    int i, nr;
    struct pid_namespace *tmp;
    struct upid *upid;

    pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
    if (!pid)
        goto out;

    tmp = ns;
    for (i = ns->level; i >= 0; i--) {
        nr = alloc_pidmap(tmp);
        if (nr < 0)
            goto out_free;

        pid->numbers[i].nr = nr;
        pid->numbers[i].ns = tmp;
        tmp = tmp->parent;
    }

    get_pid_ns(ns);
    pid->level = ns->level;
    atomic_set(&pid->count, 1);
    for (type = 0; type < PIDTYPE_MAX; ++type)
        INIT_HLIST_HEAD(&pid->tasks[type]);

    upid = pid->numbers + ns->level;
    spin_lock_irq(&pidmap_lock);
    for ( ; upid >= pid->numbers; --upid)
        hlist_add_head_rcu(&upid->pid_chain,
                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
    spin_unlock_irq(&pidmap_lock);

out:
    return pid;

out_free:
    while (++i <= ns->level)
        free_pidmap(pid->numbers + i);

    kmem_cache_free(ns->pid_cachep, pid);
    pid = NULL;
    goto out;
}
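
/*
 * Caller's-eye sketch (loosely modelled on copy_process() in
 * kernel/fork.c; the details there differ): a new task gets one struct
 * pid carrying one upid per namespace level, from its own namespace up
 * to the root:
 *
 *    pid = alloc_pid(p->nsproxy->pid_ns);
 *    if (!pid)
 *        goto cleanup;
 *    ...
 *    attach_pid(p, PIDTYPE_PID, pid);
 */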

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
    struct hlist_node *elem;
    struct upid *pnr;

    hlist_for_each_entry_rcu(pnr, elem,
            &pid_hash[pid_hashfn(nr, ns)], pid_chain)
        if (pnr->nr == nr && pnr->ns == ns)
            return container_of(pnr, struct pid,
                    numbers[ns->level]);

    return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);
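
/*
 * Note on the container_of() above: the matching upid must sit at
 * pid->numbers[ns->level] inside its owning struct pid, because
 * alloc_pid() stores the upid for the namespace at level i in
 * numbers[i], and 'ns' can only match a upid at its own level.
 */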

struct pid *find_vpid(int nr)
{
    return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
        struct pid *pid)
{
    struct pid_link *link;

    link = &task->pids[type];
    link->pid = pid;
    hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
            struct pid *new)
{
    struct pid_link *link;
    struct pid *pid;
    int tmp;

    link = &task->pids[type];
    pid = link->pid;

    hlist_del_rcu(&link->node);
    link->pid = new;

    for (tmp = PIDTYPE_MAX; --tmp >= 0; )
        if (!hlist_empty(&pid->tasks[tmp]))
            return;

    free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
    __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
        struct pid *pid)
{
    __change_pid(task, type, pid);
    attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
               enum pid_type type)
{
    new->pids[type].pid = old->pids[type].pid;
    hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
    struct task_struct *result = NULL;
    if (pid) {
        struct hlist_node *first;
        first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                          rcu_read_lock_held() ||
                          lockdep_tasklist_lock_is_held());
        if (first)
            result = hlist_entry(first, struct task_struct, pids[(type)].node);
    }
    return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
    rcu_lockdep_assert(rcu_read_lock_held());
    return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
    return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
    struct pid *pid;
    rcu_read_lock();
    if (type != PIDTYPE_PID)
        task = task->group_leader;
    pid = get_pid(task->pids[type].pid);
    rcu_read_unlock();
    return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
    struct task_struct *result;
    rcu_read_lock();
    result = pid_task(pid, type);
    if (result)
        get_task_struct(result);
    rcu_read_unlock();
    return result;
}

struct pid *find_get_pid(pid_t nr)
{
    struct pid *pid;

    rcu_read_lock();
    pid = get_pid(find_vpid(nr));
    rcu_read_unlock();

    return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

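/*
 * pid_nr_ns() returns 0 when 'pid' has no number in 'ns', i.e. when 'ns'
 * is neither the pid's own namespace nor one of its ancestors. A pid
 * allocated in a nested namespace is visible at every level up to the
 * root, under a different numeric value at each level.
 */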
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
    struct upid *upid;
    pid_t nr = 0;

    if (pid && ns->level <= pid->level) {
        upid = &pid->numbers[ns->level];
        if (upid->ns == ns)
            nr = upid->nr;
    }
    return nr;
}

pid_t pid_vnr(struct pid *pid)
{
    return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
            struct pid_namespace *ns)
{
    pid_t nr = 0;

    rcu_read_lock();
    if (!ns)
        ns = current->nsproxy->pid_ns;
    if (likely(pid_alive(task))) {
        if (type != PIDTYPE_PID)
            task = task->group_leader;
        nr = pid_nr_ns(task->pids[type].pid, ns);
    }
    rcu_read_unlock();

    return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
    return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
    return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
    struct pid *pid;

    do {
        pid = find_pid_ns(nr, ns);
        if (pid)
            break;
        nr = next_pidmap(ns, nr);
    } while (nr > 0);

    return pid;
}
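
/*
 * Usage sketch (loosely how /proc enumerates tasks; the real
 * proc_pid_readdir() keeps extra cursor state): walk every live pid in
 * a namespace in numeric order:
 *
 *    rcu_read_lock();
 *    for (pid = find_ge_pid(1, ns); pid;
 *         pid = find_ge_pid(pid_nr_ns(pid, ns) + 1, ns))
 *        ;    /* visit 'pid' */
 *    rcu_read_unlock();
 */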

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
    int i, pidhash_size;

    pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
                       HASH_EARLY | HASH_SMALL,
                       &pidhash_shift, NULL, 4096);
    pidhash_size = 1 << pidhash_shift;

    for (i = 0; i < pidhash_size; i++)
        INIT_HLIST_HEAD(&pid_hash[i]);
}
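
/*
 * E.g. the initial pidhash_shift of 4 acts as the floor, giving
 * 1 << 4 == 16 buckets on the smallest machines;
 * alloc_large_system_hash() raises the shift with available memory, and
 * the final argument above caps the table at 4096 entries (shift 12).
 */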

void __init pidmap_init(void)
{
    /* bump default and minimum pid_max based on number of cpus */
    pid_max = min(pid_max_max, max_t(int, pid_max,
                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
    pid_max_min = max_t(int, pid_max_min,
                PIDS_PER_CPU_MIN * num_possible_cpus());
    pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

    init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
    /* Reserve PID 0. We never call free_pidmap(0). */
    set_bit(0, init_pid_ns.pidmap[0].page);
    atomic_dec(&init_pid_ns.pidmap[0].nr_free);

    init_pid_ns.pid_cachep = KMEM_CACHE(pid,
            SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}