kernel/pid.c

/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 Nadia Yvette Chambers, IBM
 * (C) 2004 Nadia Yvette Chambers, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given IDs on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages whose bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 * Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
#include <linux/proc_ns.h>
#include <linux/proc_fs.h>

#define pid_hashfn(nr, ns) \
    hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static unsigned int pidhash_shift = 4;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS 300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

static inline int mk_pid(struct pid_namespace *pid_ns,
        struct pidmap *map, int off)
{
    return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off) \
        find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

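/*
 * Example of the pid <-> (map, offset) arithmetic above (illustrative;
 * assumes 4 KiB pages, i.e. BITS_PER_PAGE == 32768): pid 40000 lives in
 * pidmap[40000 / 32768] == pidmap[1] at bit offset 40000 & 32767 == 7232,
 * and mk_pid(ns, &ns->pidmap[1], 7232) == 1*32768 + 7232 == 40000.
 */
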
/*
 * PID-map pages start out as NULL; they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales up to 4 million PIDs at runtime.
 */
struct pid_namespace init_pid_ns = {
    .kref = {
        .refcount = ATOMIC_INIT(2),
    },
    .pidmap = {
        [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
    },
    .last_pid = 0,
    .nr_hashed = PIDNS_HASH_ADDING,
    .level = 0,
    .child_reaper = &init_task,
    .user_ns = &init_user_ns,
    .proc_inum = PROC_PID_INIT_INO,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

static void free_pidmap(struct upid *upid)
{
    int nr = upid->nr;
    struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
    int offset = nr & BITS_PER_PAGE_MASK;

    clear_bit(offset, map->page);
    atomic_inc(&map->nr_free);
}

/*
 * If we started walking pids at 'base', is 'a' seen before 'b'?
 */
static int pid_before(int base, int a, int b)
{
    /*
     * This is the same as saying
     *
     * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
     * and that mapping orders 'a' and 'b' with respect to 'base'.
     */
    return (unsigned)(a - base) < (unsigned)(b - base);
}

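/*
 * Worked example (illustrative): pid_before(100, 150, 50) is true --
 * counting up from base 100, we reach 150 before wrapping around to 50,
 * since (unsigned)(150 - 100) == 50 while (unsigned)(50 - 100) is huge.
 */
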
/*
 * We might be racing with someone else trying to set pid_ns->last_pid
 * at pid allocation time (there's also a sysctl for this, but racing
 * with this one is OK, see comment in kernel/pid_namespace.c about it).
 * We want the winner to have the "later" value, because if the
 * "earlier" value prevails, then a pid may get reused immediately.
 *
 * Since pids roll over, it is not sufficient to just pick the bigger
 * value. We have to consider where we started counting from.
 *
 * 'base' is the value of pid_ns->last_pid that we observed when
 * we started looking for a pid.
 *
 * 'pid' is the pid that we eventually found.
 */
static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
{
    int prev;
    int last_write = base;
    do {
        prev = last_write;
        last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
    } while ((prev != last_write) && (pid_before(base, last_write, pid)));
}

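/*
 * A minimal userspace analogue of the loop above, using C11 atomics
 * (an illustrative sketch only; set_last() is a hypothetical name, not
 * kernel API):
 */
#if 0
#include <stdatomic.h>

static void set_last(atomic_int *last, int base, int pid)
{
    int prev = base;

    /* Retry while some other writer installed a value that is still
     * "earlier" than ours with respect to base. */
    while (!atomic_compare_exchange_strong(last, &prev, pid) &&
           (unsigned)(prev - base) < (unsigned)(pid - base))
        ;
}
#endif
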
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
    int i, offset, max_scan, pid, last = pid_ns->last_pid;
    struct pidmap *map;

    pid = last + 1;
    if (pid >= pid_max)
        pid = RESERVED_PIDS;
    offset = pid & BITS_PER_PAGE_MASK;
    map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
    /*
     * If last_pid points into the middle of the map->page we
     * want to scan this bitmap block twice, the second time
     * we start with offset == 0 (or RESERVED_PIDS).
     */
    max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
    for (i = 0; i <= max_scan; ++i) {
        if (unlikely(!map->page)) {
            void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
            /*
             * Free the page if someone raced with us
             * installing it:
             */
            spin_lock_irq(&pidmap_lock);
            if (!map->page) {
                map->page = page;
                page = NULL;
            }
            spin_unlock_irq(&pidmap_lock);
            kfree(page);
            if (unlikely(!map->page))
                break;
        }
        if (likely(atomic_read(&map->nr_free))) {
            for ( ; ; ) {
                if (!test_and_set_bit(offset, map->page)) {
                    atomic_dec(&map->nr_free);
                    set_last_pid(pid_ns, last, pid);
                    return pid;
                }
                offset = find_next_offset(map, offset);
                if (offset >= BITS_PER_PAGE)
                    break;
                pid = mk_pid(pid_ns, map, offset);
                if (pid >= pid_max)
                    break;
            }
        }
        if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
            ++map;
            offset = 0;
        } else {
            map = &pid_ns->pidmap[0];
            offset = RESERVED_PIDS;
            if (unlikely(last == offset))
                break;
        }
        pid = mk_pid(pid_ns, map, offset);
    }
    return -1;
}

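/*
 * Example of the wrap behaviour above (illustrative): with
 * pid_max == 32768 and last == 32767, the search starts over at
 * RESERVED_PIDS (300), so the low PIDs handed out at boot are only
 * reused after the rest of the space has been exhausted.
 */
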
int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
{
    int offset;
    struct pidmap *map, *end;

    if (last >= PID_MAX_LIMIT)
        return -1;

    offset = (last + 1) & BITS_PER_PAGE_MASK;
    map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
    end = &pid_ns->pidmap[PIDMAP_ENTRIES];
    for (; map < end; map++, offset = 0) {
        if (unlikely(!map->page))
            continue;
        offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
        if (offset < BITS_PER_PAGE)
            return mk_pid(pid_ns, map, offset);
    }
    return -1;
}

void put_pid(struct pid *pid)
{
    struct pid_namespace *ns;

    if (!pid)
        return;

    ns = pid->numbers[pid->level].ns;
    /*
     * If we hold the only reference, no one else can be acquiring a
     * new one, so we may free immediately and skip the atomic RMW.
     */
    if ((atomic_read(&pid->count) == 1) ||
         atomic_dec_and_test(&pid->count)) {
        kmem_cache_free(ns->pid_cachep, pid);
        put_pid_ns(ns);
    }
}
EXPORT_SYMBOL_GPL(put_pid);

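/*
 * Typical reference pairing (an illustrative sketch; see get_task_pid()
 * below, which takes the reference that put_pid() drops):
 */
#if 0
    struct pid *p = get_task_pid(current, PIDTYPE_PID);
    /* ... use p ... */
    put_pid(p);
#endif
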
static void delayed_put_pid(struct rcu_head *rhp)
{
    struct pid *pid = container_of(rhp, struct pid, rcu);
    put_pid(pid);
}

void free_pid(struct pid *pid)
{
    /* We can be called with write_lock_irq(&tasklist_lock) held */
    int i;
    unsigned long flags;

    spin_lock_irqsave(&pidmap_lock, flags);
    for (i = 0; i <= pid->level; i++) {
        struct upid *upid = pid->numbers + i;
        struct pid_namespace *ns = upid->ns;
        hlist_del_rcu(&upid->pid_chain);
        switch(--ns->nr_hashed) {
        case 2:
        case 1:
            /* When all that is left in the pid namespace
             * is the reaper wake up the reaper. The reaper
             * may be sleeping in zap_pid_ns_processes().
             */
            wake_up_process(ns->child_reaper);
            break;
        case PIDNS_HASH_ADDING:
            /* Handle a fork failure of the first process */
            WARN_ON(ns->child_reaper);
            ns->nr_hashed = 0;
            /* fall through */
        case 0:
            schedule_work(&ns->proc_work);
            break;
        }
    }
    spin_unlock_irqrestore(&pidmap_lock, flags);

    for (i = 0; i <= pid->level; i++)
        free_pidmap(pid->numbers + i);

    call_rcu(&pid->rcu, delayed_put_pid);
}

struct pid *alloc_pid(struct pid_namespace *ns)
{
    struct pid *pid;
    enum pid_type type;
    int i, nr;
    struct pid_namespace *tmp;
    struct upid *upid;

    pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
    if (!pid)
        goto out;

    tmp = ns;
    pid->level = ns->level;
    for (i = ns->level; i >= 0; i--) {
        nr = alloc_pidmap(tmp);
        if (nr < 0)
            goto out_free;

        pid->numbers[i].nr = nr;
        pid->numbers[i].ns = tmp;
        tmp = tmp->parent;
    }

    if (unlikely(is_child_reaper(pid))) {
        if (pid_ns_prepare_proc(ns))
            goto out_free;
    }

    get_pid_ns(ns);
    atomic_set(&pid->count, 1);
    for (type = 0; type < PIDTYPE_MAX; ++type)
        INIT_HLIST_HEAD(&pid->tasks[type]);

    upid = pid->numbers + ns->level;
    spin_lock_irq(&pidmap_lock);
    if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
        goto out_unlock;
    for ( ; upid >= pid->numbers; --upid) {
        hlist_add_head_rcu(&upid->pid_chain,
                &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
        upid->ns->nr_hashed++;
    }
    spin_unlock_irq(&pidmap_lock);

out:
    return pid;

out_unlock:
    spin_unlock_irq(&pidmap_lock);
out_free:
    while (++i <= ns->level)
        free_pidmap(pid->numbers + i);

    kmem_cache_free(ns->pid_cachep, pid);
    pid = NULL;
    goto out;
}

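/*
 * Sketch of the caller side at fork time (simplified and illustrative;
 * the real call site lives in copy_process(), and the namespace it
 * passes, as well as the error label, differ across kernel versions):
 */
#if 0
    struct pid *pid;

    pid = alloc_pid(task_active_pid_ns(current));
    if (!pid)
        goto bad_fork_cleanup; /* hypothetical error label */
#endif
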
void disable_pid_allocation(struct pid_namespace *ns)
{
    spin_lock_irq(&pidmap_lock);
    ns->nr_hashed &= ~PIDNS_HASH_ADDING;
    spin_unlock_irq(&pidmap_lock);
}

struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
    struct upid *pnr;

    hlist_for_each_entry_rcu(pnr,
            &pid_hash[pid_hashfn(nr, ns)], pid_chain)
        if (pnr->nr == nr && pnr->ns == ns)
            return container_of(pnr, struct pid,
                    numbers[ns->level]);

    return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

struct pid *find_vpid(int nr)
{
    return find_pid_ns(nr, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(find_vpid);

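/*
 * Both lookups above return an unreferenced struct pid, so callers must
 * run under rcu_read_lock() or take a reference before dropping it, as
 * in this sketch (find_get_pid() below wraps exactly this pattern):
 */
#if 0
    struct pid *p;

    rcu_read_lock();
    p = get_pid(find_vpid(nr));
    rcu_read_unlock();
#endif
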
/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type)
{
    struct pid_link *link = &task->pids[type];
    hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
}

static void __change_pid(struct task_struct *task, enum pid_type type,
            struct pid *new)
{
    struct pid_link *link;
    struct pid *pid;
    int tmp;

    link = &task->pids[type];
    pid = link->pid;

    hlist_del_rcu(&link->node);
    link->pid = new;

    for (tmp = PIDTYPE_MAX; --tmp >= 0; )
        if (!hlist_empty(&pid->tasks[tmp]))
            return;

    free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
    __change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
        struct pid *pid)
{
    __change_pid(task, type, pid);
    attach_pid(task, type);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
               enum pid_type type)
{
    new->pids[type].pid = old->pids[type].pid;
    hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
    struct task_struct *result = NULL;
    if (pid) {
        struct hlist_node *first;
        first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
                          lockdep_tasklist_lock_is_held());
        if (first)
            result = hlist_entry(first, struct task_struct, pids[(type)].node);
    }
    return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock().
 */
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
    rcu_lockdep_assert(rcu_read_lock_held(),
               "find_task_by_pid_ns() needs rcu_read_lock()"
               " protection");
    return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
}

struct task_struct *find_task_by_vpid(pid_t vnr)
{
    return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
    struct pid *pid;
    rcu_read_lock();
    if (type != PIDTYPE_PID)
        task = task->group_leader;
    pid = get_pid(task->pids[type].pid);
    rcu_read_unlock();
    return pid;
}
EXPORT_SYMBOL_GPL(get_task_pid);

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
    struct task_struct *result;
    rcu_read_lock();
    result = pid_task(pid, type);
    if (result)
        get_task_struct(result);
    rcu_read_unlock();
    return result;
}
EXPORT_SYMBOL_GPL(get_pid_task);

struct pid *find_get_pid(pid_t nr)
{
    struct pid *pid;

    rcu_read_lock();
    pid = get_pid(find_vpid(nr));
    rcu_read_unlock();

    return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
    struct upid *upid;
    pid_t nr = 0;

    if (pid && ns->level <= pid->level) {
        upid = &pid->numbers[ns->level];
        if (upid->ns == ns)
            nr = upid->nr;
    }
    return nr;
}
EXPORT_SYMBOL_GPL(pid_nr_ns);

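/*
 * Example (values illustrative): a task living in a level-1 namespace
 * might be pid 7421 in the initial namespace but pid 4 inside its own
 * namespace; pid_nr_ns() returns whichever number is valid in the
 * namespace passed in, and 0 when the pid is not visible there.
 */
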
pid_t pid_vnr(struct pid *pid)
{
    return pid_nr_ns(pid, task_active_pid_ns(current));
}
EXPORT_SYMBOL_GPL(pid_vnr);

pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
            struct pid_namespace *ns)
{
    pid_t nr = 0;

    rcu_read_lock();
    if (!ns)
        ns = task_active_pid_ns(current);
    if (likely(pid_alive(task))) {
        if (type != PIDTYPE_PID)
            task = task->group_leader;
        nr = pid_nr_ns(task->pids[type].pid, ns);
    }
    rcu_read_unlock();

    return nr;
}
EXPORT_SYMBOL(__task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
    return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
{
    return ns_of_pid(task_pid(tsk));
}
EXPORT_SYMBOL_GPL(task_active_pid_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
    struct pid *pid;

    do {
        pid = find_pid_ns(nr, ns);
        if (pid)
            break;
        nr = next_pidmap(ns, nr);
    } while (nr > 0);

    return pid;
}

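/*
 * Sketch of a /proc-style walk over every pid in a namespace using the
 * helper above (illustrative only):
 */
#if 0
    int nr = 1;
    struct pid *pid;

    rcu_read_lock();
    while ((pid = find_ge_pid(nr, ns)) != NULL) {
        /* ... emit one directory entry for pid ... */
        nr = pid_nr_ns(pid, ns) + 1;
    }
    rcu_read_unlock();
#endif
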
/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
    unsigned int i, pidhash_size;

    pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
                       HASH_EARLY | HASH_SMALL,
                       &pidhash_shift, NULL,
                       0, 4096);
    pidhash_size = 1U << pidhash_shift;

    for (i = 0; i < pidhash_size; i++)
        INIT_HLIST_HEAD(&pid_hash[i]);
}

void __init pidmap_init(void)
{
    /* Verify no one has done anything silly */
    BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);

    /* bump default and minimum pid_max based on number of cpus */
    pid_max = min(pid_max_max, max_t(int, pid_max,
                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
    pid_max_min = max_t(int, pid_max_min,
                PIDS_PER_CPU_MIN * num_possible_cpus());
    pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);

    init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
    /* Reserve PID 0. We never call free_pidmap(0) */
    set_bit(0, init_pid_ns.pidmap[0].page);
    atomic_dec(&init_pid_ns.pidmap[0].nr_free);

    init_pid_ns.pid_cachep = KMEM_CACHE(pid,
            SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}