lib/idr.c
Source at commit b386be689295730688885552666ea40b2e639b14.
/*
 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
 * Copyright (C) 2002 by Concurrent Computer Corporation
 * Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions. Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer. The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever, with that id; we treat it as a (void *). You can pass this
 * id to a user for him to pass back at a later time. You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time. When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool) so we don't need to go to the memory "store" during an id
 * allocate, and you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */

#ifndef TEST // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
    struct idr_layer *p;
    unsigned long flags;

    spin_lock_irqsave(&idp->lock, flags);
    if ((p = idp->id_free)) {
        idp->id_free = p->ary[0];
        idp->id_free_cnt--;
        p->ary[0] = NULL;
    }
    spin_unlock_irqrestore(&idp->lock, flags);
    return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
    struct idr_layer *layer;

    layer = container_of(head, struct idr_layer, rcu_head);
    kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
    call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    p->ary[0] = idp->id_free;
    idp->id_free = p;
    idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
    unsigned long flags;

    /*
     * Depends on the return element being zeroed.
     */
    spin_lock_irqsave(&idp->lock, flags);
    __move_to_free_list(idp, p);
    spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
    struct idr_layer *p = pa[0];
    int l = 0;

    __set_bit(id & IDR_MASK, &p->bitmap);
    /*
     * If this layer is full mark the bit in the layer above to
     * show that this part of the radix tree is full. This may
     * complete the layer above and require walking up the radix
     * tree.
     */
    while (p->bitmap == IDR_FULL) {
        if (!(p = pa[++l]))
            break;
        id = id >> IDR_BITS;
        __set_bit((id & IDR_MASK), &p->bitmap);
    }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible. This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
    while (idp->id_free_cnt < IDR_FREE_MAX) {
        struct idr_layer *new;
        new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
        if (new == NULL)
            return 0;
        move_to_free_list(idp, new);
    }
    return 1;
}
EXPORT_SYMBOL(idr_pre_get);
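
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the canonical caller-side idiom for idr_pre_get() + idr_get_new().
 * Preallocation happens outside the caller's lock with GFP_KERNEL; the
 * allocation itself runs under the lock and retries on -EAGAIN. The names
 * example_lock, example_idr and example_alloc_id are hypothetical.
 */
#if 0 /* usage sketch only, not compiled */
static DEFINE_SPINLOCK(example_lock);
static DEFINE_IDR(example_idr);

static int example_alloc_id(void *obj)
{
    int id, err;

retry:
    if (!idr_pre_get(&example_idr, GFP_KERNEL))
        return -ENOMEM;        /* really out of memory */
    spin_lock(&example_lock);
    err = idr_get_new(&example_idr, obj, &id);
    spin_unlock(&example_lock);
    if (err == -EAGAIN)
        goto retry;            /* someone else consumed our preallocation */
    return err ? err : id;
}
#endif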

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
    int n, m, sh;
    struct idr_layer *p, *new;
    int l, id, oid;
    unsigned long bm;

    id = *starting_id;
 restart:
    p = idp->top;
    l = idp->layers;
    pa[l--] = NULL;
    while (1) {
        /*
         * We run around this while until we reach the leaf node...
         */
        n = (id >> (IDR_BITS*l)) & IDR_MASK;
        bm = ~p->bitmap;
        m = find_next_bit(&bm, IDR_SIZE, n);
        if (m == IDR_SIZE) {
            /* no space available go back to previous layer. */
            l++;
            oid = id;
            id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

            /* if already at the top layer, we need to grow */
            if (id >= 1 << (idp->layers * IDR_BITS)) {
                *starting_id = id;
                return IDR_NEED_TO_GROW;
            }
            p = pa[l];
            BUG_ON(!p);

            /* If we need to go up one layer, continue the
             * loop; otherwise, restart from the top.
             */
            sh = IDR_BITS * (l + 1);
            if (oid >> sh == id >> sh)
                continue;
            else
                goto restart;
        }
        if (m != n) {
            sh = IDR_BITS*l;
            id = ((id >> sh) ^ n ^ m) << sh;
        }
        if ((id >= MAX_ID_BIT) || (id < 0))
            return IDR_NOMORE_SPACE;
        if (l == 0)
            break;
        /*
         * Create the layer below if it is missing.
         */
        if (!p->ary[m]) {
            new = get_from_free_list(idp);
            if (!new)
                return -1;
            new->layer = l-1;
            rcu_assign_pointer(p->ary[m], new);
            p->count++;
        }
        pa[l--] = p;
        p = p->ary[m];
    }

    pa[l] = p;
    return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                  struct idr_layer **pa)
{
    struct idr_layer *p, *new;
    int layers, v, id;
    unsigned long flags;

    id = starting_id;
build_up:
    p = idp->top;
    layers = idp->layers;
    if (unlikely(!p)) {
        if (!(p = get_from_free_list(idp)))
            return -1;
        p->layer = 0;
        layers = 1;
    }
    /*
     * Add a new layer to the top of the tree if the requested
     * id is larger than the currently allocated space.
     */
    while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
        layers++;
        if (!p->count) {
            /* special case: if the tree is currently empty,
             * then we grow the tree by moving the top node
             * upwards.
             */
            p->layer++;
            continue;
        }
        if (!(new = get_from_free_list(idp))) {
            /*
             * The allocation failed. If we built part of
             * the structure tear it down.
             */
            spin_lock_irqsave(&idp->lock, flags);
            for (new = p; p && p != idp->top; new = p) {
                p = p->ary[0];
                new->ary[0] = NULL;
                new->bitmap = new->count = 0;
                __move_to_free_list(idp, new);
            }
            spin_unlock_irqrestore(&idp->lock, flags);
            return -1;
        }
        new->ary[0] = p;
        new->count = 1;
        new->layer = layers-1;
        if (p->bitmap == IDR_FULL)
            __set_bit(0, &new->bitmap);
        p = new;
    }
    rcu_assign_pointer(idp->top, p);
    idp->layers = layers;
    v = sub_alloc(idp, &id, pa);
    if (v == IDR_NEED_TO_GROW)
        goto build_up;
    return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    int id;

    id = idr_get_empty_slot(idp, starting_id, pa);
    if (id >= 0) {
        /*
         * Successfully found an empty slot. Install the user
         * pointer and mark the slot full.
         */
        rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                (struct idr_layer *)ptr);
        pa[0]->count++;
        idr_mark_full(pa, id);
    }

    return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function. It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, starting_id);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0)
        return _idr_rc_to_errno(rv);
    *id = rv;
    return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
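
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): cyclic id allocation with idr_get_new_above(), handing out ids
 * above the last one to reduce immediate reuse. example_alloc_cyclic and
 * last_id are hypothetical; the caller still owns locking and must have
 * called idr_pre_get() beforehand. Assumes *last_id stays below
 * MAX_ID_MASK so the increment cannot overflow.
 */
#if 0 /* usage sketch only, not compiled */
static int example_alloc_cyclic(struct idr *idp, void *obj, int *last_id)
{
    int id, err;

    err = idr_get_new_above(idp, obj, *last_id + 1, &id);
    if (err == -ENOSPC)                        /* wrap around and retry */
        err = idr_get_new_above(idp, obj, 0, &id);
    if (err)
        return err;    /* -EAGAIN: caller should idr_pre_get() and retry */
    *last_id = id;
    return id;
}
#endif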

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN. The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
    int rv;

    rv = idr_get_new_above_int(idp, ptr, 0);
    /*
     * This is a cheap hack until the IDR code can be fixed to
     * return proper error values.
     */
    if (rv < 0)
        return _idr_rc_to_errno(rv);
    *id = rv;
    return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
    printk(KERN_WARNING
        "idr_remove called for id=%d which is not allocated.\n", id);
    dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
    struct idr_layer *p = idp->top;
    struct idr_layer **pa[MAX_LEVEL];
    struct idr_layer ***paa = &pa[0];
    struct idr_layer *to_free;
    int n;

    *paa = NULL;
    *++paa = &idp->top;

    while ((shift > 0) && p) {
        n = (id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        *++paa = &p->ary[n];
        p = p->ary[n];
        shift -= IDR_BITS;
    }
    n = id & IDR_MASK;
    if (likely(p != NULL && test_bit(n, &p->bitmap))) {
        __clear_bit(n, &p->bitmap);
        rcu_assign_pointer(p->ary[n], NULL);
        to_free = NULL;
        while (*paa && !--((**paa)->count)) {
            if (to_free)
                free_layer(to_free);
            to_free = **paa;
            **paa-- = NULL;
        }
        if (!*paa)
            idp->layers = 0;
        if (to_free)
            free_layer(to_free);
    } else
        idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
    struct idr_layer *p;
    struct idr_layer *to_free;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
    if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
        idp->top->ary[0]) {
        /*
         * Single child at leftmost slot: we can shrink the tree.
         * This level is not needed anymore since when layers are
         * inserted, they are inserted at the top of the existing
         * tree.
         */
        to_free = idp->top;
        p = idp->top->ary[0];
        rcu_assign_pointer(idp->top, p);
        --idp->layers;
        to_free->bitmap = to_free->count = 0;
        free_layer(to_free);
    }
    while (idp->id_free_cnt >= IDR_FREE_MAX) {
        p = get_from_free_list(idp);
        /*
         * Note: we don't call the rcu callback here, since the only
         * layers that fall into the freelist are those that have been
         * preallocated.
         */
        kmem_cache_free(idr_layer_cache, p);
    }
    return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
    int n, id, max;
    int bt_mask;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = idp->top;
    rcu_assign_pointer(idp->top, NULL);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > IDR_BITS && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = p->ary[(id >> n) & IDR_MASK];
        }

        bt_mask = id;
        id += 1 << n;
        /* Get the highest bit that the above add changed from 0->1. */
        while (n < fls(id ^ bt_mask)) {
            if (p)
                free_layer(p);
            n += IDR_BITS;
            p = *--paa;
        }
    }
    idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
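
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the clean-up sequence described in the comment above, for an idr
 * holding kmalloc()ed objects. example_free_one and example_teardown are
 * hypothetical names.
 */
#if 0 /* usage sketch only, not compiled */
static int example_free_one(int id, void *p, void *data)
{
    kfree(p);                  /* free the object stored under this id */
    return 0;
}

static void example_teardown(struct idr *idp)
{
    idr_for_each(idp, example_free_one, NULL); /* free the objects */
    idr_remove_all(idp);                       /* drop all id mappings */
    idr_destroy(idp);                          /* free cached idr_layers */
}
#endif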

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
    while (idp->id_free_cnt) {
        struct idr_layer *p = get_from_free_list(idp);
        kmem_cache_free(idr_layer_cache, p);
    }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with. A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), provided that the
 * leaf pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
    int n;
    struct idr_layer *p;

    p = rcu_dereference_raw(idp->top);
    if (!p)
        return NULL;
    n = (p->layer+1) * IDR_BITS;

    /* Mask off upper bits we don't use for the search. */
    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return NULL;
    BUG_ON(n == 0);

    while (n > 0 && p) {
        n -= IDR_BITS;
        BUG_ON(n != p->layer*IDR_BITS);
        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
    }
    return (void *)p;
}
EXPORT_SYMBOL(idr_find);
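
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): an RCU read-side lookup with idr_find(). The tree itself is safe
 * to walk under rcu_read_lock(); the returned object is only guaranteed
 * valid inside the critical section if objects are freed via RCU (e.g.
 * kfree_rcu()). example_lookup is a hypothetical name.
 */
#if 0 /* usage sketch only, not compiled */
static void example_lookup(struct idr *idp, int id)
{
    void *p;

    rcu_read_lock();
    p = idr_find(idp, id);
    if (p) {
        /* ... use p here, without sleeping; do not cache it past
         * rcu_read_unlock() unless its lifetime is otherwise managed ...
         */
    }
    rcu_read_unlock();
}
#endif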

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr. The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function. It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
         int (*fn)(int id, void *p, void *data), void *data)
{
    int n, id, max, error = 0;
    struct idr_layer *p;
    struct idr_layer *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];

    n = idp->layers * IDR_BITS;
    p = rcu_dereference_raw(idp->top);
    max = 1 << n;

    id = 0;
    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            error = fn(id, (void *)p, data);
            if (error)
                break;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }

    return error;
}
EXPORT_SYMBOL(idr_for_each);
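
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): counting registered pointers with idr_for_each(). The callback
 * must not modify the tree, and the caller serializes against
 * idr_get_new()/idr_remove(). example_count* are hypothetical names.
 */
#if 0 /* usage sketch only, not compiled */
static int example_count_one(int id, void *p, void *data)
{
    int *count = data;

    (*count)++;
    return 0;                  /* non-zero would abort the walk */
}

static int example_count(struct idr *idp)
{
    int count = 0;

    idr_for_each(idp, example_count_one, &count);
    return count;
}
#endif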

/**
 * idr_get_next - find the next registered object at or above an id
 * @idp: idr handle
 * @nextidp: pointer to the lookup key
 *
 * Returns a pointer to the registered object with the smallest id that
 * is greater than or equal to *@nextidp. On success, *@nextidp is
 * updated to that object's id, so the caller can bump it and iterate.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
    struct idr_layer *p, *pa[MAX_LEVEL];
    struct idr_layer **paa = &pa[0];
    int id = *nextidp;
    int n, max;

    /* find first ent */
    n = idp->layers * IDR_BITS;
    max = 1 << n;
    p = rcu_dereference_raw(idp->top);
    if (!p)
        return NULL;

    while (id < max) {
        while (n > 0 && p) {
            n -= IDR_BITS;
            *paa++ = p;
            p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }

        if (p) {
            *nextidp = id;
            return p;
        }

        id += 1 << n;
        while (n < fls(id)) {
            n += IDR_BITS;
            p = *--paa;
        }
    }
    return NULL;
}
EXPORT_SYMBOL(idr_get_next);
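
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): iterating over all entries with idr_get_next(). The caller must
 * serialize against removal, or hold rcu_read_lock() with RCU-managed
 * entries. example_walk is a hypothetical name.
 */
#if 0 /* usage sketch only, not compiled */
static void example_walk(struct idr *idp)
{
    void *p;
    int id = 0;

    while ((p = idr_get_next(idp, &id)) != NULL) {
        /* ... use p; its id is 'id' ... */
        id++;          /* resume the search above the id just found */
    }
}
#endif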


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
    int n;
    struct idr_layer *p, *old_p;

    p = idp->top;
    if (!p)
        return ERR_PTR(-EINVAL);

    n = (p->layer+1) * IDR_BITS;

    id &= MAX_ID_MASK;

    if (id >= (1 << n))
        return ERR_PTR(-EINVAL);

    n -= IDR_BITS;
    while ((n > 0) && p) {
        p = p->ary[(id >> n) & IDR_MASK];
        n -= IDR_BITS;
    }

    n = id & IDR_MASK;
    if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
        return ERR_PTR(-ENOENT);

    old_p = p->ary[n];
    rcu_assign_pointer(p->ary[n], ptr);

    return old_p;
}
EXPORT_SYMBOL(idr_replace);
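
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): swapping the pointer stored under an id and checking the old
 * value with the IS_ERR()/PTR_ERR() convention that idr_replace() uses.
 * example_swap is a hypothetical name.
 */
#if 0 /* usage sketch only, not compiled */
static int example_swap(struct idr *idp, int id, void *new_ptr)
{
    void *old = idr_replace(idp, new_ptr, id);

    if (IS_ERR(old))
        return PTR_ERR(old);   /* -ENOENT or -EINVAL */
    /* ... dispose of 'old' once readers can no longer see it ... */
    return 0;
}
#endif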

void __init idr_init_cache(void)
{
    idr_layer_cache = kmem_cache_create("idr_layer_cache",
                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
    memset(idp, 0, sizeof(struct idr));
    spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
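
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the two ways to set up an idr handle, statically with
 * DEFINE_IDR() from <linux/idr.h> or at runtime with idr_init().
 * The example_* names are hypothetical.
 */
#if 0 /* usage sketch only, not compiled */
static DEFINE_IDR(example_static_idr);    /* static definition */
static struct idr example_runtime_idr;

static void example_setup(void)
{
    idr_init(&example_runtime_idr);       /* runtime initialization */
}
#endif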


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation. Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit. ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
    unsigned long flags;

    if (!ida->free_bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        if (!ida->free_bitmap) {
            ida->free_bitmap = bitmap;
            bitmap = NULL;
        }
        spin_unlock_irqrestore(&ida->idr.lock, flags);
    }

    kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to taking any locks and calling
 * the ida_get_new* functions. It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
    /* allocate idr_layers */
    if (!idr_pre_get(&ida->idr, gfp_mask))
        return 0;

    /* allocate free_bitmap */
    if (!ida->free_bitmap) {
        struct ida_bitmap *bitmap;

        bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
        if (!bitmap)
            return 0;

        free_bitmap(ida, bitmap);
    }

    return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id. It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
    struct idr_layer *pa[MAX_LEVEL];
    struct ida_bitmap *bitmap;
    unsigned long flags;
    int idr_id = starting_id / IDA_BITMAP_BITS;
    int offset = starting_id % IDA_BITMAP_BITS;
    int t, id;

 restart:
    /* get vacant slot */
    t = idr_get_empty_slot(&ida->idr, idr_id, pa);
    if (t < 0)
        return _idr_rc_to_errno(t);

    if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
        return -ENOSPC;

    if (t != idr_id)
        offset = 0;
    idr_id = t;

    /* if bitmap isn't there, create a new one */
    bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
    if (!bitmap) {
        spin_lock_irqsave(&ida->idr.lock, flags);
        bitmap = ida->free_bitmap;
        ida->free_bitmap = NULL;
        spin_unlock_irqrestore(&ida->idr.lock, flags);

        if (!bitmap)
            return -EAGAIN;

        memset(bitmap, 0, sizeof(struct ida_bitmap));
        rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                (void *)bitmap);
        pa[0]->count++;
    }

    /* lookup for empty slot */
    t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
    if (t == IDA_BITMAP_BITS) {
        /* no empty slot after offset, continue to the next chunk */
        idr_id++;
        offset = 0;
        goto restart;
    }

    id = idr_id * IDA_BITMAP_BITS + t;
    if (id >= MAX_ID_BIT)
        return -ENOSPC;

    __set_bit(t, bitmap->bitmap);
    if (++bitmap->nr_busy == IDA_BITMAP_BITS)
        idr_mark_full(pa, idr_id);

    *p_id = id;

    /* Each leaf node can handle nearly a thousand slots and the
     * whole idea of ida is to have small memory foot print.
     * Throw away extra resources one by one after each successful
     * allocation.
     */
    if (ida->idr.id_free_cnt || ida->free_bitmap) {
        struct idr_layer *p = get_from_free_list(&ida->idr);
        if (p)
            kmem_cache_free(idr_layer_cache, p);
    }

    return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
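
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the ida_pre_get()/ida_get_new() retry loop, mirroring the idr
 * idiom above. example_ida_lock, example_ida and example_ida_alloc are
 * hypothetical names.
 */
#if 0 /* usage sketch only, not compiled */
static DEFINE_SPINLOCK(example_ida_lock);
static DEFINE_IDA(example_ida);

static int example_ida_alloc(void)
{
    int id, err;

retry:
    if (!ida_pre_get(&example_ida, GFP_KERNEL))
        return -ENOMEM;
    spin_lock(&example_ida_lock);
    err = ida_get_new(&example_ida, &id);
    spin_unlock(&example_ida_lock);
    if (err == -EAGAIN)
        goto retry;            /* preallocation was consumed, refill */
    return err ? err : id;
}
#endif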

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID. It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call. If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
    return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
    struct idr_layer *p = ida->idr.top;
    int shift = (ida->idr.layers - 1) * IDR_BITS;
    int idr_id = id / IDA_BITMAP_BITS;
    int offset = id % IDA_BITMAP_BITS;
    int n;
    struct ida_bitmap *bitmap;

    /* clear full bits while looking up the leaf idr_layer */
    while ((shift > 0) && p) {
        n = (idr_id >> shift) & IDR_MASK;
        __clear_bit(n, &p->bitmap);
        p = p->ary[n];
        shift -= IDR_BITS;
    }

    if (p == NULL)
        goto err;

    n = idr_id & IDR_MASK;
    __clear_bit(n, &p->bitmap);

    bitmap = (void *)p->ary[n];
    if (!test_bit(offset, bitmap->bitmap))
        goto err;

    /* update bitmap and remove it if empty */
    __clear_bit(offset, bitmap->bitmap);
    if (--bitmap->nr_busy == 0) {
        __set_bit(n, &p->bitmap); /* to please idr_remove() */
        idr_remove(&ida->idr, idr_id);
        free_bitmap(ida, bitmap);
    }

    return;

 err:
    printk(KERN_WARNING
           "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
    idr_destroy(&ida->idr);
    kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < %0x80000000)
 * @end: the maximum id (exclusive, < %0x80000000, or %0 for no upper bound)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
           gfp_t gfp_mask)
{
    int ret, id;
    unsigned int max;
    unsigned long flags;

    BUG_ON((int)start < 0);
    BUG_ON((int)end < 0);

    if (end == 0)
        max = 0x80000000;
    else {
        BUG_ON(end < start);
        max = end - 1;
    }

again:
    if (!ida_pre_get(ida, gfp_mask))
        return -ENOMEM;

    spin_lock_irqsave(&simple_ida_lock, flags);
    ret = ida_get_new_above(ida, start, &id);
    if (!ret) {
        if (id > max) {
            ida_remove(ida, id);
            ret = -ENOSPC;
        } else {
            ret = id;
        }
    }
    spin_unlock_irqrestore(&simple_ida_lock, flags);

    if (unlikely(ret == -EAGAIN))
        goto again;

    return ret;
}
EXPORT_SYMBOL(ida_simple_get);
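
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): the simple interface. ida_simple_get()/ida_simple_remove() do
 * their own locking and preallocation, so callers only check the return
 * value. example_simple_ida and example_simple are hypothetical names.
 */
#if 0 /* usage sketch only, not compiled */
static DEFINE_IDA(example_simple_ida);

static int example_simple(void)
{
    int id = ida_simple_get(&example_simple_ida, 0, 0, GFP_KERNEL);

    if (id < 0)
        return id;             /* -ENOMEM or -ENOSPC */
    /* ... use id ... */
    ida_simple_remove(&example_simple_ida, id);
    return 0;
}
#endif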

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
    unsigned long flags;

    BUG_ON((int)id < 0);
    spin_lock_irqsave(&simple_ida_lock, flags);
    ida_remove(ida, id);
    spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
    memset(ida, 0, sizeof(struct ida));
    idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);

