Source at commit 2604e7f9a98c27be50a0c3ff7503b6a5ea8f6cfe ("cpufreq_stats: Support runtime changes to frequency table"), created 12 years 7 months ago by Maarten ter Huurne.
/*
 * 2002-10-18 written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
 * don't need to go to the memory "store" during an id allocate, just
 * so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
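
/*
 * Usage sketch (illustrative only; not part of the original file): the
 * allocate/lookup/release cycle described above, pairing idr_pre_get()
 * with idr_get_new() under a caller-held lock.  The names "my_idr",
 * "my_lock" and "struct my_obj" are hypothetical caller-side names.
 */
#if 0
static DEFINE_IDR(my_idr);
static DEFINE_SPINLOCK(my_lock);

static int my_obj_add(struct my_obj *obj)
{
        int id, err;

        do {
                /* reserve idr_layers first, with no spinlocks held */
                if (!idr_pre_get(&my_idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&my_lock);
                err = idr_get_new(&my_idr, obj, &id);
                spin_unlock(&my_lock);
        } while (err == -EAGAIN);       /* preallocation raced empty; refill */

        return err ? err : id;          /* -ENOSPC when the idr is full */
}

static struct my_obj *my_obj_lookup(int id)
{
        return idr_find(&my_idr, id);   /* NULL if id is not allocated */
}

static void my_obj_del(int id)
{
        spin_lock(&my_lock);
        idr_remove(&my_idr, id);
        spin_unlock(&my_lock);
}
#endif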

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/export.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>
#include <linux/spinlock.h>

static struct kmem_cache *idr_layer_cache;
static DEFINE_SPINLOCK(simple_ida_lock);

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
        call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to calling the idr_get_new* functions.
 * It preallocates enough memory to satisfy the worst possible allocation. The
 * caller should pass in GFP_KERNEL if possible.  This of course requires that
 * no spinning locks be held.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return (0);
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (id >= 1 << (idp->layers * IDR_BITS)) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }
                        p = pa[l];
                        BUG_ON(!p);

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If allocation from IDR's private freelist fails, idr_get_new_above() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new_above() call.
 *
 * If the idr is full idr_get_new_above() will return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... %0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
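
/*
 * Illustrative sketch (not part of the original file): the -EAGAIN retry
 * pattern for idr_get_new_above() when ids below some minimum are reserved.
 * "cache_idr", "cache_lock" and "struct cache_entry" are hypothetical
 * caller-side names.
 */
#if 0
static DEFINE_IDR(cache_idr);
static DEFINE_SPINLOCK(cache_lock);

static int cache_entry_register(struct cache_entry *ce, int min_id)
{
        int id, err;

        do {
                /* preallocate with no spinlocks held */
                if (!idr_pre_get(&cache_idr, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&cache_lock);
                err = idr_get_new_above(&cache_idr, ce, min_id, &id);
                spin_unlock(&cache_lock);
        } while (err == -EAGAIN);       /* freelist was consumed by a racer */

        return err ? err : id;          /* id >= min_id, or -ENOSPC */
}
#endif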

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * If allocation from IDR's private freelist fails, idr_get_new() will
 * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
 * IDR's preallocation and then retry the idr_get_new() call.
 *
 * If the idr is full idr_get_new() will return %-ENOSPC.
 *
 * @id returns a value in the range %0 ... %0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
                "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kmem_cache_free(idr_layer_cache, p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        int bt_mask;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        rcu_assign_pointer(idp->top, NULL);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                bt_mask = id;
                id += 1 << n;
                /* Get the highest bit that the above add changed from 0->1. */
                while (n < fls(id ^ bt_mask)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
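
/*
 * Clean-up sketch (illustrative only): the sequence suggested above --
 * idr_for_each() to free the stored objects, idr_remove_all() to drop the
 * id mappings, idr_destroy() to release cached layers.  "my_idr" and
 * "struct my_obj" are hypothetical caller-side names.
 */
#if 0
static DEFINE_IDR(my_idr);

static int my_obj_free_one(int id, void *p, void *data)
{
        kfree(p);               /* p is the object stored under id */
        return 0;               /* non-zero would abort the iteration */
}

static void my_idr_teardown(void)
{
        idr_for_each(&my_idr, my_obj_free_one, NULL);
        idr_remove_all(&my_idr);
        idr_destroy(&my_idr);
}
#endif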

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);
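
/*
 * Lookup sketch (illustrative only): calling idr_find() under
 * rcu_read_lock(), as permitted above when the leaf objects' lifetimes are
 * RCU-managed.  "my_idr", "struct my_obj" and its "refcnt" field are
 * hypothetical caller-side names.
 */
#if 0
static struct my_obj *my_obj_get(int id)
{
        struct my_obj *obj;

        rcu_read_lock();
        obj = idr_find(&my_idr, id);
        if (obj && !atomic_inc_not_zero(&obj->refcnt))
                obj = NULL;     /* object is already being torn down */
        rcu_read_unlock();

        return obj;             /* caller drops the reference when done */
}
#endif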

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference_raw(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

/**
 * idr_get_next - look up the next object for the given id
 * @idp: idr handle
 * @nextidp: pointer to lookup key
 *
 * Returns a pointer to the registered object with the lowest id that is
 * greater than or equal to the given id.  After the lookup, *@nextidp is
 * updated to the id that was found, ready for the next iteration.
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_get_next(struct idr *idp, int *nextidp)
{
        struct idr_layer *p, *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];
        int id = *nextidp;
        int n, max;

        /* find first ent */
        p = rcu_dereference_raw(idp->top);
        if (!p)
                return NULL;
        n = (p->layer + 1) * IDR_BITS;
        max = 1 << n;

        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        *nextidp = id;
                        return p;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(idr_get_next);


/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
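
/*
 * Replace sketch (illustrative only): swapping the pointer stored under an
 * existing id and checking the ERR_PTR-encoded result.  "my_idr" and
 * "struct my_obj" are hypothetical; the caller is assumed to serialize
 * against other writers.
 */
#if 0
static int my_obj_swap(int id, struct my_obj *new_obj, struct my_obj **old_obj)
{
        void *old = idr_replace(&my_idr, new_obj, id);

        if (IS_ERR(old))
                return PTR_ERR(old);    /* -ENOENT or -EINVAL */
        *old_obj = old;
        return 0;
}
#endif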

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);


/**
 * DOC: IDA description
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25 written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns %0,
 * otherwise %1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... %0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                (void *)bitmap);
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range %0 ... %0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);
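
/*
 * IDA sketch (illustrative only): the ida_pre_get()/ida_get_new() retry
 * pattern for plain ID allocation without a stored pointer.  "my_ida" and
 * "my_ida_lock" are hypothetical caller-side names.
 */
#if 0
static DEFINE_IDA(my_ida);
static DEFINE_SPINLOCK(my_ida_lock);

static int my_id_alloc(void)
{
        int id, err;

        do {
                if (!ida_pre_get(&my_ida, GFP_KERNEL))
                        return -ENOMEM;
                spin_lock(&my_ida_lock);
                err = ida_get_new(&my_ida, &id);
                spin_unlock(&my_ida_lock);
        } while (err == -EAGAIN);

        return err ? err : id;          /* -ENOSPC when the ida is full */
}

static void my_id_free(int id)
{
        spin_lock(&my_ida_lock);
        ida_remove(&my_ida, id);
        spin_unlock(&my_ida_lock);
}
#endif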

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_simple_get - get a new id.
 * @ida: the (initialized) ida.
 * @start: the minimum id (inclusive, < 0x80000000)
 * @end: the maximum id (exclusive, < 0x80000000, or 0 for no upper limit)
 * @gfp_mask: memory allocation flags
 *
 * Allocates an id in the range start <= id < end, or returns -ENOSPC.
 * On memory allocation failure, returns -ENOMEM.
 *
 * Use ida_simple_remove() to get rid of an id.
 */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask)
{
        int ret, id;
        unsigned int max;
        unsigned long flags;

        BUG_ON((int)start < 0);
        BUG_ON((int)end < 0);

        if (end == 0)
                max = 0x80000000;
        else {
                BUG_ON(end < start);
                max = end - 1;
        }

again:
        if (!ida_pre_get(ida, gfp_mask))
                return -ENOMEM;

        spin_lock_irqsave(&simple_ida_lock, flags);
        ret = ida_get_new_above(ida, start, &id);
        if (!ret) {
                if (id > max) {
                        ida_remove(ida, id);
                        ret = -ENOSPC;
                } else {
                        ret = id;
                }
        }
        spin_unlock_irqrestore(&simple_ida_lock, flags);

        if (unlikely(ret == -EAGAIN))
                goto again;

        return ret;
}
EXPORT_SYMBOL(ida_simple_get);
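
/*
 * Simple-interface sketch (illustrative only): ida_simple_get() handles
 * preallocation, retry and locking internally, so a caller only picks a
 * range.  "my_minor_ida" is a hypothetical name.
 */
#if 0
static DEFINE_IDA(my_minor_ida);

static int my_minor_alloc(void)
{
        /* any free id in [0, 256), or -ENOSPC / -ENOMEM */
        return ida_simple_get(&my_minor_ida, 0, 256, GFP_KERNEL);
}

static void my_minor_free(int minor)
{
        ida_simple_remove(&my_minor_ida, minor);
}
#endif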

/**
 * ida_simple_remove - remove an allocated id.
 * @ida: the (initialized) ida.
 * @id: the id returned by ida_simple_get.
 */
void ida_simple_remove(struct ida *ida, unsigned int id)
{
        unsigned long flags;

        BUG_ON((int)id < 0);
        spin_lock_irqsave(&simple_ida_lock, flags);
        ida_remove(ida, id);
        spin_unlock_irqrestore(&simple_ida_lock, flags);
}
EXPORT_SYMBOL(ida_simple_remove);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);