Root/
1 | #ifndef _LINUX_LIST_BL_H |
2 | #define _LINUX_LIST_BL_H |
3 | |
4 | #include <linux/list.h> |
5 | #include <linux/bit_spinlock.h> |
6 | |
7 | /* |
8 | * Special version of lists, where head of the list has a lock in the lowest |
9 | * bit. This is useful for scalable hash tables without increasing memory |
10 | * footprint overhead. |
11 | * |
12 | * For modification operations, the 0 bit of hlist_bl_head->first |
13 | * pointer must be set. |
14 | * |
15 | * With some small modifications, this can easily be adapted to store several |
16 | * arbitrary bits (not just a single lock bit), if the need arises to store |
17 | * some fast and compact auxiliary data. |
18 | */ |
19 | |
/*
 * LIST_BL_LOCKMASK selects the bits of hlist_bl_head->first that carry
 * the lock.  On SMP (or with spinlock debugging) bit 0 is the lock bit;
 * on UP builds without debugging the mask is 0, so all the masking
 * below compiles away to plain pointer accesses.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define LIST_BL_LOCKMASK	1UL
#else
#define LIST_BL_LOCKMASK	0UL
#endif

/*
 * Sanity checks on list invariants, compiled in only with
 * CONFIG_DEBUG_LIST; otherwise they expand to nothing.
 */
#ifdef CONFIG_DEBUG_LIST
#define LIST_BL_BUG_ON(x) BUG_ON(x)
#else
#define LIST_BL_BUG_ON(x)
#endif
31 | |
32 | |
/*
 * List head: a single pointer to the first node.  When LIST_BL_LOCKMASK
 * is nonzero, bit 0 of ->first doubles as a bit spinlock, so the raw
 * value must be masked before it is dereferenced (see hlist_bl_first()).
 */
struct hlist_bl_head {
	struct hlist_bl_node *first;
};
36 | |
/*
 * List node.  ->next points at the following node (no lock bit is ever
 * stored here); ->pprev points back at the previous node's ->next, or
 * at the head's ->first — which may carry the lock bit.
 */
struct hlist_bl_node {
	struct hlist_bl_node *next, **pprev;
};
/* Initialize a head to the empty, unlocked state. */
#define INIT_HLIST_BL_HEAD(ptr) \
	((ptr)->first = NULL)
42 | |
43 | static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) |
44 | { |
45 | h->next = NULL; |
46 | h->pprev = NULL; |
47 | } |
48 | |
49 | #define hlist_bl_entry(ptr, type, member) container_of(ptr,type,member) |
50 | |
51 | static inline int hlist_bl_unhashed(const struct hlist_bl_node *h) |
52 | { |
53 | return !h->pprev; |
54 | } |
55 | |
56 | static inline struct hlist_bl_node *hlist_bl_first(struct hlist_bl_head *h) |
57 | { |
58 | return (struct hlist_bl_node *) |
59 | ((unsigned long)h->first & ~LIST_BL_LOCKMASK); |
60 | } |
61 | |
62 | static inline void hlist_bl_set_first(struct hlist_bl_head *h, |
63 | struct hlist_bl_node *n) |
64 | { |
65 | LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); |
66 | LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) != |
67 | LIST_BL_LOCKMASK); |
68 | h->first = (struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK); |
69 | } |
70 | |
71 | static inline int hlist_bl_empty(const struct hlist_bl_head *h) |
72 | { |
73 | return !((unsigned long)h->first & ~LIST_BL_LOCKMASK); |
74 | } |
75 | |
76 | static inline void hlist_bl_add_head(struct hlist_bl_node *n, |
77 | struct hlist_bl_head *h) |
78 | { |
79 | struct hlist_bl_node *first = hlist_bl_first(h); |
80 | |
81 | n->next = first; |
82 | if (first) |
83 | first->pprev = &n->next; |
84 | n->pprev = &h->first; |
85 | hlist_bl_set_first(h, n); |
86 | } |
87 | |
88 | static inline void __hlist_bl_del(struct hlist_bl_node *n) |
89 | { |
90 | struct hlist_bl_node *next = n->next; |
91 | struct hlist_bl_node **pprev = n->pprev; |
92 | |
93 | LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK); |
94 | |
95 | /* pprev may be `first`, so be careful not to lose the lock bit */ |
96 | *pprev = (struct hlist_bl_node *) |
97 | ((unsigned long)next | |
98 | ((unsigned long)*pprev & LIST_BL_LOCKMASK)); |
99 | if (next) |
100 | next->pprev = pprev; |
101 | } |
102 | |
103 | static inline void hlist_bl_del(struct hlist_bl_node *n) |
104 | { |
105 | __hlist_bl_del(n); |
106 | n->next = LIST_POISON1; |
107 | n->pprev = LIST_POISON2; |
108 | } |
109 | |
/*
 * Remove @n from its list (if it is on one) and reinitialize it for
 * reuse.  Calling this on an already-unhashed node is a no-op.
 */
static inline void hlist_bl_del_init(struct hlist_bl_node *n)
{
	if (hlist_bl_unhashed(n))
		return;
	__hlist_bl_del(n);
	INIT_HLIST_BL_NODE(n);
}
117 | |
/* Take the list lock: spins on bit 0 of the head's first-pointer word. */
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)b);
}
122 | |
/*
 * Release the list lock by clearing bit 0 of the head word.  Uses the
 * non-atomic __bit_spin_unlock(): safe here because, per the header
 * comment, all modifications of ->first happen with the lock bit set.
 */
static inline void hlist_bl_unlock(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)b);
}
127 | |
/* Return true if the list lock (bit 0 of the head word) is currently held. */
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
{
	return bit_spin_is_locked(0, (unsigned long *)b);
}
132 | |
/**
 * hlist_bl_for_each_entry	- iterate over list of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * NOTE(review): the walk itself takes no lock; presumably callers hold
 * the list lock (or otherwise keep the list stable) — confirm at call
 * sites.  Do not delete @pos's entry inside the loop; use the _safe
 * variant for that.
 */
#define hlist_bl_for_each_entry(tpos, pos, head, member)		\
	for (pos = hlist_bl_first(head);				\
	     pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = pos->next)
146 | |
/**
 * hlist_bl_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct hlist_bl_node to use as a loop cursor.
 * @n:		another &struct hlist_bl_node to use as temporary storage
 * @head:	the head for your list.
 * @member:	the name of the hlist_bl_node within the struct.
 *
 * @n caches @pos->next before the loop body runs, so the current entry
 * may be deleted inside the body.
 * NOTE(review): no locking is done by the macro itself; presumably the
 * caller holds the list lock while deleting — confirm at call sites.
 */
#define hlist_bl_for_each_entry_safe(tpos, pos, n, head, member)	 \
	for (pos = hlist_bl_first(head);				 \
	     pos && ({ n = pos->next; 1; }) && 				 \
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1;}); \
	     pos = n)
160 | |
161 | #endif |
162 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9