Root/
1 | #ifndef _LINUX_PERCPU_COUNTER_H |
2 | #define _LINUX_PERCPU_COUNTER_H |
3 | /* |
4 | * A simple "approximate counter" for use in ext2 and ext3 superblocks. |
5 | * |
6 | * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4. |
7 | */ |
8 | |
9 | #include <linux/spinlock.h> |
10 | #include <linux/smp.h> |
11 | #include <linux/list.h> |
12 | #include <linux/threads.h> |
13 | #include <linux/percpu.h> |
14 | #include <linux/types.h> |
15 | |
16 | #ifdef CONFIG_SMP |
17 | |
/*
 * SMP flavour: an approximate global 64-bit count plus a small per-CPU
 * delta counter.  Updates go to the local s32 and are folded into 'count'
 * in batches (see __percpu_counter_add()), so reads of 'count' are cheap
 * but may be off by roughly batch * num_cpus.
 */
struct percpu_counter {
	spinlock_t lock;	/* serialises updates of 'count' */
	s64 count;		/* approximate global value */
#ifdef CONFIG_HOTPLUG_CPU
	struct list_head list;	/* All percpu_counters are on a list */
#endif
	s32 __percpu *counters;	/* per-CPU deltas not yet folded into 'count' */
};
26 | |
/*
 * Default batch size passed to __percpu_counter_add() by
 * percpu_counter_add().
 */
extern int percpu_counter_batch;

/*
 * Initialise @fbc to @amount.  @key is the lockdep class key for
 * fbc->lock; use the percpu_counter_init() macro rather than calling
 * this directly.  Returns 0 on success, negative errno otherwise.
 */
int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
			  struct lock_class_key *key);
31 | |
/*
 * Initialise @fbc to @value.  The static lock_class_key gives each call
 * site its own lockdep class for fbc->lock, so lockdep does not conflate
 * counters initialised from different places.  Evaluates to the int
 * result of __percpu_counter_init().
 */
#define percpu_counter_init(fbc, value)					\
	({								\
		static struct lock_class_key __key;			\
									\
		__percpu_counter_init(fbc, value, &__key);		\
	})
38 | |
/* Free the per-CPU storage behind @fbc. */
void percpu_counter_destroy(struct percpu_counter *fbc);
/* Reset @fbc to exactly @amount. */
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
/* Add @amount, folding the local delta into the global count at @batch. */
void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
/* Accurate (and expensive) sum over the global count plus all CPU deltas. */
s64 __percpu_counter_sum(struct percpu_counter *fbc);
43 | |
/* Add @amount using the global default batch size. */
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	__percpu_counter_add(fbc, amount, percpu_counter_batch);
}
48 | |
49 | static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc) |
50 | { |
51 | s64 ret = __percpu_counter_sum(fbc); |
52 | return ret < 0 ? 0 : ret; |
53 | } |
54 | |
/* Accurate sum across all CPUs; may legitimately be negative. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return __percpu_counter_sum(fbc);
}
59 | |
/*
 * Cheap, approximate read: just the folded global count, without
 * summing the outstanding per-CPU deltas.
 */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
64 | |
65 | /* |
66 | * It is possible for the percpu_counter_read() to return a small negative |
67 | * number for some counter which should never be negative. |
68 | * |
69 | */ |
70 | static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) |
71 | { |
72 | s64 ret = fbc->count; |
73 | |
74 | barrier(); /* Prevent reloads of fbc->count */ |
75 | if (ret >= 0) |
76 | return ret; |
77 | return 1; |
78 | } |
79 | |
80 | #else |
81 | |
/* UP flavour: a single exact count, no per-CPU storage and no lock. */
struct percpu_counter {
	s64 count;
};
85 | |
/*
 * UP initialisation cannot fail; returns 0 to match the int-returning
 * SMP percpu_counter_init().
 */
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
	return 0;
}
91 | |
/* Nothing was allocated on UP, so nothing to free. */
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}
95 | |
/* Reset @fbc to exactly @amount. */
static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
	fbc->count = amount;
}
100 | |
/*
 * Add @amount to the count.  Preemption is disabled so the non-atomic
 * read-modify-write of the (possibly multi-word) s64 is not interleaved
 * with another task on this CPU.
 */
static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
	preempt_disable();
	fbc->count += amount;
	preempt_enable();
}
108 | |
/*
 * SMP-compatible signature; @batch is meaningless on UP (there is no
 * per-CPU delta to fold) and is ignored.
 */
static inline void
__percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
{
	percpu_counter_add(fbc, amount);
}
114 | |
/* On UP the count is exact, so a plain read suffices. */
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
	return fbc->count;
}
119 | |
/*
 * On UP the count is exact so no clamping is done; note a genuinely
 * negative count is returned as-is.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
	return fbc->count;
}
124 | |
/* UP: the "sum" is just the exact count. */
static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
	return percpu_counter_read_positive(fbc);
}
129 | |
/* UP: the "sum" is just the exact count. */
static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
	return percpu_counter_read(fbc);
}
134 | |
135 | #endif /* CONFIG_SMP */ |
136 | |
/* Increment the counter by one. */
static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, 1);
}
141 | |
/* Decrement the counter by one. */
static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
	percpu_counter_add(fbc, -1);
}
146 | |
/* Subtract @amount (implemented as a negated add). */
static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
	percpu_counter_add(fbc, -amount);
}
151 | |
152 | #endif /* _LINUX_PERCPU_COUNTER_H */ |
153 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9