Root/
1 | #ifndef __RES_COUNTER_H__ |
2 | #define __RES_COUNTER_H__ |
3 | |
4 | /* |
5 | * Resource Counters |
6 | * Contain common data types and routines for resource accounting |
7 | * |
8 | * Copyright 2007 OpenVZ SWsoft Inc |
9 | * |
10 | * Author: Pavel Emelianov <xemul@openvz.org> |
11 | * |
12 | * See Documentation/cgroups/resource_counter.txt for more |
13 | * info about what this counter is. |
14 | */ |
15 | |
16 | #include <linux/cgroup.h> |
17 | |
18 | /* |
19 | * The core object. the cgroup that wishes to account for some |
20 | * resource may include this counter into its structures and use |
21 | * the helpers described beyond |
22 | */ |
23 | |
struct res_counter {
	/*
	 * the current resource consumption level
	 */
	unsigned long long usage;
	/*
	 * the maximal value of the usage from the counter creation
	 */
	unsigned long long max_usage;
	/*
	 * the hard limit that usage cannot exceed
	 */
	unsigned long long limit;
	/*
	 * the soft limit: usage may temporarily exceed this value
	 * (see res_counter_soft_limit_excess() below)
	 */
	unsigned long long soft_limit;
	/*
	 * the number of unsuccessful attempts to consume the resource
	 */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/*
	 * Parent counter, used for hierarchical resource accounting
	 */
	struct res_counter *parent;
};
55 | |
/* Largest representable limit/usage value; used as the "unlimited" sentinel. */
#define RESOURCE_MAX (unsigned long long)LLONG_MAX

/**
 * Helpers to interact with userspace
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member: the field to work with (see RES_xxx below)
 * @buf: the buffer to operate on,...
 * @nbytes: its size...
 * @pos: and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
		const char __user *buf, size_t nbytes, loff_t *pos,
		int (*read_strategy)(unsigned long long val, char *s));

/* Parses a userspace-supplied string in @buf into a value stored at @val. */
typedef int (*write_strategy_fn)(const char *buf, unsigned long long *val);

int res_counter_memparse_write_strategy(const char *buf,
		unsigned long long *res);

int res_counter_write(struct res_counter *counter, int member,
		const char *buffer, write_strategy_fn write_strategy);

/*
 * the field descriptors. one for each member of res_counter
 */

enum {
	RES_USAGE,	/* res_counter.usage */
	RES_MAX_USAGE,	/* res_counter.max_usage */
	RES_LIMIT,	/* res_counter.limit */
	RES_FAILCNT,	/* res_counter.failcnt */
	RES_SOFT_LIMIT,	/* res_counter.soft_limit */
};

/*
 * helpers for accounting
 */

/*
 * Initializes @counter and links it to @parent for hierarchical
 * accounting (NULL for a root counter). Definition lives in
 * kernel/res_counter.c.
 */
void res_counter_init(struct res_counter *counter, struct res_counter *parent);

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if the counter->usage will exceed the
 * counter->limit
 *
 * the _locked call expects the counter->lock to be taken; the plain
 * call walks the parent chain and reports the first counter that hit
 * its limit through @limit_fail_at.
 */

int __must_check res_counter_charge_locked(struct res_counter *counter,
		unsigned long val);
int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console
 * the _locked call expects the counter->lock to be taken
 */

void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
void res_counter_uncharge(struct res_counter *counter, unsigned long val);
131 | |
132 | static inline bool res_counter_limit_check_locked(struct res_counter *cnt) |
133 | { |
134 | if (cnt->usage < cnt->limit) |
135 | return true; |
136 | |
137 | return false; |
138 | } |
139 | |
140 | static inline bool res_counter_soft_limit_check_locked(struct res_counter *cnt) |
141 | { |
142 | if (cnt->usage < cnt->soft_limit) |
143 | return true; |
144 | |
145 | return false; |
146 | } |
147 | |
148 | /** |
149 | * Get the difference between the usage and the soft limit |
150 | * @cnt: The counter |
151 | * |
152 | * Returns 0 if usage is less than or equal to soft limit |
153 | * The difference between usage and soft limit, otherwise. |
154 | */ |
155 | static inline unsigned long long |
156 | res_counter_soft_limit_excess(struct res_counter *cnt) |
157 | { |
158 | unsigned long long excess; |
159 | unsigned long flags; |
160 | |
161 | spin_lock_irqsave(&cnt->lock, flags); |
162 | if (cnt->usage <= cnt->soft_limit) |
163 | excess = 0; |
164 | else |
165 | excess = cnt->usage - cnt->soft_limit; |
166 | spin_unlock_irqrestore(&cnt->lock, flags); |
167 | return excess; |
168 | } |
169 | |
170 | /* |
171 | * Helper function to detect if the cgroup is within it's limit or |
172 | * not. It's currently called from cgroup_rss_prepare() |
173 | */ |
174 | static inline bool res_counter_check_under_limit(struct res_counter *cnt) |
175 | { |
176 | bool ret; |
177 | unsigned long flags; |
178 | |
179 | spin_lock_irqsave(&cnt->lock, flags); |
180 | ret = res_counter_limit_check_locked(cnt); |
181 | spin_unlock_irqrestore(&cnt->lock, flags); |
182 | return ret; |
183 | } |
184 | |
185 | static inline bool res_counter_check_under_soft_limit(struct res_counter *cnt) |
186 | { |
187 | bool ret; |
188 | unsigned long flags; |
189 | |
190 | spin_lock_irqsave(&cnt->lock, flags); |
191 | ret = res_counter_soft_limit_check_locked(cnt); |
192 | spin_unlock_irqrestore(&cnt->lock, flags); |
193 | return ret; |
194 | } |
195 | |
196 | static inline void res_counter_reset_max(struct res_counter *cnt) |
197 | { |
198 | unsigned long flags; |
199 | |
200 | spin_lock_irqsave(&cnt->lock, flags); |
201 | cnt->max_usage = cnt->usage; |
202 | spin_unlock_irqrestore(&cnt->lock, flags); |
203 | } |
204 | |
205 | static inline void res_counter_reset_failcnt(struct res_counter *cnt) |
206 | { |
207 | unsigned long flags; |
208 | |
209 | spin_lock_irqsave(&cnt->lock, flags); |
210 | cnt->failcnt = 0; |
211 | spin_unlock_irqrestore(&cnt->lock, flags); |
212 | } |
213 | |
214 | static inline int res_counter_set_limit(struct res_counter *cnt, |
215 | unsigned long long limit) |
216 | { |
217 | unsigned long flags; |
218 | int ret = -EBUSY; |
219 | |
220 | spin_lock_irqsave(&cnt->lock, flags); |
221 | if (cnt->usage <= limit) { |
222 | cnt->limit = limit; |
223 | ret = 0; |
224 | } |
225 | spin_unlock_irqrestore(&cnt->lock, flags); |
226 | return ret; |
227 | } |
228 | |
229 | static inline int |
230 | res_counter_set_soft_limit(struct res_counter *cnt, |
231 | unsigned long long soft_limit) |
232 | { |
233 | unsigned long flags; |
234 | |
235 | spin_lock_irqsave(&cnt->lock, flags); |
236 | cnt->soft_limit = soft_limit; |
237 | spin_unlock_irqrestore(&cnt->lock, flags); |
238 | return 0; |
239 | } |
240 | |
241 | #endif |
242 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9