#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN		10
#define CFQ_WEIGHT_MAX		1000
#define CFQ_WEIGHT_DEFAULT	500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq			*blkg_hint;
	struct hlist_head		blkg_list;

	/* for policies to test whether associated blkcg has changed */
	uint64_t			id;

	/* TODO: per-policy storage in blkcg */
	unsigned int			cfq_weight;	/* belongs to cfq */
};

struct blkg_stat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt;
};

struct blkg_rwstat {
	struct u64_stats_sync		syncp;
	uint64_t			cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is the association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size.  blkcg core allocates and frees such areas
 * together with the blkg and invokes the pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than pd.
 */
struct blkg_policy_data {
	/* the blkg this per-policy data belongs to */
	struct blkcg_gq			*blkg;

	/* used during policy activation */
	struct list_head		alloc_node;
};
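
/*
 * Illustrative sketch (editor's example, not part of this interface):
 * a hypothetical "foo" policy embeds struct blkg_policy_data as its
 * first member and sizes pd_size accordingly:
 *
 *	struct foo_group {
 *		struct blkg_policy_data	pd;	- must be the first member
 *		unsigned int		weight;	- policy-specific state
 *	};
 *
 * with .pd_size = sizeof(struct foo_group), so that
 * container_of(pd, struct foo_group, pd) recovers the policy data
 * from a pd pointer.
 */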

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;
	/* reference count */
	int				refcnt;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
	int				plid;
	/* policy specific private data size */
	size_t				pd_size;
	/* cgroup files for the policy */
	struct cftype			*cftypes;

	/* operations */
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_exit_pd_fn		*pd_exit_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup);
struct blkcg *bio_blkcg(struct bio *bio);
struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
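
/*
 * Illustrative sketch of policy registration (editor's example; the
 * "foo" names are hypothetical).  A policy fills in a blkcg_policy,
 * registers it once at init time, and activates it on each queue it
 * manages.  plid is assigned by blkcg_policy_register():
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_size		= sizeof(struct foo_group),
 *		.cftypes		= foo_files,
 *		.pd_init_fn		= foo_pd_init,
 *		.pd_exit_fn		= foo_pd_exit,
 *		.pd_reset_stats_fn	= foo_pd_reset_stats,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */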

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
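
/*
 * Illustrative sketch (editor's example, hypothetical names): a
 * seq_file show method prints one counter per blkg by handing
 * blkcg_print_blkgs() a prfill helper plus the offset of the counter
 * within the policy's private data:
 *
 *	static int foo_print_stat(struct cgroup *cgrp, struct cftype *cft,
 *				  struct seq_file *sf)
 *	{
 *		struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 *
 *		blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat,
 *				  &blkcg_policy_foo,
 *				  offsetof(struct foo_group, stat),
 *				  false);
 *		return 0;
 *	}
 */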

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	u64				v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
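
/*
 * Illustrative sketch (editor's example; foo_set_limit() is
 * hypothetical and error handling is abbreviated): a cgroup file write
 * handler parses "MAJ:MIN VAL" input with blkg_conf_prep(), applies the
 * value to the looked-up blkg, then releases the context:
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, input, &ctx);
 *	if (ret)
 *		return ret;
 *	foo_set_limit(ctx.blkg, ctx.v);
 *	blkg_conf_finish(&ctx);
 */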

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	int ret;

	rcu_read_lock();
	ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
	rcu_read_unlock();
	if (ret)
		strlcpy(buf, "<unavailable>", buflen);	/* always NUL-terminate */
	return ret;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(!blkg->refcnt);
	blkg->refcnt++;
}

void __blkg_release(struct blkcg_gq *blkg);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	lockdep_assert_held(blkg->q->queue_lock);
	WARN_ON_ONCE(blkg->refcnt <= 0);
	if (!--blkg->refcnt)
		__blkg_release(blkg);
}
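
/*
 * Illustrative sketch (editor's example): taking a temporary reference
 * so a blkg can be used across queue_lock critical sections.  This
 * assumes the blkg returned by blkg_lookup() is kept alive by the
 * queue while queue_lock is held:
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);
 *	spin_unlock_irq(q->queue_lock);
 *
 *	... use blkg ...
 *
 *	spin_lock_irq(q->queue_lock);
 *	blkg_put(blkg);
 *	spin_unlock_irq(q->queue_lock);
 */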

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls
 * to this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	u64_stats_update_begin(&stat->syncp);
	stat->cnt += val;
	u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	unsigned int start;
	uint64_t v;

	do {
		start = u64_stats_fetch_begin(&stat->syncp);
		v = stat->cnt;
	} while (u64_stats_fetch_retry(&stat->syncp, start));

	return v;
}
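
/*
 * Illustrative sketch (editor's example, hypothetical fields): writers
 * are serialized by the caller (e.g. under queue_lock) while readers
 * need no locking:
 *
 *	blkg_stat_add(&fg->stats.serviced, 1);		- update path
 *
 *	seq_printf(sf, "%llu\n",			- stat file read
 *		   (unsigned long long)blkg_stat_read(&fg->stats.serviced));
 */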

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	stat->cnt = 0;
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   int rw, uint64_t val)
{
	u64_stats_update_begin(&rwstat->syncp);

	if (rw & REQ_WRITE)
		rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_READ] += val;
	if (rw & REQ_SYNC)
		rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
	else
		rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

	u64_stats_update_end(&rwstat->syncp);
}
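
/*
 * Illustrative sketch (editor's example; the stats field is
 * hypothetical): accounting a completed bio, letting the REQ_WRITE and
 * REQ_SYNC bits of bio->bi_rw select the direction and sync counters:
 *
 *	blkg_rwstat_add(&fg->stats.service_bytes, bio->bi_rw, bio->bi_size);
 */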

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read and return the current snapshot of @rwstat.  This function can
 * be called without synchronization and takes care of u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	unsigned int start;
	struct blkg_rwstat tmp;

	do {
		start = u64_stats_fetch_begin(&rwstat->syncp);
		tmp = *rwstat;
	} while (u64_stats_fetch_retry(&rwstat->syncp, start));

	return tmp;
}

/**
 * blkg_rwstat_sum - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_sum(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

#else	/* CONFIG_BLK_CGROUP */

struct cgroup;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg *cgroup_to_blkcg(struct cgroup *cgroup) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
/* match the CONFIG_BLK_CGROUP signature so callers build either way */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{ return -EINVAL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */