Root/
1 | #ifndef _BLK_CGROUP_H |
2 | #define _BLK_CGROUP_H |
3 | /* |
4 | * Common Block IO controller cgroup interface |
5 | * |
6 | * Based on ideas and code from CFQ, CFS and BFQ: |
7 | * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> |
8 | * |
9 | * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> |
10 | * Paolo Valente <paolo.valente@unimore.it> |
11 | * |
12 | * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com> |
13 | * Nauman Rafique <nauman@google.com> |
14 | */ |
15 | |
16 | #include <linux/cgroup.h> |
17 | |
/*
 * IDs of the IO control policies that can own a blkio group.  Stored in
 * blkio_group::plid / blkio_policy_node::plid to tell owners apart.
 */
enum blkio_policy_id {
	BLKIO_POLICY_PROP = 0,		/* Proportional Bandwidth division */
	BLKIO_POLICY_THROTL,		/* Throttling */
};
22 | |
23 | /* Max limits for throttle policy */ |
24 | #define THROTL_IOPS_MAX UINT_MAX |
25 | |
26 | #if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE) |
27 | |
28 | #ifndef CONFIG_BLK_CGROUP |
29 | /* When blk-cgroup is a module, its subsys_id isn't a compile-time constant */ |
30 | extern struct cgroup_subsys blkio_subsys; |
31 | #define blkio_subsys_id blkio_subsys.subsys_id |
32 | #endif |
33 | |
/*
 * Kinds of statistics kept per blkio group.  Every entry up to and
 * including BLKIO_STAT_QUEUED is a per-direction/sync array (second
 * index: enum stat_sub_type, see blkio_group_stats::stat_arr); the
 * entries after BLKIO_STAT_QUEUED are single values.
 */
enum stat_type {
	/* Total time spent (in ns) between request dispatch to the driver and
	 * request completion for IOs done by this cgroup. This may not be
	 * accurate when NCQ is turned on. */
	BLKIO_STAT_SERVICE_TIME = 0,
	/* Total bytes transferred */
	BLKIO_STAT_SERVICE_BYTES,
	/* Total IOs serviced, post merge */
	BLKIO_STAT_SERVICED,
	/* Total time spent waiting in scheduler queue in ns */
	BLKIO_STAT_WAIT_TIME,
	/* Number of IOs merged */
	BLKIO_STAT_MERGED,
	/* Number of IOs queued up */
	BLKIO_STAT_QUEUED,
	/* All the single valued stats go below this */
	BLKIO_STAT_TIME,
	BLKIO_STAT_SECTORS,
#ifdef CONFIG_DEBUG_BLK_CGROUP
	BLKIO_STAT_AVG_QUEUE_SIZE,
	BLKIO_STAT_IDLE_TIME,
	BLKIO_STAT_EMPTY_TIME,
	BLKIO_STAT_GROUP_WAIT_TIME,
	BLKIO_STAT_DEQUEUE
#endif
};
60 | |
/*
 * Second index of the multi-valued stats in blkio_group_stats::stat_arr:
 * breakdown by request direction and synchronicity, plus a grand total.
 */
enum stat_sub_type {
	BLKIO_STAT_READ = 0,
	BLKIO_STAT_WRITE,
	BLKIO_STAT_SYNC,
	BLKIO_STAT_ASYNC,
	BLKIO_STAT_TOTAL	/* sum slot; also the array dimension */
};
68 | |
69 | /* blkg state flags */ |
/*
 * blkg state flags: bit numbers used in blkio_group_stats::flags via the
 * BLKG_FLAG_FNS() accessors (mark/clear/test as 1 << BLKG_<name>).
 */
enum blkg_state_flags {
	BLKG_waiting = 0,	/* group is waiting for a timeslice */
	BLKG_idling,		/* group is being idled on */
	BLKG_empty,		/* group has no queued requests */
};
75 | |
76 | /* cgroup files owned by proportional weight policy */ |
/*
 * cgroup files owned by proportional weight policy.  These values are
 * used as per-file identifiers (cf. blkio_policy_node::fileid).
 * NOTE(review): numbering starts at 1 — presumably so that 0 never
 * matches a valid fileid; confirm against the users of ->fileid.
 */
enum blkcg_file_name_prop {
	BLKIO_PROP_weight = 1,
	BLKIO_PROP_weight_device,
	BLKIO_PROP_io_service_bytes,
	BLKIO_PROP_io_serviced,
	BLKIO_PROP_time,
	BLKIO_PROP_sectors,
	BLKIO_PROP_io_service_time,
	BLKIO_PROP_io_wait_time,
	BLKIO_PROP_io_merged,
	BLKIO_PROP_io_queued,
	BLKIO_PROP_avg_queue_size,
	BLKIO_PROP_group_wait_time,
	BLKIO_PROP_idle_time,
	BLKIO_PROP_empty_time,
	BLKIO_PROP_dequeue,
};
94 | |
95 | /* cgroup files owned by throttle policy */ |
/*
 * cgroup files owned by throttle policy.  Per-file identifiers for the
 * blk-throttle limit and stat files (cf. blkio_policy_node::fileid).
 */
enum blkcg_file_name_throtl {
	BLKIO_THROTL_read_bps_device,
	BLKIO_THROTL_write_bps_device,
	BLKIO_THROTL_read_iops_device,
	BLKIO_THROTL_write_iops_device,
	BLKIO_THROTL_io_service_bytes,
	BLKIO_THROTL_io_serviced,
};
104 | |
/* Per-cgroup state of the block IO controller. */
struct blkio_cgroup {
	struct cgroup_subsys_state css;	/* embedded cgroup subsystem state */
	unsigned int weight;		/* default proportional weight */
	/* NOTE(review): presumably guards the two lists below — confirm */
	spinlock_t lock;
	struct hlist_head blkg_list;	/* blkio_groups of this cgroup */
	struct list_head policy_list;	/* list of blkio_policy_node */
};
112 | |
/*
 * Statistics gathered for one blkio_group.  Serialized by the owning
 * blkio_group::stats_lock for reset/update (see that struct).
 */
struct blkio_group_stats {
	/* total disk time and nr sectors dispatched by this group */
	uint64_t time;
	uint64_t sectors;
	/*
	 * Multi-valued stats (enum stat_type up to BLKIO_STAT_QUEUED),
	 * each broken down by enum stat_sub_type.
	 */
	uint64_t stat_arr[BLKIO_STAT_QUEUED + 1][BLKIO_STAT_TOTAL];
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* Sum of number of IOs queued across all samples */
	uint64_t avg_queue_size_sum;
	/* Count of samples taken for average */
	uint64_t avg_queue_size_samples;
	/* How many times this group has been removed from service tree */
	unsigned long dequeue;

	/* Total time spent waiting for it to be assigned a timeslice. */
	uint64_t group_wait_time;
	uint64_t start_group_wait_time;	/* timestamp the current wait began */

	/* Time spent idling for this blkio_group */
	uint64_t idle_time;
	uint64_t start_idle_time;	/* timestamp the current idle began */
	/*
	 * Total time when we have requests queued and do not contain the
	 * current active queue.
	 */
	uint64_t empty_time;
	uint64_t start_empty_time;
	uint16_t flags;		/* 1 << BLKG_* bits, see enum blkg_state_flags */
#endif
};
142 | |
/*
 * One group of IOs as seen by a policy: the per-(cgroup, key, policy)
 * bookkeeping object linked into blkio_cgroup::blkg_list.
 */
struct blkio_group {
	/* An rcu protected unique identifier for the group */
	void *key;
	struct hlist_node blkcg_node;	/* entry in blkio_cgroup::blkg_list */
	unsigned short blkcg_id;	/* css id of the owning blkio cgroup */
	/* Store cgroup path */
	char path[128];
	/* The device MKDEV(major, minor), this group has been created for */
	dev_t dev;
	/* policy which owns this blk group */
	enum blkio_policy_id plid;

	/* Need to serialize the stats in the case of reset/update */
	spinlock_t stats_lock;
	struct blkio_group_stats stats;
};
159 | |
/*
 * One per-device rule (weight or throttling limit) hanging off a
 * blkio_cgroup::policy_list.
 */
struct blkio_policy_node {
	struct list_head node;		/* entry in blkio_cgroup::policy_list */
	dev_t dev;			/* device this rule applies to */
	/* This node belongs to max bw policy or proportional weight policy */
	enum blkio_policy_id plid;
	/* cgroup file to which this rule belongs to */
	int fileid;

	union {
		unsigned int weight;
		/*
		 * Rate read/write in terms of bytes per second
		 * Whether this rate represents read or write is determined
		 * by file type "fileid".
		 */
		u64 bps;
		unsigned int iops;
	} val;
};
179 | |
/*
 * Per-device rule lookups: return the weight / bps / iops limit
 * configured for @dev in @blkcg (implemented by the blk-cgroup core).
 */
extern unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg,
				     dev_t dev);
extern unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg,
				     dev_t dev);
190 | |
191 | typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg); |
192 | |
193 | typedef void (blkio_update_group_weight_fn) (void *key, |
194 | struct blkio_group *blkg, unsigned int weight); |
195 | typedef void (blkio_update_group_read_bps_fn) (void * key, |
196 | struct blkio_group *blkg, u64 read_bps); |
197 | typedef void (blkio_update_group_write_bps_fn) (void *key, |
198 | struct blkio_group *blkg, u64 write_bps); |
199 | typedef void (blkio_update_group_read_iops_fn) (void *key, |
200 | struct blkio_group *blkg, unsigned int read_iops); |
201 | typedef void (blkio_update_group_write_iops_fn) (void *key, |
202 | struct blkio_group *blkg, unsigned int write_iops); |
203 | |
/*
 * Vtable of notification callbacks a policy fills in before calling
 * blkio_policy_register().  Any unused hook may be left NULL —
 * NOTE(review): presumably checked by the core before calling; confirm.
 */
struct blkio_policy_ops {
	blkio_unlink_group_fn *blkio_unlink_group_fn;
	blkio_update_group_weight_fn *blkio_update_group_weight_fn;
	blkio_update_group_read_bps_fn *blkio_update_group_read_bps_fn;
	blkio_update_group_write_bps_fn *blkio_update_group_write_bps_fn;
	blkio_update_group_read_iops_fn *blkio_update_group_read_iops_fn;
	blkio_update_group_write_iops_fn *blkio_update_group_write_iops_fn;
};
212 | |
/* A registered blkio policy: its callbacks plus the policy ID it owns. */
struct blkio_policy_type {
	struct list_head list;		/* entry in the core's policy list */
	struct blkio_policy_ops ops;	/* notification callbacks */
	enum blkio_policy_id plid;	/* which policy this is */
};
218 | |
/* Blkio controller policy registration */
extern void blkio_policy_register(struct blkio_policy_type *);
extern void blkio_policy_unregister(struct blkio_policy_type *);

/* Return the cgroup path cached in @blkg (see blkio_group::path). */
static inline char *blkg_path(struct blkio_group *blkg)
{
	return blkg->path;
}
227 | |
228 | #else |
229 | |
/*
 * Stubs so code using the controller still builds when neither
 * CONFIG_BLK_CGROUP nor CONFIG_BLK_CGROUP_MODULE is set.
 */
struct blkio_group {
};

struct blkio_policy_type {
};

static inline void blkio_policy_register(struct blkio_policy_type *blkiop) { }
static inline void blkio_policy_unregister(struct blkio_policy_type *blkiop) { }

static inline char *blkg_path(struct blkio_group *blkg) { return NULL; }
241 | #endif |
242 | |
/* Valid range and default of the proportional-weight policy's weight. */
#define BLKIO_WEIGHT_MIN	100
#define BLKIO_WEIGHT_MAX	1000
#define BLKIO_WEIGHT_DEFAULT	500
246 | |
#ifdef CONFIG_DEBUG_BLK_CGROUP
/* Debug-only stat hooks, implemented by the blk-cgroup core. */
void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg);
void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
				unsigned long dequeue);
void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg);
void blkiocg_update_idle_time_stats(struct blkio_group *blkg);
void blkiocg_set_start_empty_time(struct blkio_group *blkg);
254 | |
/*
 * BLKG_FLAG_FNS(name) - generate accessors for one blkg state flag.
 * Expands, for flag "name", to three static inline helpers operating on
 * blkio_group_stats::flags:
 *   blkio_mark_blkg_<name>()  - set bit BLKG_<name>
 *   blkio_clear_blkg_<name>() - clear it
 *   blkio_blkg_<name>()       - test it (returns 0 or 1)
 */
#define BLKG_FLAG_FNS(name)						\
static inline void blkio_mark_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags |= (1 << BLKG_##name);				\
}									\
static inline void blkio_clear_blkg_##name(				\
		struct blkio_group_stats *stats)			\
{									\
	stats->flags &= ~(1 << BLKG_##name);				\
}									\
static inline int blkio_blkg_##name(struct blkio_group_stats *stats)	\
{									\
	return (stats->flags & (1 << BLKG_##name)) != 0;		\
}									\

/* Instantiate accessors for each flag in enum blkg_state_flags. */
BLKG_FLAG_FNS(waiting)
BLKG_FLAG_FNS(idling)
BLKG_FLAG_FNS(empty)
#undef BLKG_FLAG_FNS
275 | #else |
/* No-op stubs when CONFIG_DEBUG_BLK_CGROUP is disabled. */
static inline void blkiocg_update_avg_queue_size_stats(
						struct blkio_group *blkg) {}
static inline void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
						unsigned long dequeue) {}
static inline void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{}
static inline void blkiocg_update_idle_time_stats(struct blkio_group *blkg) {}
static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
284 | #endif |
285 | |
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
/* Core blk-cgroup API, available when the controller is built or modular. */
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
/* Link @blkg (identified by @key, for @dev, owned by @plid) into @blkcg. */
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
	struct blkio_group *blkg, void *key, dev_t dev,
	enum blkio_policy_id plid);
extern int blkiocg_del_blkio_group(struct blkio_group *blkg);
extern struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg,
						void *key);
/* Stat update hooks called by the IO schedulers / throttling code. */
void blkiocg_update_timeslice_used(struct blkio_group *blkg,
					unsigned long time);
void blkiocg_update_dispatch_stats(struct blkio_group *blkg, uint64_t bytes,
						bool direction, bool sync);
void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync);
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync);
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync);
void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
					bool direction, bool sync);
307 | #else |
/* No-op stubs for the API above when the controller is disabled. */
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid) {}

static inline int
blkiocg_del_blkio_group(struct blkio_group *blkg) { return 0; }

static inline struct blkio_group *
blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key) { return NULL; }
static inline void blkiocg_update_timeslice_used(struct blkio_group *blkg,
						unsigned long time) {}
static inline void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync) {}
static inline void blkiocg_update_completion_stats(struct blkio_group *blkg,
		uint64_t start_time, uint64_t io_start_time, bool direction,
		bool sync) {}
static inline void blkiocg_update_io_merged_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
static inline void blkiocg_update_io_add_stats(struct blkio_group *blkg,
		struct blkio_group *curr_blkg, bool direction, bool sync) {}
static inline void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync) {}
334 | #endif |
335 | #endif /* _BLK_CGROUP_H */ |
336 |
Branches:
ben-wpan
ben-wpan-stefan
javiroman/ks7010
jz-2.6.34
jz-2.6.34-rc5
jz-2.6.34-rc6
jz-2.6.34-rc7
jz-2.6.35
jz-2.6.36
jz-2.6.37
jz-2.6.38
jz-2.6.39
jz-3.0
jz-3.1
jz-3.11
jz-3.12
jz-3.13
jz-3.15
jz-3.16
jz-3.18-dt
jz-3.2
jz-3.3
jz-3.4
jz-3.5
jz-3.6
jz-3.6-rc2-pwm
jz-3.9
jz-3.9-clk
jz-3.9-rc8
jz47xx
jz47xx-2.6.38
master
Tags:
od-2011-09-04
od-2011-09-18
v2.6.34-rc5
v2.6.34-rc6
v2.6.34-rc7
v3.9