/*
 *  Deadline i/o scheduler.
 *
 *  Copyright (C) 2002 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>

/*
 * See Documentation/block/deadline-iosched.txt
 */
static const int read_expire = HZ / 2;	/* max time before a read is submitted. */
static const int write_expire = 5 * HZ;	/* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2;	/* max times reads can starve a write */
static const int fifo_batch = 16;	/* # of sequential requests treated as one
					   by the above parameters. For throughput. */
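/*
 * Worked example (assuming the common CONFIG_HZ=1000; HZ is a build-time
 * setting, so the absolute times below vary with the kernel config):
 *
 *   read_expire  = HZ / 2 =  500 jiffies ~  500 ms
 *   write_expire = 5 * HZ = 5000 jiffies ~ 5000 ms
 *
 * A read's FIFO deadline therefore fires ten times sooner than a write's,
 * and reads may be preferred over pending writes at most writes_starved
 * (2) times before the write side must be serviced.
 */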

struct deadline_data {
	/*
	 * run time data
	 */

	/*
	 * requests (deadline_rq s) are present on both sort_list and fifo_list
	 */
	struct rb_root sort_list[2];
	struct list_head fifo_list[2];

	/*
	 * next request in sector-sorted order; read, write or both may
	 * be NULL
	 */
	struct request *next_rq[2];
	unsigned int batching;		/* number of sequential requests made */
	sector_t last_sector;		/* head position */
	unsigned int starved;		/* times reads have starved writes */

	/*
	 * settings that change how the i/o scheduler behaves
	 */
	int fifo_expire[2];
	int fifo_batch;
	int writes_starved;
	int front_merges;
};
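
/*
 * Design note: every queued request sits on two structures at once. The
 * per-direction rbtree (sort_list) keys requests by start sector so the
 * scheduler can sweep the disk in one direction; the per-direction FIFO
 * (fifo_list) orders them by insertion time. Because each direction adds
 * a constant expiry (fifo_expire[dir]) at insertion, FIFO order is also
 * deadline order, and the head of the list is always the next to expire.
 */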

static void deadline_move_request(struct deadline_data *, struct request *);

static inline struct rb_root *
deadline_rb_root(struct deadline_data *dd, struct request *rq)
{
	return &dd->sort_list[rq_data_dir(rq)];
}

/*
 * get the request after `rq' in sector-sorted order
 */
static inline struct request *
deadline_latter_request(struct request *rq)
{
	struct rb_node *node = rb_next(&rq->rb_node);

	if (node)
		return rb_entry_rq(node);

	return NULL;
}

static void
deadline_add_rq_rb(struct deadline_data *dd, struct request *rq)
{
	struct rb_root *root = deadline_rb_root(dd, rq);
	struct request *__alias;

	while (unlikely(__alias = elv_rb_add(root, rq)))
		deadline_move_request(dd, __alias);
}
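
/*
 * Note on the loop above: elv_rb_add() returns an "alias" when another
 * request with the same start sector already sits in the tree (the rbtree
 * holds at most one request per sector key). Each alias is pushed out to
 * the dispatch queue and the insert retried; aliases should be rare,
 * hence the unlikely() annotation.
 */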

static inline void
deadline_del_rq_rb(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	if (dd->next_rq[data_dir] == rq)
		dd->next_rq[data_dir] = deadline_latter_request(rq);

	elv_rb_del(deadline_rb_root(dd, rq), rq);
}

/*
 * add rq to rbtree and fifo
 */
static void
deadline_add_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int data_dir = rq_data_dir(rq);

	deadline_add_rq_rb(dd, rq);

	/*
	 * set expire time and add to fifo list
	 */
	rq_set_fifo_time(rq, jiffies + dd->fifo_expire[data_dir]);
	list_add_tail(&rq->queuelist, &dd->fifo_list[data_dir]);
}
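
/*
 * The deadline itself is just "now + a per-direction constant", so
 * list_add_tail() keeps fifo_list sorted by expiry as a side effect:
 * no sorted insert is ever needed on the FIFO path.
 */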

/*
 * remove rq from rbtree and fifo.
 */
static void deadline_remove_request(struct request_queue *q, struct request *rq)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	rq_fifo_clear(rq);
	deadline_del_rq_rb(dd, rq);
}

static int
deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	/*
	 * check for front merge
	 */
	if (dd->front_merges) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		__rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
		if (__rq) {
			BUG_ON(sector != blk_rq_pos(__rq));

			if (elv_rq_merge_ok(__rq, bio)) {
				ret = ELEVATOR_FRONT_MERGE;
				goto out;
			}
		}
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}
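
/*
 * Front-merge lookup, in outline: a bio can be glued onto the front of an
 * existing request only if that request starts at the sector immediately
 * past the bio's end. That candidate start sector is exactly
 * bio->bi_sector + bio_sectors(bio), and because sort_list is keyed by
 * start sector, elv_rb_find() locates the one possible candidate in
 * O(log n); the BUG_ON() then asserts the key actually matched.
 */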

static void deadline_merged_request(struct request_queue *q,
				    struct request *req, int type)
{
	struct deadline_data *dd = q->elevator->elevator_data;

	/*
	 * if the merge was a front merge, we need to reposition request
	 */
	if (type == ELEVATOR_FRONT_MERGE) {
		elv_rb_del(deadline_rb_root(dd, req), req);
		deadline_add_rq_rb(dd, req);
	}
}

static void
deadline_merged_requests(struct request_queue *q, struct request *req,
			 struct request *next)
{
	/*
	 * if next expires before rq, assign its expire time to rq
	 * and move into next position (next will be deleted) in fifo
	 */
	if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) {
		if (time_before(rq_fifo_time(next), rq_fifo_time(req))) {
			list_move(&req->queuelist, &next->queuelist);
			rq_set_fifo_time(req, rq_fifo_time(next));
		}
	}

	/*
	 * kill knowledge of next, this one is a goner
	 */
	deadline_remove_request(q, next);
}
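
/*
 * The inheritance above preserves the deadline invariant across merges:
 * the surviving request adopts the earlier of the two expiry times (and
 * the corresponding FIFO slot), so merging two requests can never push
 * their service past the deadline either one already had.
 */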

/*
 * move request from sort list to dispatch queue.
 */
static inline void
deadline_move_to_dispatch(struct deadline_data *dd, struct request *rq)
{
	struct request_queue *q = rq->q;

	deadline_remove_request(q, rq);
	elv_dispatch_add_tail(q, rq);
}

/*
 * move an entry to dispatch queue
 */
static void
deadline_move_request(struct deadline_data *dd, struct request *rq)
{
	const int data_dir = rq_data_dir(rq);

	dd->next_rq[READ] = NULL;
	dd->next_rq[WRITE] = NULL;
	dd->next_rq[data_dir] = deadline_latter_request(rq);

	dd->last_sector = rq_end_sector(rq);

	/*
	 * take it off the sort and fifo list, move
	 * to dispatch queue
	 */
	deadline_move_to_dispatch(dd, rq);
}
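
/*
 * next_rq[] caches the in-order successor of the request just dispatched,
 * so a batch can keep walking the rbtree in sector order without a fresh
 * lookup. Clearing both slots before setting one also encodes the
 * "batches are reads XOR writes" rule: at most one direction ever has a
 * cached next request.
 */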

/*
 * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
 * 1 otherwise. Requires !list_empty(&dd->fifo_list[ddir])
 */
static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
{
	struct request *rq = rq_entry_fifo(dd->fifo_list[ddir].next);

	/*
	 * rq is expired!
	 */
	if (time_after(jiffies, rq_fifo_time(rq)))
		return 1;

	return 0;
}
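
/*
 * Only the FIFO head needs checking: the list is expiry-ordered (see
 * deadline_add_request()), so if the oldest request has not expired,
 * none has. time_after() compares modulo the width of the jiffies
 * counter, so the test stays correct across jiffies wraparound.
 */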

/*
 * deadline_dispatch_requests selects the best request according to
 * read/write expire, fifo_batch, etc
 */
static int deadline_dispatch_requests(struct request_queue *q, int force)
{
	struct deadline_data *dd = q->elevator->elevator_data;
	const int reads = !list_empty(&dd->fifo_list[READ]);
	const int writes = !list_empty(&dd->fifo_list[WRITE]);
	struct request *rq;
	int data_dir;

	/*
	 * batches are currently reads XOR writes
	 */
	if (dd->next_rq[WRITE])
		rq = dd->next_rq[WRITE];
	else
		rq = dd->next_rq[READ];

	if (rq && dd->batching < dd->fifo_batch)
		/* we have a next request and are still entitled to batch */
		goto dispatch_request;

	/*
	 * at this point we are not running a batch. select the appropriate
	 * data direction (read / write)
	 */

	if (reads) {
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[READ]));

		if (writes && (dd->starved++ >= dd->writes_starved))
			goto dispatch_writes;

		data_dir = READ;

		goto dispatch_find_request;
	}

	/*
	 * there are either no reads or writes have been starved
	 */

	if (writes) {
dispatch_writes:
		BUG_ON(RB_EMPTY_ROOT(&dd->sort_list[WRITE]));

		dd->starved = 0;

		data_dir = WRITE;

		goto dispatch_find_request;
	}

	return 0;

dispatch_find_request:
	/*
	 * we are not running a batch, find best request for selected data_dir
	 */
	if (deadline_check_fifo(dd, data_dir) || !dd->next_rq[data_dir]) {
		/*
		 * A deadline has expired, the last request was in the other
		 * direction, or we have run out of higher-sectored requests.
		 * Start again from the request with the earliest expiry time.
		 */
		rq = rq_entry_fifo(dd->fifo_list[data_dir].next);
	} else {
		/*
		 * The last req was the same dir and we have a next request in
		 * sort order. No expired requests so continue on from here.
		 */
		rq = dd->next_rq[data_dir];
	}

	dd->batching = 0;

dispatch_request:
	/*
	 * rq is the selected appropriate request.
	 */
	dd->batching++;
	deadline_move_request(dd, rq);

	return 1;
}
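
/*
 * Dispatch decision, in outline:
 *
 *   1. If a batch is in flight (next_rq set, batching < fifo_batch),
 *      keep issuing sequential requests from the cached position.
 *   2. Otherwise pick a direction: reads win unless they have already
 *      been preferred over waiting writes writes_starved times.
 *   3. Within that direction, restart from the FIFO head if a deadline
 *      expired or the sector-ordered sweep ran out; otherwise continue
 *      from next_rq. Either way a new batch begins.
 *
 * Note that deadlines only influence where a new batch starts; an
 * in-progress batch is never preempted, which is why the comment at the
 * top of the file calls these limits soft.
 */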

static void deadline_exit_queue(struct elevator_queue *e)
{
	struct deadline_data *dd = e->elevator_data;

	BUG_ON(!list_empty(&dd->fifo_list[READ]));
	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));

	kfree(dd);
}

/*
 * initialize elevator private data (deadline_data).
 */
static void *deadline_init_queue(struct request_queue *q)
{
	struct deadline_data *dd;

	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!dd)
		return NULL;

	INIT_LIST_HEAD(&dd->fifo_list[READ]);
	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
	dd->sort_list[READ] = RB_ROOT;
	dd->sort_list[WRITE] = RB_ROOT;
	dd->fifo_expire[READ] = read_expire;
	dd->fifo_expire[WRITE] = write_expire;
	dd->writes_starved = writes_starved;
	dd->front_merges = 1;
	dd->fifo_batch = fifo_batch;
	return dd;
}
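
/*
 * Allocation note: kmalloc_node(..., GFP_KERNEL | __GFP_ZERO, q->node)
 * zeroes the structure and places it on the NUMA node of the request
 * queue; kzalloc_node() would be an equivalent spelling. The zeroing is
 * what leaves batching, starved, last_sector and next_rq[] at 0/NULL,
 * so those fields need no explicit initialization above.
 */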

/*
 * sysfs parts below
 */

static ssize_t
deadline_var_show(int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
deadline_var_store(int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtol(p, &p, 10);
	return count;
}
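
/*
 * simple_strtol() performs no overflow or trailing-garbage checking; in
 * later kernels checkpatch flags it as obsolete in favour of the kstrto*
 * helpers (e.g. kstrtoint()). It is kept here as the era-appropriate
 * idiom for parsing a single decimal sysfs value.
 */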

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data = __VAR;						\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return deadline_var_show(__data, (page));			\
}
SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct deadline_data *dd = e->elevator_data;			\
	int __data;							\
	int ret = deadline_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
#undef STORE_FUNCTION

#define DD_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
				      deadline_##name##_store)

static struct elv_fs_entry deadline_attrs[] = {
	DD_ATTR(read_expire),
	DD_ATTR(write_expire),
	DD_ATTR(writes_starved),
	DD_ATTR(front_merges),
	DD_ATTR(fifo_batch),
	__ATTR_NULL
};
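
/*
 * When this scheduler is active on a queue, the attributes above appear
 * under /sys/block/<dev>/queue/iosched/. A hypothetical tuning session
 * (device name and values illustrative only):
 *
 *   echo deadline > /sys/block/sda/queue/scheduler
 *   cat /sys/block/sda/queue/iosched/read_expire      # 500 (ms)
 *   echo 100 > /sys/block/sda/queue/iosched/read_expire
 *
 * The __CONV flag in the macros above is what makes read_expire and
 * write_expire round-trip through milliseconds while being stored in
 * jiffies internally.
 */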

static struct elevator_type iosched_deadline = {
	.ops = {
		.elevator_merge_fn =		deadline_merge,
		.elevator_merged_fn =		deadline_merged_request,
		.elevator_merge_req_fn =	deadline_merged_requests,
		.elevator_dispatch_fn =		deadline_dispatch_requests,
		.elevator_add_req_fn =		deadline_add_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_fn =		deadline_init_queue,
		.elevator_exit_fn =		deadline_exit_queue,
	},

	.elevator_attrs = deadline_attrs,
	.elevator_name = "deadline",
	.elevator_owner = THIS_MODULE,
};

static int __init deadline_init(void)
{
	elv_register(&iosched_deadline);

	return 0;
}

static void __exit deadline_exit(void)
{
	elv_unregister(&iosched_deadline);
}

module_init(deadline_init);
module_exit(deadline_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("deadline IO scheduler");