/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"

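/*
 * A queue sysfs attribute.  Unlike a bare struct attribute, the show and
 * store methods take the owning request_queue directly, so the individual
 * handlers below never have to rederive it from the kobject.
 */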
struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};

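/*
 * Helpers shared by all of the attribute handlers: print or parse a single
 * unsigned long.  Note that queue_var_store() always reports the whole
 * buffer as consumed; range checking is left to the callers.
 */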
static ssize_t
queue_var_show(unsigned long var, char *page)
{
        return sprintf(page, "%lu\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

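/*
 * nr_requests is the depth of the request allocation pool.  A write clamps
 * the value to at least BLKDEV_MIN_RQ, then recomputes the congestion
 * on/off thresholds and the queue-full state for both the sync and async
 * request lists, waking any waiters that now fit under the new limit.
 * From userspace (the device name is just an example):
 *
 *      echo 512 > /sys/block/sda/queue/nr_requests
 */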
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        struct request_list *rl = &q->rq;
        unsigned long nr;
        int ret;

        if (!q->request_fn)
                return -EINVAL;

        ret = queue_var_store(&nr, page, count);
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);

        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_SYNC);

        if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_ASYNC);
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);

        if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_SYNC);
        } else if (rl->count[BLK_RW_SYNC] + 1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_SYNC);
                wake_up(&rl->wait[BLK_RW_SYNC]);
        }

        if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_ASYNC);
        } else if (rl->count[BLK_RW_ASYNC] + 1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_ASYNC);
                wake_up(&rl->wait[BLK_RW_ASYNC]);
        }
        spin_unlock_irq(q->queue_lock);
        return ret;
}

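/*
 * Readahead is stored internally as a page count; shifting by
 * (PAGE_CACHE_SHIFT - 10) converts between pages and kilobytes.  With 4K
 * pages PAGE_CACHE_SHIFT is 12, so ra_pages = ra_kb >> 2.
 */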
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        unsigned long ra_kb = q->backing_dev_info.ra_pages <<
                                        (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}

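/*
 * Sector counts are reported in kilobytes: a sector is 512 bytes, so two
 * sectors make one kilobyte, hence the shift right by one.
 */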
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
        if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
                return queue_var_show(queue_max_segment_size(q), page);

        return queue_var_show(PAGE_CACHE_SIZE, page);
}

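/*
 * Read-only I/O topology and discard limits, exported exactly as the
 * driver set them up in q->limits.  discard_max_bytes converts
 * max_discard_sectors to bytes with << 9 (512-byte sectors).
 */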
static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.discard_granularity, page);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->limits.max_discard_sectors << 9, page);
}

static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_discard_zeroes_data(q), page);
}

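/*
 * max_sectors_kb is the writable soft cap on request size.  The new value
 * must lie between one page and the hardware limit (max_hw_sectors_kb);
 * the << 1 converts kilobytes back to 512-byte sectors.
 */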
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        q->limits.max_sectors = max_sectors_kb << 1;
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}

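/*
 * The attribute is named "rotational" while the queue flag is NONROT, so
 * both show and store invert the sense: writing 0 marks the device as
 * non-rotational (e.g. an SSD), writing 1 as a spinning disk.
 */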
static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
        return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        if (nm)
                queue_flag_clear(QUEUE_FLAG_NONROT, q);
        else
                queue_flag_set(QUEUE_FLAG_NONROT, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

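/*
 * nomerges folds two flags into one value: 2 disables all merging
 * (NOMERGES), 1 disables only the more expensive merge lookups while
 * still allowing simple one-hit merges (NOXMERGES), 0 enables everything.
 * For example:
 *
 *      echo 2 > /sys/block/sda/queue/nomerges
 */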
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show((blk_queue_nomerges(q) << 1) |
                               blk_queue_noxmerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
        if (nm == 2)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else if (nm)
                queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

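/*
 * rq_affinity: when set, request completion is steered back to the CPU
 * group that submitted the request (QUEUE_FLAG_SAME_COMP).  The store is
 * only available when the generic SMP helpers are built in; otherwise a
 * write fails with -EINVAL.
 */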
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

        return queue_var_show(set, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
        if (val)
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
        else
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
                                   size_t count)
{
        unsigned long stats;
        ssize_t ret = queue_var_store(&stats, page, count);

        spin_lock_irq(q->queue_lock);
        if (stats)
                queue_flag_set(QUEUE_FLAG_IO_STAT, q);
        else
                queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

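/*
 * The attribute table.  Each entry names a file under
 * /sys/block/<disk>/queue/ and wires it to the handlers above; the mode
 * bits determine whether the file is read-only (S_IRUGO) or also writable
 * by its owner (S_IWUSR).
 */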
static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_max_segments_entry = {
        .attr = {.name = "max_segments", .mode = S_IRUGO },
        .show = queue_max_segments_show,
};

static struct queue_sysfs_entry queue_max_segment_size_entry = {
        .attr = {.name = "max_segment_size", .mode = S_IRUGO },
        .show = queue_max_segment_size_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_discard_granularity_entry = {
        .attr = {.name = "discard_granularity", .mode = S_IRUGO },
        .show = queue_discard_granularity_show,
};

static struct queue_sysfs_entry queue_discard_max_entry = {
        .attr = {.name = "discard_max_bytes", .mode = S_IRUGO },
        .show = queue_discard_max_show,
};

static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .attr = {.name = "discard_zeroes_data", .mode = S_IRUGO },
        .show = queue_discard_zeroes_data_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nonrot_show,
        .store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_iostats_show,
        .store = queue_iostats_store,
};

static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_max_segments_entry.attr,
        &queue_max_segment_size_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        NULL,
};

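/*
 * Generic show/store dispatch for the queue kobject.  Both paths take
 * sysfs_lock and bail out with -ENOENT once the queue has been marked
 * dead, so the handlers above never race with queue teardown.
 */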
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request().  It is invoked through the queue's kobject
 *     release method once the last reference to the queue is dropped;
 *     typically when a block device is being de-registered.  Currently, its
 *     primary task is to free all the &struct request structures that were
 *     allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(blk_requestq_cachep, q);
}

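/*
 * Glue tying the attribute table, the dispatch functions and the release
 * handler to the kobject embedded in every request_queue.
 */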
static const struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .default_attrs = default_attrs,
        .release = blk_release_queue,
};

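/*
 * Called during disk registration (the add_disk() path): creates the
 * "queue" directory under the disk's device and, for request-based
 * queues, registers the elevator attributes as well.  For a disk named
 * sda, for example, this yields /sys/block/sda/queue/.
 */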
int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct device *dev = disk_to_dev(disk);
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;

        ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue");
        if (ret < 0)
                return ret;

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                blk_trace_remove_sysfs(disk_to_dev(disk));
                return ret;
        }

        return 0;
}

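/*
 * Undoes blk_register_queue() in reverse order: elevator first, then the
 * "queue" kobject and the blktrace attributes, and finally the device
 * reference taken at registration time.
 */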
void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->request_fn)
                elv_unregister_queue(q);

        kobject_uevent(&q->kobj, KOBJ_REMOVE);
        kobject_del(&q->kobj);
        blk_trace_remove_sysfs(disk_to_dev(disk));
        kobject_put(&disk_to_dev(disk)->kobj);
}