/*
 * Fast and scalable bitmap tagging variant. Uses sparser bitmaps spread
 * over multiple cachelines to avoid ping-pong between multiple submitters
 * or submitter and completer. Uses rolling wakeups to avoid falling off
 * the scaling cliff when we run out of tags and have to start putting
 * submitters to sleep.
 *
 * Uses active queue tracking to support fairer distribution of tags
 * between multiple submitters when a shared tag map is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/random.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

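/*
 * Scan each cacheline-aligned word for a clear bit; returns true as soon
 * as any free tag is found.
 */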
static bool bt_has_free_tags(struct blk_mq_bitmap_tags *bt)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int ret;

		ret = find_first_zero_bit(&bm->word, bm->depth);
		if (ret < bm->depth)
			return true;
	}

	return false;
}

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return bt_has_free_tags(&tags->bitmap_tags);
}

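/*
 * Wait-queue indices wrap around a small ring; the mask below relies on
 * BT_WAIT_QUEUES being a power of two. bt_index_atomic_inc() can lose a
 * concurrent update and skip an advance, which should be harmless since
 * the index is only used as a round-robin hint.
 */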
static inline int bt_index_inc(int index)
{
	return (index + 1) & (BT_WAIT_QUEUES - 1);
}

static inline void bt_index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = bt_index_inc(old);
	atomic_cmpxchg(index, old, new);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all potential sleepers on normal (non-reserved) tags
 */
static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
{
	struct blk_mq_bitmap_tags *bt;
	int i, wake_index;

	bt = &tags->bitmap_tags;
	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait))
			wake_up(&bs->wait);

		wake_index = bt_index_inc(wake_index);
	}
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct blk_mq_bitmap_tags *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
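	/*
	 * A worked example (illustrative numbers, not from the original
	 * source): with bt->depth == 128 and 3 active queues, each active
	 * hctx may use up to (128 + 3 - 1) / 3 == 43 tags, never fewer
	 * than 4.
	 */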
	depth = max((bt->depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

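/*
 * Find and claim a free bit within a single word. The search starts at
 * @last_tag and wraps around to the start of the word once before giving
 * up. Returns the bit number on success, -1 if the word is exhausted.
 */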
static int __bt_get_word(struct blk_align_bitmap *bm, unsigned int last_tag)
{
	int tag, org_last_tag, end;

	org_last_tag = last_tag;
	end = bm->depth;
	do {
restart:
		tag = find_next_zero_bit(&bm->word, end, last_tag);
		if (unlikely(tag >= end)) {
			/*
			 * We started with an offset, start from 0 to
			 * exhaust the map.
			 */
			if (org_last_tag && last_tag) {
				end = last_tag;
				last_tag = 0;
				goto restart;
			}
			return -1;
		}
		last_tag = tag + 1;
	} while (test_and_set_bit_lock(tag, &bm->word));

	return tag;
}

/*
 * Straightforward bitmap tag implementation, where each bit is a tag
 * (cleared == free, and set == busy). The small twist is using per-cpu
 * last_tag caches, which blk-mq stores in the blk_mq_ctx software queue
 * contexts. This enables us to drastically limit the space searched,
 * without dirtying an extra shared cacheline like we would if we stored
 * the cache value inside the shared blk_mq_bitmap_tags structure. On top
 * of that, each word of tags is in a separate cacheline. This means that
 * multiple users will tend to stick to different cachelines, at least
 * until the map is exhausted.
 */
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct blk_mq_bitmap_tags *bt,
		    unsigned int *tag_cache)
{
	unsigned int last_tag, org_last_tag;
	int index, i, tag;

	if (!hctx_may_queue(hctx, bt))
		return -1;

	last_tag = org_last_tag = *tag_cache;
	index = TAG_TO_INDEX(bt, last_tag);

	for (i = 0; i < bt->map_nr; i++) {
		tag = __bt_get_word(&bt->map[index], TAG_TO_BIT(bt, last_tag));
		if (tag != -1) {
			tag += (index << bt->bits_per_word);
			goto done;
		}

		last_tag = 0;
		if (++index >= bt->map_nr)
			index = 0;
	}

	*tag_cache = 0;
	return -1;

	/*
	 * Only update the cache from the allocation path, if we ended
	 * up using the specific cached tag.
	 */
done:
	if (tag == org_last_tag) {
		last_tag = tag + 1;
		if (last_tag >= bt->depth - 1)
			last_tag = 0;

		*tag_cache = last_tag;
	}

	return tag;
}

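/*
 * Hand out wait queues round-robin per hctx so that sleepers spread over
 * all BT_WAIT_QUEUES queues; reserved-tag allocations (no hctx) always
 * use the first queue.
 */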
static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
					 struct blk_mq_hw_ctx *hctx)
{
	struct bt_wait_state *bs;
	int wait_index;

	if (!hctx)
		return &bt->bs[0];

	wait_index = atomic_read(&hctx->wait_index);
	bs = &bt->bs[wait_index];
	bt_index_atomic_inc(&hctx->wait_index);
	return bs;
}

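/*
 * Slow path: try the fast path first, and if the map is exhausted and
 * the caller may block, sleep on a wait queue until a tag is freed. The
 * ctx/hctx mapping is looked up again after sleeping, since the task may
 * have been migrated to a different CPU in the meantime.
 */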
static int bt_get(struct blk_mq_alloc_data *data,
		struct blk_mq_bitmap_tags *bt,
		struct blk_mq_hw_ctx *hctx,
		unsigned int *last_tag)
{
	struct bt_wait_state *bs;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt, last_tag);
	if (tag != -1)
		return tag;

	if (!(data->gfp & __GFP_WAIT))
		return -1;

	bs = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt, last_tag);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = data->q->mq_ops->map_queue(data->q,
				data->ctx->cpu);
		if (data->reserved) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			last_tag = &data->ctx->last_tag;
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&bs->wait, &wait);
		bs = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&bs->wait, &wait);
	return tag;
}

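/*
 * Externally visible tag numbering: tags [0, nr_reserved_tags) are the
 * reserved tags, normal tags follow. Both bitmaps are zero-based, so
 * normal tags are offset by nr_reserved_tags here and translated back in
 * blk_mq_put_tag().
 */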
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
			&data->ctx->last_tag);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag, zero = 0;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (!data->reserved)
		return __blk_mq_get_tag(data);

	return __blk_mq_get_reserved_tag(data);
}

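/*
 * Return the first wait queue with sleepers, searching from wake_index,
 * and move wake_index up to it so later frees continue from that point.
 */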
static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
{
	int i, wake_index;

	wake_index = atomic_read(&bt->wake_index);
	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		struct bt_wait_state *bs = &bt->bs[wake_index];

		if (waitqueue_active(&bs->wait)) {
			int o = atomic_read(&bt->wake_index);
			if (wake_index != o)
				atomic_cmpxchg(&bt->wake_index, o, wake_index);

			return bs;
		}

		wake_index = bt_index_inc(wake_index);
	}

	return NULL;
}

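/*
 * Free a tag and apply the rolling wakeup scheme: sleepers are only woken
 * once wake_cnt tags have been freed against a wait queue, batching the
 * wakeups rather than waking a waiter on every single free.
 */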
static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
{
	const int index = TAG_TO_INDEX(bt, tag);
	struct bt_wait_state *bs;
	int wait_cnt;

	/*
	 * The unlock memory barrier needs to order access to the request
	 * in the free path against clearing the tag bit.
	 */
	clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);

	bs = bt_wake_ptr(bt);
	if (!bs)
		return;

	wait_cnt = atomic_dec_return(&bs->wait_cnt);
	if (wait_cnt == 0) {
wake:
		atomic_add(bt->wake_cnt, &bs->wait_cnt);
		bt_index_atomic_inc(&bt->wake_index);
		wake_up(&bs->wait);
	} else if (wait_cnt < 0) {
		wait_cnt = atomic_inc_return(&bs->wait_cnt);
		if (!wait_cnt)
			goto wake;
	}
}

static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
	BUG_ON(tag >= tags->nr_tags);

	bt_clear_tag(&tags->bitmap_tags, tag);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
				      unsigned int tag)
{
	BUG_ON(tag >= tags->nr_reserved_tags);

	bt_clear_tag(&tags->breserved_tags, tag);
}

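/*
 * Release a tag back to the map it came from; normal tags are translated
 * back to a bitmap offset and remembered in the caller's last_tag cache.
 */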
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag,
		    unsigned int *last_tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		__blk_mq_put_tag(tags, real_tag);
		*last_tag = real_tag;
	} else
		__blk_mq_put_reserved_tag(tags, tag);
}

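/*
 * Record every currently free tag (clear bit in the map) as a set bit in
 * @free_map, offset by @off.
 */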
static void bt_for_each_free(struct blk_mq_bitmap_tags *bt,
			     unsigned long *free_map, unsigned int off)
{
	int i;

	for (i = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];
		int bit = 0;

		do {
			bit = find_next_zero_bit(&bm->word, bm->depth, bit);
			if (bit >= bm->depth)
				break;

			__set_bit(bit + off, free_map);
			bit++;
		} while (1);

		off += (1 << bt->bits_per_word);
	}
}

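/*
 * Snapshot the free state of both tag maps into a temporary bitmap (a set
 * bit means the tag is free) and hand it to @fn, so the callback can treat
 * the remaining clear bits as busy tags.
 */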
void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
		void (*fn)(void *, unsigned long *), void *data)
{
	unsigned long *tag_map;
	size_t map_size;

	map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		return;

	bt_for_each_free(&tags->bitmap_tags, tag_map, tags->nr_reserved_tags);
	if (tags->nr_reserved_tags)
		bt_for_each_free(&tags->breserved_tags, tag_map, 0);

	fn(data, tag_map);
	kfree(tag_map);
}
EXPORT_SYMBOL(blk_mq_tag_busy_iter);

static unsigned int bt_unused_tags(struct blk_mq_bitmap_tags *bt)
{
	unsigned int i, used;

	for (i = 0, used = 0; i < bt->map_nr; i++) {
		struct blk_align_bitmap *bm = &bt->map[i];

		used += bitmap_weight(&bm->word, bm->depth);
	}

	return bt->depth - used;
}

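/*
 * Distribute @depth tags across the per-cacheline words and size the
 * wakeup batch: depth / 4, clamped to at least 1 and at most
 * BT_WAIT_BATCH.
 */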
static void bt_update_count(struct blk_mq_bitmap_tags *bt,
			    unsigned int depth)
{
	unsigned int tags_per_word = 1U << bt->bits_per_word;
	unsigned int map_depth = depth;

	if (depth) {
		int i;

		for (i = 0; i < bt->map_nr; i++) {
			bt->map[i].depth = min(map_depth, tags_per_word);
			map_depth -= bt->map[i].depth;
		}
	}

	bt->wake_cnt = BT_WAIT_BATCH;
	if (bt->wake_cnt > depth / 4)
		bt->wake_cnt = max(1U, depth / 4);

	bt->depth = depth;
}

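/*
 * Allocate the bitmap words and wait queues. For small tag spaces,
 * bits_per_word is reduced so the tags still spread over several
 * cachelines. As a worked example (illustrative numbers): on a 64-bit
 * build with depth == 32, tags_per_word shrinks 64 -> 32 -> 16 -> 8,
 * giving four words of 8 tags each.
 */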
static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
		    int node, bool reserved)
{
	int i;

	bt->bits_per_word = ilog2(BITS_PER_LONG);

	/*
	 * Depth can be zero for reserved tags, that's not a failure
	 * condition.
	 */
	if (depth) {
		unsigned int nr, tags_per_word;

		tags_per_word = (1 << bt->bits_per_word);

		/*
		 * If the tag space is small, shrink the number of tags
		 * per word so we spread over a few cachelines, at least.
		 * If there are fewer than 4 tags, just forget about it,
		 * it's not going to work optimally anyway.
		 */
		if (depth >= 4) {
			while (tags_per_word * 4 > depth) {
				bt->bits_per_word--;
				tags_per_word = (1 << bt->bits_per_word);
			}
		}

		nr = ALIGN(depth, tags_per_word) / tags_per_word;
		bt->map = kzalloc_node(nr * sizeof(struct blk_align_bitmap),
					GFP_KERNEL, node);
		if (!bt->map)
			return -ENOMEM;

		bt->map_nr = nr;
	}

	bt->bs = kzalloc(BT_WAIT_QUEUES * sizeof(*bt->bs), GFP_KERNEL);
	if (!bt->bs) {
		kfree(bt->map);
		return -ENOMEM;
	}

	bt_update_count(bt, depth);

	for (i = 0; i < BT_WAIT_QUEUES; i++) {
		init_waitqueue_head(&bt->bs[i].wait);
		atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
	}

	return 0;
}

static void bt_free(struct blk_mq_bitmap_tags *bt)
{
	kfree(bt->map);
	kfree(bt->bs);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	if (bt_alloc(&tags->bitmap_tags, depth, node, false))
		goto enomem;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, node, true))
		goto enomem;

	return tags;
enomem:
	bt_free(&tags->bitmap_tags);
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags, int node)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	bt_free(&tags->bitmap_tags);
	bt_free(&tags->breserved_tags);
	kfree(tags);
}

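/*
 * Seed the per-ctx last_tag hint with a random offset, so different
 * software queues start their searches in different parts of the map.
 */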
void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *tag)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;

	*tag = prandom_u32() % depth;
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * We don't need to (and can't) update reserved tags here; they
	 * remain static and should never need resizing.
	 */
	bt_update_count(&tags->bitmap_tags, tdepth);
	blk_mq_tag_wakeup_all(tags);
	return 0;
}

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			tags->bitmap_tags.bits_per_word);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}