/*
 * Functions to sequence FLUSH and FUA writes.
 *
 * Copyright (C) 2011		Max Planck Institute for Gravitational Physics
 * Copyright (C) 2011		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * REQ_{FLUSH|FUA} requests are decomposed to sequences consisting of three
 * optional steps - PREFLUSH, DATA and POSTFLUSH - according to the request
 * properties and hardware capability.
 *
 * If a request doesn't have data, only REQ_FLUSH makes sense, which
 * indicates a simple flush request.  If there is data, REQ_FLUSH indicates
 * that the device cache should be flushed before the data is executed, and
 * REQ_FUA means that the data must be on non-volatile media on request
 * completion.
 *
 * If the device doesn't have a writeback cache, FLUSH and FUA don't make
 * any difference.  The requests are either completed immediately if there's
 * no data or executed as normal requests otherwise.
 *
 * If the device has a writeback cache and supports FUA, REQ_FLUSH is
 * translated to PREFLUSH but REQ_FUA is passed down directly with DATA.
 *
 * If the device has a writeback cache and doesn't support FUA, REQ_FLUSH
 * is translated to PREFLUSH and REQ_FUA to POSTFLUSH.
 *
 * The actual execution of flush is double buffered.  Whenever a request
 * needs to execute PRE or POSTFLUSH, it queues at
 * q->flush_queue[q->flush_pending_idx].  Once certain criteria are met, a
 * flush is issued and the pending_idx is toggled.  When the flush
 * completes, all the requests which were pending proceed to the next
 * step.  This allows arbitrary merging of different types of FLUSH/FUA
 * requests.
 *
 * Currently, the following conditions are used to determine when to issue
 * a flush.
 *
 * C1. At any given time, only one flush shall be in progress.  This makes
 *     double buffering sufficient.
 *
 * C2. Flush is deferred if any request is executing DATA of its sequence.
 *     This avoids issuing separate POSTFLUSHes for requests which shared
 *     PREFLUSH.
 *
 * C3. The second condition is ignored if there is a request which has
 *     waited longer than FLUSH_PENDING_TIMEOUT.  This is to avoid
 *     starvation in the unlikely case where there is a continuous stream
 *     of FUA (without FLUSH) requests.
 *
 * For devices which support FUA, it isn't clear whether C2 (and thus C3)
 * is beneficial.
 *
 * Note that a sequenced FLUSH/FUA request with DATA is completed twice.
 * Once while executing DATA and again after the whole sequence is
 * complete.  The first completion updates the contained bio but doesn't
 * finish it so that the bio submitter is notified only after the whole
 * sequence is complete.  This is implemented by testing REQ_FLUSH_SEQ in
 * req_bio_endio().
 *
 * The above peculiarity requires that each FLUSH/FUA request has only one
 * bio attached to it, which is guaranteed as they aren't allowed to be
 * merged in the usual way.
 */
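/*
 * Worked example (illustrative, not part of the original comment): a
 * WRITE carrying both REQ_FLUSH and REQ_FUA with data, issued to a queue
 * which advertises REQ_FLUSH but not REQ_FUA, is sequenced
 * PREFLUSH -> DATA -> POSTFLUSH.  On a queue advertising both flags, the
 * same write is sequenced PREFLUSH -> DATA and REQ_FUA is left set for
 * the device to honor directly.
 */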

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/blk-mq.h>

#include "blk.h"
#include "blk-mq.h"

/* FLUSH/FUA sequences */
enum {
	REQ_FSEQ_PREFLUSH = (1 << 0), /* pre-flushing in progress */
	REQ_FSEQ_DATA = (1 << 1), /* data write in progress */
	REQ_FSEQ_POSTFLUSH = (1 << 2), /* post-flushing in progress */
	REQ_FSEQ_DONE = (1 << 3),

	REQ_FSEQ_ACTIONS = REQ_FSEQ_PREFLUSH | REQ_FSEQ_DATA |
			   REQ_FSEQ_POSTFLUSH,

	/*
	 * If flush has been pending longer than the following timeout,
	 * it's issued even if flush_data requests are still in flight.
	 */
	FLUSH_PENDING_TIMEOUT = 5 * HZ,
};

static bool blk_kick_flush(struct request_queue *q);

static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
{
	unsigned int policy = 0;

	if (blk_rq_sectors(rq))
		policy |= REQ_FSEQ_DATA;

	if (fflags & REQ_FLUSH) {
		if (rq->cmd_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;
	}
	return policy;
}
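
/*
 * Note that the policy mask doubles as the request's step list:
 * blk_insert_flush() seeds rq->flush.seq with the complement
 * (REQ_FSEQ_ACTIONS & ~policy), so blk_flush_cur_seq() visits exactly
 * the steps named in the policy, in ascending bit order.
 */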

static unsigned int blk_flush_cur_seq(struct request *rq)
{
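	/*
	 * The next pending step is the lowest REQ_FSEQ_* bit still clear
	 * in ->flush.seq; ffz() finds the first zero bit.
	 */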
	return 1 << ffz(rq->flush.seq);
}

static void blk_flush_restore_request(struct request *rq)
{
	/*
	 * After flush data completion, @rq->bio is %NULL but we need to
	 * complete the bio again.  @rq->biotail is guaranteed to equal the
	 * original @rq->bio.  Restore it.
	 */
	rq->bio = rq->biotail;

	/* make @rq a normal request */
	rq->cmd_flags &= ~REQ_FLUSH_SEQ;
	rq->end_io = rq->flush.saved_end_io;

	blk_clear_rq_complete(rq);
}

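/*
 * Queue @rq for (re)dispatch.  Returns %true if the caller should kick
 * the legacy queue; the blk-mq path kicks its own requeue list here and
 * thus returns %false.
 */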
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
	if (rq->q->mq_ops) {
		struct request_queue *q = rq->q;

		blk_mq_add_to_requeue_list(rq, add_front);
		blk_mq_kick_requeue_list(q);
		return false;
	} else {
		if (add_front)
			list_add(&rq->queuelist, &rq->q->queue_head);
		else
			list_add_tail(&rq->queuelist, &rq->q->queue_head);
		return true;
	}
}

/**
 * blk_flush_complete_seq - complete flush sequence
 * @rq: FLUSH/FUA request being sequenced
 * @seq: sequences to complete (mask of %REQ_FSEQ_*, can be zero)
 * @error: whether an error occurred
 *
 * @rq just completed @seq part of its flush sequence, record the
 * completion and trigger the next step.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if requests were added to the dispatch queue, %false otherwise.
 */
static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
				   int error)
{
	struct request_queue *q = rq->q;
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	bool queued = false, kicked;

	BUG_ON(rq->flush.seq & seq);
	rq->flush.seq |= seq;

	if (likely(!error))
		seq = blk_flush_cur_seq(rq);
	else
		seq = REQ_FSEQ_DONE;

	switch (seq) {
	case REQ_FSEQ_PREFLUSH:
	case REQ_FSEQ_POSTFLUSH:
		/* queue for flush */
		if (list_empty(pending))
			q->flush_pending_since = jiffies;
		list_move_tail(&rq->flush.list, pending);
		break;

	case REQ_FSEQ_DATA:
		list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
		queued = blk_flush_queue_rq(rq, true);
		break;

	case REQ_FSEQ_DONE:
		/*
		 * @rq was previously adjusted by blk_insert_flush() for
		 * flush sequencing and may already have gone through the
		 * flush data request completion path.  Restore @rq for
		 * normal completion and end it.
		 */
		BUG_ON(!list_empty(&rq->queuelist));
		list_del_init(&rq->flush.list);
		blk_flush_restore_request(rq);
		if (q->mq_ops)
			blk_mq_end_io(rq, error);
		else
			__blk_end_request_all(rq, error);
		break;

	default:
		BUG();
	}

	kicked = blk_kick_flush(q);
	return kicked | queued;
}

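/*
 * flush_end_io - completion handler for the flush request itself.
 * Retires the just-completed flush by toggling flush_running_idx,
 * advances every request that was waiting on it to its next sequence
 * step, and considers issuing the flush pending on the other buffer.
 */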
static void flush_end_io(struct request *flush_rq, int error)
{
	struct request_queue *q = flush_rq->q;
	struct list_head *running;
	bool queued = false;
	struct request *rq, *n;
	unsigned long flags = 0;

	if (q->mq_ops) {
		spin_lock_irqsave(&q->mq_flush_lock, flags);
		q->flush_rq->tag = -1;
	}

	running = &q->flush_queue[q->flush_running_idx];
	BUG_ON(q->flush_pending_idx == q->flush_running_idx);

	/* account completion of the flush request */
	q->flush_running_idx ^= 1;

	if (!q->mq_ops)
		elv_completed_request(q, flush_rq);

	/* and push the waiting requests to the next stage */
	list_for_each_entry_safe(rq, n, running, flush.list) {
		unsigned int seq = blk_flush_cur_seq(rq);

		BUG_ON(seq != REQ_FSEQ_PREFLUSH && seq != REQ_FSEQ_POSTFLUSH);
		queued |= blk_flush_complete_seq(rq, seq, error);
	}

	/*
	 * Kick the queue to avoid stall for two cases:
	 * 1. Moving a request silently to empty queue_head may stall the
	 *    queue.
	 * 2. When a flush request is running in a non-queueable queue, the
	 *    queue is held.  Restart the queue after the flush request is
	 *    finished to avoid a stall.
	 * This function is called from the request completion path and
	 * calling directly into request_fn may confuse the driver.  Always
	 * use kblockd.
	 */
	if (queued || q->flush_queue_delayed) {
		WARN_ON(q->mq_ops);
		blk_run_queue_async(q);
	}
	q->flush_queue_delayed = 0;
	if (q->mq_ops)
		spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_kick_flush - consider issuing flush request
 * @q: request_queue being kicked
 *
 * Flush related states of @q have changed, consider issuing flush request.
 * Please read the comment at the top of this file for more info.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock or q->mq_flush_lock)
 *
 * RETURNS:
 * %true if flush was issued, %false otherwise.
 */
static bool blk_kick_flush(struct request_queue *q)
{
	struct list_head *pending = &q->flush_queue[q->flush_pending_idx];
	struct request *first_rq =
		list_first_entry(pending, struct request, flush.list);

	/* C1 described at the top of this file */
	if (q->flush_pending_idx != q->flush_running_idx || list_empty(pending))
		return false;

	/* C2 and C3 */
	if (!list_empty(&q->flush_data_in_flight) &&
	    time_before(jiffies,
			q->flush_pending_since + FLUSH_PENDING_TIMEOUT))
		return false;

	/*
	 * Issue flush and toggle pending_idx.  This makes pending_idx
	 * different from running_idx, which means flush is in flight.
	 */
	q->flush_pending_idx ^= 1;

	blk_rq_init(q, q->flush_rq);
	if (q->mq_ops)
		blk_mq_clone_flush_request(q->flush_rq, first_rq);

	q->flush_rq->cmd_type = REQ_TYPE_FS;
	q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
	q->flush_rq->rq_disk = first_rq->rq_disk;
	q->flush_rq->end_io = flush_end_io;

	return blk_flush_queue_rq(q->flush_rq, false);
}

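/*
 * flush_data_end_io - completion handler for the DATA step of a flush
 * sequence on a legacy (!mq) queue; mq_flush_data_end_io() below is the
 * blk-mq counterpart.
 */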
static void flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_run_queue_async(q);
}

static void mq_flush_data_end_io(struct request *rq, int error)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	struct blk_mq_ctx *ctx;
	unsigned long flags;

	ctx = rq->mq_ctx;
	hctx = q->mq_ops->map_queue(q, ctx->cpu);

	/*
	 * After populating an empty queue, kick it to avoid stall.  Read
	 * the comment in flush_end_io().
	 */
	spin_lock_irqsave(&q->mq_flush_lock, flags);
	if (blk_flush_complete_seq(rq, REQ_FSEQ_DATA, error))
		blk_mq_run_hw_queue(hctx, true);
	spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}

/**
 * blk_insert_flush - insert a new FLUSH/FUA request
 * @rq: request to insert
 *
 * To be called from __elv_add_request() for %ELEVATOR_INSERT_FLUSH
 * insertions, or from __blk_mq_run_hw_queue() to dispatch requests.
 * @rq is being submitted.  Analyze what needs to be done and put it on the
 * right queue.
 *
 * CONTEXT:
 * spin_lock_irq(q->queue_lock) in !mq case
 */
void blk_insert_flush(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned int fflags = q->flush_flags;	/* may change, cache */
	unsigned int policy = blk_flush_policy(fflags, rq);

	/*
	 * @policy now records what operations need to be done.  Adjust
	 * REQ_FLUSH and FUA for the driver: REQ_FLUSH is always stripped
	 * (any needed PREFLUSH is issued as a separate flush request), and
	 * REQ_FUA is stripped when the device can't honor it (a POSTFLUSH
	 * emulates it instead).
	 */
	rq->cmd_flags &= ~REQ_FLUSH;
	if (!(fflags & REQ_FUA))
		rq->cmd_flags &= ~REQ_FUA;

	/*
	 * An empty flush handed down from a stacking driver may
	 * translate into nothing if the underlying device does not
	 * advertise a write-back cache.  In this case, simply
	 * complete the request.
	 */
	if (!policy) {
		if (q->mq_ops)
			blk_mq_end_io(rq, 0);
		else
			__blk_end_bidi_request(rq, 0, 0, 0);
		return;
	}

	BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */

	/*
	 * If there's data but a flush is not necessary, the request can be
	 * processed directly without going through flush machinery.  Queue
	 * it for normal execution.
	 */
	if ((policy & REQ_FSEQ_DATA) &&
	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
		if (q->mq_ops) {
			blk_mq_insert_request(rq, false, false, true);
		} else
			list_add_tail(&rq->queuelist, &q->queue_head);
		return;
	}

	/*
	 * @rq should go through flush machinery.  Mark it part of flush
	 * sequence and submit for further processing.
	 */
	memset(&rq->flush, 0, sizeof(rq->flush));
	INIT_LIST_HEAD(&rq->flush.list);
	rq->cmd_flags |= REQ_FLUSH_SEQ;
	rq->flush.saved_end_io = rq->end_io; /* Usually NULL */
	if (q->mq_ops) {
		rq->end_io = mq_flush_data_end_io;

		spin_lock_irq(&q->mq_flush_lock);
		blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
		spin_unlock_irq(&q->mq_flush_lock);
		return;
	}
	rq->end_io = flush_data_end_io;

	blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
}

/**
 * blkdev_issue_flush - queue a flush
 * @bdev: blockdev to issue flush for
 * @gfp_mask: memory allocation flags (for bio_alloc)
 * @error_sector: error sector
 *
 * Description:
 *    Issue a flush for the block device in question.  The caller can supply
 *    room for storing the error offset in case of a flush error, if they
 *    wish to.  The flush is submitted and waited upon before this function
 *    returns.
 */
int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
		sector_t *error_sector)
{
	struct request_queue *q;
	struct bio *bio;
	int ret = 0;

	if (bdev->bd_disk == NULL)
		return -ENXIO;

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	/*
	 * Some block devices may not have their queue correctly set up here
	 * (e.g. a loop device without a backing file), and so issuing a flush
	 * here will panic.  Ensure there is a request function before issuing
	 * the flush.
	 */
	if (!q->make_request_fn)
		return -ENXIO;

	bio = bio_alloc(gfp_mask, 0);
	bio->bi_bdev = bdev;

	ret = submit_bio_wait(WRITE_FLUSH, bio);

	/*
	 * The driver must store the error location in ->bi_sector, if
	 * it supports it.  For non-stacked drivers, this should be
	 * copied from blk_rq_pos(rq).
	 */
	if (error_sector)
		*error_sector = bio->bi_iter.bi_sector;

	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_flush);
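
/*
 * Example use (illustrative, not from this file): a caller wanting the
 * device cache flushed, e.g. a filesystem during fsync, might do
 *
 *	ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
 *
 * and treat a negative return value as an I/O error.
 */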

void blk_mq_init_flush(struct request_queue *q)
{
	spin_lock_init(&q->mq_flush_lock);
}