Root/block/cfq-iosched.c

1/*
2 * CFQ, or complete fairness queueing, disk scheduler.
3 *
4 * Based on ideas from a previously unfinished io
5 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
6 *
7 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
8 */
9#include <linux/module.h>
10#include <linux/slab.h>
11#include <linux/blkdev.h>
12#include <linux/elevator.h>
13#include <linux/jiffies.h>
14#include <linux/rbtree.h>
15#include <linux/ioprio.h>
16#include <linux/blktrace_api.h>
17#include "cfq.h"
18
19/*
20 * tunables
21 */
22/* max queue in one round of service */
23static const int cfq_quantum = 8;
24static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
25/* maximum backwards seek, in KiB */
26static const int cfq_back_max = 16 * 1024;
27/* penalty of a backwards seek */
28static const int cfq_back_penalty = 2;
29static const int cfq_slice_sync = HZ / 10;
30static int cfq_slice_async = HZ / 25;
31static const int cfq_slice_async_rq = 2;
32static int cfq_slice_idle = HZ / 125;
33static int cfq_group_idle = HZ / 125;
34static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
35static const int cfq_hist_divisor = 4;
36
37/*
38 * offset from end of service tree
39 */
40#define CFQ_IDLE_DELAY (HZ / 5)
41
42/*
43 * below this threshold, we consider thinktime immediate
44 */
45#define CFQ_MIN_TT (2)
46
47#define CFQ_SLICE_SCALE (5)
48#define CFQ_HW_QUEUE_MIN (5)
49#define CFQ_SERVICE_SHIFT 12
50
51#define CFQQ_SEEK_THR (sector_t)(8 * 100)
52#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
53#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
54#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
55
56#define RQ_CIC(rq) \
57    ((struct cfq_io_context *) (rq)->elevator_private)
58#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
59#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elevator_private3)
60
61static struct kmem_cache *cfq_pool;
62static struct kmem_cache *cfq_ioc_pool;
63
64static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
65static struct completion *ioc_gone;
66static DEFINE_SPINLOCK(ioc_gone_lock);
67
68static DEFINE_SPINLOCK(cic_index_lock);
69static DEFINE_IDA(cic_index_ida);
70
71#define CFQ_PRIO_LISTS IOPRIO_BE_NR
72#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
73#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
74
75#define sample_valid(samples) ((samples) > 80)
76#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
77
78/*
79 * Most of our rbtree usage is for sorting with min extraction, so
80 * if we cache the leftmost node we don't have to walk down the tree
81 * to find it. Idea borrowed from Ingo Molnars CFS scheduler. We should
82 * move this into the elevator for the rq sorting as well.
83 */
84struct cfq_rb_root {
85    struct rb_root rb;
86    struct rb_node *left;
87    unsigned count;
88    unsigned total_weight;
89    u64 min_vdisktime;
90    struct rb_node *active;
91};
92#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
93            .count = 0, .min_vdisktime = 0, }
94
95/*
96 * Per process-grouping structure
97 */
98struct cfq_queue {
99    /* reference count */
100    atomic_t ref;
101    /* various state flags, see below */
102    unsigned int flags;
103    /* parent cfq_data */
104    struct cfq_data *cfqd;
105    /* service_tree member */
106    struct rb_node rb_node;
107    /* service_tree key */
108    unsigned long rb_key;
109    /* prio tree member */
110    struct rb_node p_node;
111    /* prio tree root we belong to, if any */
112    struct rb_root *p_root;
113    /* sorted list of pending requests */
114    struct rb_root sort_list;
115    /* if fifo isn't expired, next request to serve */
116    struct request *next_rq;
117    /* requests queued in sort_list */
118    int queued[2];
119    /* currently allocated requests */
120    int allocated[2];
121    /* fifo list of requests in sort_list */
122    struct list_head fifo;
123
124    /* time when queue got scheduled in to dispatch first request. */
125    unsigned long dispatch_start;
126    unsigned int allocated_slice;
127    unsigned int slice_dispatch;
128    /* time when first request from queue completed and slice started. */
129    unsigned long slice_start;
130    unsigned long slice_end;
131    long slice_resid;
132
133    /* pending metadata requests */
134    int meta_pending;
135    /* number of requests that are on the dispatch list or inside driver */
136    int dispatched;
137
138    /* io prio of this group */
139    unsigned short ioprio, org_ioprio;
140    unsigned short ioprio_class, org_ioprio_class;
141
142    pid_t pid;
143
144    u32 seek_history;
145    sector_t last_request_pos;
146
147    struct cfq_rb_root *service_tree;
148    struct cfq_queue *new_cfqq;
149    struct cfq_group *cfqg;
150    struct cfq_group *orig_cfqg;
151    /* Number of sectors dispatched from queue in single dispatch round */
152    unsigned long nr_sectors;
153};
154
155/*
156 * First index in the service_trees.
157 * IDLE is handled separately, so it has negative index
158 */
159enum wl_prio_t {
160    BE_WORKLOAD = 0,
161    RT_WORKLOAD = 1,
162    IDLE_WORKLOAD = 2,
163    CFQ_PRIO_NR,
164};
165
166/*
167 * Second index in the service_trees.
168 */
169enum wl_type_t {
170    ASYNC_WORKLOAD = 0,
171    SYNC_NOIDLE_WORKLOAD = 1,
172    SYNC_WORKLOAD = 2
173};
174
175/* This is per cgroup per device grouping structure */
176struct cfq_group {
177    /* group service_tree member */
178    struct rb_node rb_node;
179
180    /* group service_tree key */
181    u64 vdisktime;
182    unsigned int weight;
183    bool on_st;
184
185    /* number of cfqq currently on this group */
186    int nr_cfqq;
187
188    /*
189     * Per group busy queus average. Useful for workload slice calc. We
190     * create the array for each prio class but at run time it is used
191     * only for RT and BE class and slot for IDLE class remains unused.
192     * This is primarily done to avoid confusion and a gcc warning.
193     */
194    unsigned int busy_queues_avg[CFQ_PRIO_NR];
195    /*
196     * rr lists of queues with requests. We maintain service trees for
197     * RT and BE classes. These trees are subdivided in subclasses
198     * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
199     * class there is no subclassification and all the cfq queues go on
200     * a single tree service_tree_idle.
201     * Counts are embedded in the cfq_rb_root
202     */
203    struct cfq_rb_root service_trees[2][3];
204    struct cfq_rb_root service_tree_idle;
205
206    unsigned long saved_workload_slice;
207    enum wl_type_t saved_workload;
208    enum wl_prio_t saved_serving_prio;
209    struct blkio_group blkg;
210#ifdef CONFIG_CFQ_GROUP_IOSCHED
211    struct hlist_node cfqd_node;
212    atomic_t ref;
213#endif
214    /* number of requests that are on the dispatch list or inside driver */
215    int dispatched;
216};
217
218/*
219 * Per block device queue structure
220 */
221struct cfq_data {
222    struct request_queue *queue;
223    /* Root service tree for cfq_groups */
224    struct cfq_rb_root grp_service_tree;
225    struct cfq_group root_group;
226
227    /*
228     * The priority currently being served
229     */
230    enum wl_prio_t serving_prio;
231    enum wl_type_t serving_type;
232    unsigned long workload_expires;
233    struct cfq_group *serving_group;
234
235    /*
236     * Each priority tree is sorted by next_request position. These
237     * trees are used when determining if two or more queues are
238     * interleaving requests (see cfq_close_cooperator).
239     */
240    struct rb_root prio_trees[CFQ_PRIO_LISTS];
241
242    unsigned int busy_queues;
243
244    int rq_in_driver;
245    int rq_in_flight[2];
246
247    /*
248     * queue-depth detection
249     */
250    int rq_queued;
251    int hw_tag;
252    /*
253     * hw_tag can be
254     * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
255     * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
256     * 0 => no NCQ
257     */
258    int hw_tag_est_depth;
259    unsigned int hw_tag_samples;
260
261    /*
262     * idle window management
263     */
264    struct timer_list idle_slice_timer;
265    struct work_struct unplug_work;
266
267    struct cfq_queue *active_queue;
268    struct cfq_io_context *active_cic;
269
270    /*
271     * async queue for each priority case
272     */
273    struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
274    struct cfq_queue *async_idle_cfqq;
275
276    sector_t last_position;
277
278    /*
279     * tunables, see top of file
280     */
281    unsigned int cfq_quantum;
282    unsigned int cfq_fifo_expire[2];
283    unsigned int cfq_back_penalty;
284    unsigned int cfq_back_max;
285    unsigned int cfq_slice[2];
286    unsigned int cfq_slice_async_rq;
287    unsigned int cfq_slice_idle;
288    unsigned int cfq_group_idle;
289    unsigned int cfq_latency;
290    unsigned int cfq_group_isolation;
291
292    unsigned int cic_index;
293    struct list_head cic_list;
294
295    /*
296     * Fallback dummy cfqq for extreme OOM conditions
297     */
298    struct cfq_queue oom_cfqq;
299
300    unsigned long last_delayed_sync;
301
302    /* List of cfq groups being managed on this device*/
303    struct hlist_head cfqg_list;
304    struct rcu_head rcu;
305};
306
307static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
308
309static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
310                        enum wl_prio_t prio,
311                        enum wl_type_t type)
312{
313    if (!cfqg)
314        return NULL;
315
316    if (prio == IDLE_WORKLOAD)
317        return &cfqg->service_tree_idle;
318
319    return &cfqg->service_trees[prio][type];
320}
321
322enum cfqq_state_flags {
323    CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
324    CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
325    CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
326    CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
327    CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
328    CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
329    CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
330    CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
331    CFQ_CFQQ_FLAG_sync, /* synchronous queue */
332    CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
333    CFQ_CFQQ_FLAG_split_coop, /* shared cfqq will be splitted */
334    CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
335    CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
336};
337
338#define CFQ_CFQQ_FNS(name) \
339static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
340{ \
341    (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
342} \
343static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
344{ \
345    (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
346} \
347static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
348{ \
349    return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
350}
351
352CFQ_CFQQ_FNS(on_rr);
353CFQ_CFQQ_FNS(wait_request);
354CFQ_CFQQ_FNS(must_dispatch);
355CFQ_CFQQ_FNS(must_alloc_slice);
356CFQ_CFQQ_FNS(fifo_expire);
357CFQ_CFQQ_FNS(idle_window);
358CFQ_CFQQ_FNS(prio_changed);
359CFQ_CFQQ_FNS(slice_new);
360CFQ_CFQQ_FNS(sync);
361CFQ_CFQQ_FNS(coop);
362CFQ_CFQQ_FNS(split_coop);
363CFQ_CFQQ_FNS(deep);
364CFQ_CFQQ_FNS(wait_busy);
365#undef CFQ_CFQQ_FNS
366
367#ifdef CONFIG_CFQ_GROUP_IOSCHED
368#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
369    blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
370            cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
371            blkg_path(&(cfqq)->cfqg->blkg), ##args);
372
373#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
374    blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
375                blkg_path(&(cfqg)->blkg), ##args); \
376
377#else
378#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
379    blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
380#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0);
381#endif
382#define cfq_log(cfqd, fmt, args...) \
383    blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
384
385/* Traverses through cfq group service trees */
386#define for_each_cfqg_st(cfqg, i, j, st) \
387    for (i = 0; i <= IDLE_WORKLOAD; i++) \
388        for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
389            : &cfqg->service_tree_idle; \
390            (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
391            (i == IDLE_WORKLOAD && j == 0); \
392            j++, st = i < IDLE_WORKLOAD ? \
393            &cfqg->service_trees[i][j]: NULL) \
394
395
396static inline bool iops_mode(struct cfq_data *cfqd)
397{
398    /*
399     * If we are not idling on queues and it is a NCQ drive, parallel
400     * execution of requests is on and measuring time is not possible
401     * in most of the cases until and unless we drive shallower queue
402     * depths and that becomes a performance bottleneck. In such cases
403     * switch to start providing fairness in terms of number of IOs.
404     */
405    if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
406        return true;
407    else
408        return false;
409}
410
411static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
412{
413    if (cfq_class_idle(cfqq))
414        return IDLE_WORKLOAD;
415    if (cfq_class_rt(cfqq))
416        return RT_WORKLOAD;
417    return BE_WORKLOAD;
418}
419
420
421static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
422{
423    if (!cfq_cfqq_sync(cfqq))
424        return ASYNC_WORKLOAD;
425    if (!cfq_cfqq_idle_window(cfqq))
426        return SYNC_NOIDLE_WORKLOAD;
427    return SYNC_WORKLOAD;
428}
429
430static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
431                    struct cfq_data *cfqd,
432                    struct cfq_group *cfqg)
433{
434    if (wl == IDLE_WORKLOAD)
435        return cfqg->service_tree_idle.count;
436
437    return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
438        + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
439        + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
440}
441
442static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
443                    struct cfq_group *cfqg)
444{
445    return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
446        + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
447}
448
449static void cfq_dispatch_insert(struct request_queue *, struct request *);
450static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
451                       struct io_context *, gfp_t);
452static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
453                        struct io_context *);
454
455static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
456                        bool is_sync)
457{
458    return cic->cfqq[is_sync];
459}
460
461static inline void cic_set_cfqq(struct cfq_io_context *cic,
462                struct cfq_queue *cfqq, bool is_sync)
463{
464    cic->cfqq[is_sync] = cfqq;
465}
466
467#define CIC_DEAD_KEY 1ul
468#define CIC_DEAD_INDEX_SHIFT 1
469
470static inline void *cfqd_dead_key(struct cfq_data *cfqd)
471{
472    return (void *)(cfqd->cic_index << CIC_DEAD_INDEX_SHIFT | CIC_DEAD_KEY);
473}
474
475static inline struct cfq_data *cic_to_cfqd(struct cfq_io_context *cic)
476{
477    struct cfq_data *cfqd = cic->key;
478
479    if (unlikely((unsigned long) cfqd & CIC_DEAD_KEY))
480        return NULL;
481
482    return cfqd;
483}
484
485/*
486 * We regard a request as SYNC, if it's either a read or has the SYNC bit
487 * set (in which case it could also be direct WRITE).
488 */
489static inline bool cfq_bio_sync(struct bio *bio)
490{
491    return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
492}
493
494/*
495 * scheduler run of queue, if there are requests pending and no one in the
496 * driver that will restart queueing
497 */
498static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
499{
500    if (cfqd->busy_queues) {
501        cfq_log(cfqd, "schedule dispatch");
502        kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
503    }
504}
505
506static int cfq_queue_empty(struct request_queue *q)
507{
508    struct cfq_data *cfqd = q->elevator->elevator_data;
509
510    return !cfqd->rq_queued;
511}
512
513/*
514 * Scale schedule slice based on io priority. Use the sync time slice only
515 * if a queue is marked sync and has sync io queued. A sync queue with async
516 * io only, should not get full sync slice length.
517 */
518static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
519                 unsigned short prio)
520{
521    const int base_slice = cfqd->cfq_slice[sync];
522
523    WARN_ON(prio >= IOPRIO_BE_NR);
524
525    return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
526}
527
528static inline int
529cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
530{
531    return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
532}
533
534static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
535{
536    u64 d = delta << CFQ_SERVICE_SHIFT;
537
538    d = d * BLKIO_WEIGHT_DEFAULT;
539    do_div(d, cfqg->weight);
540    return d;
541}
542
543static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
544{
545    s64 delta = (s64)(vdisktime - min_vdisktime);
546    if (delta > 0)
547        min_vdisktime = vdisktime;
548
549    return min_vdisktime;
550}
551
552static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
553{
554    s64 delta = (s64)(vdisktime - min_vdisktime);
555    if (delta < 0)
556        min_vdisktime = vdisktime;
557
558    return min_vdisktime;
559}
560
561static void update_min_vdisktime(struct cfq_rb_root *st)
562{
563    u64 vdisktime = st->min_vdisktime;
564    struct cfq_group *cfqg;
565
566    if (st->active) {
567        cfqg = rb_entry_cfqg(st->active);
568        vdisktime = cfqg->vdisktime;
569    }
570
571    if (st->left) {
572        cfqg = rb_entry_cfqg(st->left);
573        vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
574    }
575
576    st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
577}
578
579/*
580 * get averaged number of queues of RT/BE priority.
581 * average is updated, with a formula that gives more weight to higher numbers,
582 * to quickly follows sudden increases and decrease slowly
583 */
584
585static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
586                    struct cfq_group *cfqg, bool rt)
587{
588    unsigned min_q, max_q;
589    unsigned mult = cfq_hist_divisor - 1;
590    unsigned round = cfq_hist_divisor / 2;
591    unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
592
593    min_q = min(cfqg->busy_queues_avg[rt], busy);
594    max_q = max(cfqg->busy_queues_avg[rt], busy);
595    cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
596        cfq_hist_divisor;
597    return cfqg->busy_queues_avg[rt];
598}
599
600static inline unsigned
601cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
602{
603    struct cfq_rb_root *st = &cfqd->grp_service_tree;
604
605    return cfq_target_latency * cfqg->weight / st->total_weight;
606}
607
608static inline void
609cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
610{
611    unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
612    if (cfqd->cfq_latency) {
613        /*
614         * interested queues (we consider only the ones with the same
615         * priority class in the cfq group)
616         */
617        unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
618                        cfq_class_rt(cfqq));
619        unsigned sync_slice = cfqd->cfq_slice[1];
620        unsigned expect_latency = sync_slice * iq;
621        unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
622
623        if (expect_latency > group_slice) {
624            unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
625            /* scale low_slice according to IO priority
626             * and sync vs async */
627            unsigned low_slice =
628                min(slice, base_low_slice * slice / sync_slice);
629            /* the adapted slice value is scaled to fit all iqs
630             * into the target latency */
631            slice = max(slice * group_slice / expect_latency,
632                    low_slice);
633        }
634    }
635    cfqq->slice_start = jiffies;
636    cfqq->slice_end = jiffies + slice;
637    cfqq->allocated_slice = slice;
638    cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
639}
640
641/*
642 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
643 * isn't valid until the first request from the dispatch is activated
644 * and the slice time set.
645 */
646static inline bool cfq_slice_used(struct cfq_queue *cfqq)
647{
648    if (cfq_cfqq_slice_new(cfqq))
649        return 0;
650    if (time_before(jiffies, cfqq->slice_end))
651        return 0;
652
653    return 1;
654}
655
656/*
657 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
658 * We choose the request that is closest to the head right now. Distance
659 * behind the head is penalized and only allowed to a certain extent.
660 */
661static struct request *
662cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
663{
664    sector_t s1, s2, d1 = 0, d2 = 0;
665    unsigned long back_max;
666#define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
667#define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
668    unsigned wrap = 0; /* bit mask: requests behind the disk head? */
669
670    if (rq1 == NULL || rq1 == rq2)
671        return rq2;
672    if (rq2 == NULL)
673        return rq1;
674
675    if (rq_is_sync(rq1) && !rq_is_sync(rq2))
676        return rq1;
677    else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
678        return rq2;
679    if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META))
680        return rq1;
681    else if ((rq2->cmd_flags & REQ_META) &&
682         !(rq1->cmd_flags & REQ_META))
683        return rq2;
684
685    s1 = blk_rq_pos(rq1);
686    s2 = blk_rq_pos(rq2);
687
688    /*
689     * by definition, 1KiB is 2 sectors
690     */
691    back_max = cfqd->cfq_back_max * 2;
692
693    /*
694     * Strict one way elevator _except_ in the case where we allow
695     * short backward seeks which are biased as twice the cost of a
696     * similar forward seek.
697     */
698    if (s1 >= last)
699        d1 = s1 - last;
700    else if (s1 + back_max >= last)
701        d1 = (last - s1) * cfqd->cfq_back_penalty;
702    else
703        wrap |= CFQ_RQ1_WRAP;
704
705    if (s2 >= last)
706        d2 = s2 - last;
707    else if (s2 + back_max >= last)
708        d2 = (last - s2) * cfqd->cfq_back_penalty;
709    else
710        wrap |= CFQ_RQ2_WRAP;
711
712    /* Found required data */
713
714    /*
715     * By doing switch() on the bit mask "wrap" we avoid having to
716     * check two variables for all permutations: --> faster!
717     */
718    switch (wrap) {
719    case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
720        if (d1 < d2)
721            return rq1;
722        else if (d2 < d1)
723            return rq2;
724        else {
725            if (s1 >= s2)
726                return rq1;
727            else
728                return rq2;
729        }
730
731    case CFQ_RQ2_WRAP:
732        return rq1;
733    case CFQ_RQ1_WRAP:
734        return rq2;
735    case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
736    default:
737        /*
738         * Since both rqs are wrapped,
739         * start with the one that's further behind head
740         * (--> only *one* back seek required),
741         * since back seek takes more time than forward.
742         */
743        if (s1 <= s2)
744            return rq1;
745        else
746            return rq2;
747    }
748}
749
750/*
751 * The below is leftmost cache rbtree addon
752 */
753static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
754{
755    /* Service tree is empty */
756    if (!root->count)
757        return NULL;
758
759    if (!root->left)
760        root->left = rb_first(&root->rb);
761
762    if (root->left)
763        return rb_entry(root->left, struct cfq_queue, rb_node);
764
765    return NULL;
766}
767
768static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
769{
770    if (!root->left)
771        root->left = rb_first(&root->rb);
772
773    if (root->left)
774        return rb_entry_cfqg(root->left);
775
776    return NULL;
777}
778
779static void rb_erase_init(struct rb_node *n, struct rb_root *root)
780{
781    rb_erase(n, root);
782    RB_CLEAR_NODE(n);
783}
784
785static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
786{
787    if (root->left == n)
788        root->left = NULL;
789    rb_erase_init(n, &root->rb);
790    --root->count;
791}
792
793/*
794 * would be nice to take fifo expire time into account as well
795 */
796static struct request *
797cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
798          struct request *last)
799{
800    struct rb_node *rbnext = rb_next(&last->rb_node);
801    struct rb_node *rbprev = rb_prev(&last->rb_node);
802    struct request *next = NULL, *prev = NULL;
803
804    BUG_ON(RB_EMPTY_NODE(&last->rb_node));
805
806    if (rbprev)
807        prev = rb_entry_rq(rbprev);
808
809    if (rbnext)
810        next = rb_entry_rq(rbnext);
811    else {
812        rbnext = rb_first(&cfqq->sort_list);
813        if (rbnext && rbnext != &last->rb_node)
814            next = rb_entry_rq(rbnext);
815    }
816
817    return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
818}
819
820static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
821                      struct cfq_queue *cfqq)
822{
823    /*
824     * just an approximation, should be ok.
825     */
826    return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
827               cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
828}
829
830static inline s64
831cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
832{
833    return cfqg->vdisktime - st->min_vdisktime;
834}
835
836static void
837__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
838{
839    struct rb_node **node = &st->rb.rb_node;
840    struct rb_node *parent = NULL;
841    struct cfq_group *__cfqg;
842    s64 key = cfqg_key(st, cfqg);
843    int left = 1;
844
845    while (*node != NULL) {
846        parent = *node;
847        __cfqg = rb_entry_cfqg(parent);
848
849        if (key < cfqg_key(st, __cfqg))
850            node = &parent->rb_left;
851        else {
852            node = &parent->rb_right;
853            left = 0;
854        }
855    }
856
857    if (left)
858        st->left = &cfqg->rb_node;
859
860    rb_link_node(&cfqg->rb_node, parent, node);
861    rb_insert_color(&cfqg->rb_node, &st->rb);
862}
863
864static void
865cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
866{
867    struct cfq_rb_root *st = &cfqd->grp_service_tree;
868    struct cfq_group *__cfqg;
869    struct rb_node *n;
870
871    cfqg->nr_cfqq++;
872    if (cfqg->on_st)
873        return;
874
875    /*
876     * Currently put the group at the end. Later implement something
877     * so that groups get lesser vtime based on their weights, so that
878     * if group does not loose all if it was not continously backlogged.
879     */
880    n = rb_last(&st->rb);
881    if (n) {
882        __cfqg = rb_entry_cfqg(n);
883        cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
884    } else
885        cfqg->vdisktime = st->min_vdisktime;
886
887    __cfq_group_service_tree_add(st, cfqg);
888    cfqg->on_st = true;
889    st->total_weight += cfqg->weight;
890}
891
892static void
893cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
894{
895    struct cfq_rb_root *st = &cfqd->grp_service_tree;
896
897    if (st->active == &cfqg->rb_node)
898        st->active = NULL;
899
900    BUG_ON(cfqg->nr_cfqq < 1);
901    cfqg->nr_cfqq--;
902
903    /* If there are other cfq queues under this group, don't delete it */
904    if (cfqg->nr_cfqq)
905        return;
906
907    cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
908    cfqg->on_st = false;
909    st->total_weight -= cfqg->weight;
910    if (!RB_EMPTY_NODE(&cfqg->rb_node))
911        cfq_rb_erase(&cfqg->rb_node, st);
912    cfqg->saved_workload_slice = 0;
913    cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
914}
915
916static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
917{
918    unsigned int slice_used;
919
920    /*
921     * Queue got expired before even a single request completed or
922     * got expired immediately after first request completion.
923     */
924    if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
925        /*
926         * Also charge the seek time incurred to the group, otherwise
927         * if there are mutiple queues in the group, each can dispatch
928         * a single request on seeky media and cause lots of seek time
929         * and group will never know it.
930         */
931        slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
932                    1);
933    } else {
934        slice_used = jiffies - cfqq->slice_start;
935        if (slice_used > cfqq->allocated_slice)
936            slice_used = cfqq->allocated_slice;
937    }
938
939    return slice_used;
940}
941
942static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
943                struct cfq_queue *cfqq)
944{
945    struct cfq_rb_root *st = &cfqd->grp_service_tree;
946    unsigned int used_sl, charge;
947    int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
948            - cfqg->service_tree_idle.count;
949
950    BUG_ON(nr_sync < 0);
951    used_sl = charge = cfq_cfqq_slice_usage(cfqq);
952
953    if (iops_mode(cfqd))
954        charge = cfqq->slice_dispatch;
955    else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
956        charge = cfqq->allocated_slice;
957
958    /* Can't update vdisktime while group is on service tree */
959    cfq_rb_erase(&cfqg->rb_node, st);
960    cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
961    __cfq_group_service_tree_add(st, cfqg);
962
963    /* This group is being expired. Save the context */
964    if (time_after(cfqd->workload_expires, jiffies)) {
965        cfqg->saved_workload_slice = cfqd->workload_expires
966                        - jiffies;
967        cfqg->saved_workload = cfqd->serving_type;
968        cfqg->saved_serving_prio = cfqd->serving_prio;
969    } else
970        cfqg->saved_workload_slice = 0;
971
972    cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
973                    st->min_vdisktime);
974    cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u"
975            " sect=%u", used_sl, cfqq->slice_dispatch, charge,
976            iops_mode(cfqd), cfqq->nr_sectors);
977    cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl);
978    cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
979}
980
981#ifdef CONFIG_CFQ_GROUP_IOSCHED
982static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
983{
984    if (blkg)
985        return container_of(blkg, struct cfq_group, blkg);
986    return NULL;
987}
988
989void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
990                    unsigned int weight)
991{
992    cfqg_of_blkg(blkg)->weight = weight;
993}
994
995static struct cfq_group *
996cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
997{
998    struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
999    struct cfq_group *cfqg = NULL;
1000    void *key = cfqd;
1001    int i, j;
1002    struct cfq_rb_root *st;
1003    struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
1004    unsigned int major, minor;
1005
1006    cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
1007    if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
1008        sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1009        cfqg->blkg.dev = MKDEV(major, minor);
1010        goto done;
1011    }
1012    if (cfqg || !create)
1013        goto done;
1014
1015    cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
1016    if (!cfqg)
1017        goto done;
1018
1019    for_each_cfqg_st(cfqg, i, j, st)
1020        *st = CFQ_RB_ROOT;
1021    RB_CLEAR_NODE(&cfqg->rb_node);
1022
1023    /*
1024     * Take the initial reference that will be released on destroy
1025     * This can be thought of a joint reference by cgroup and
1026     * elevator which will be dropped by either elevator exit
1027     * or cgroup deletion path depending on who is exiting first.
1028     */
1029    atomic_set(&cfqg->ref, 1);
1030
1031    /*
1032     * Add group onto cgroup list. It might happen that bdi->dev is
1033     * not initiliazed yet. Initialize this new group without major
1034     * and minor info and this info will be filled in once a new thread
1035     * comes for IO. See code above.
1036     */
1037    if (bdi->dev) {
1038        sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
1039        cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1040                    MKDEV(major, minor));
1041    } else
1042        cfq_blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
1043                    0);
1044
1045    cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);
1046
1047    /* Add group on cfqd list */
1048    hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
1049
1050done:
1051    return cfqg;
1052}
1053
1054/*
1055 * Search for the cfq group current task belongs to. If create = 1, then also
1056 * create the cfq group if it does not exist. request_queue lock must be held.
1057 */
1058static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1059{
1060    struct cgroup *cgroup;
1061    struct cfq_group *cfqg = NULL;
1062
1063    rcu_read_lock();
1064    cgroup = task_cgroup(current, blkio_subsys_id);
1065    cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
1066    if (!cfqg && create)
1067        cfqg = &cfqd->root_group;
1068    rcu_read_unlock();
1069    return cfqg;
1070}
1071
1072static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1073{
1074    atomic_inc(&cfqg->ref);
1075    return cfqg;
1076}
1077
1078static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
1079{
1080    /* Currently, all async queues are mapped to root group */
1081    if (!cfq_cfqq_sync(cfqq))
1082        cfqg = &cfqq->cfqd->root_group;
1083
1084    cfqq->cfqg = cfqg;
1085    /* cfqq reference on cfqg */
1086    atomic_inc(&cfqq->cfqg->ref);
1087}
1088
1089static void cfq_put_cfqg(struct cfq_group *cfqg)
1090{
1091    struct cfq_rb_root *st;
1092    int i, j;
1093
1094    BUG_ON(atomic_read(&cfqg->ref) <= 0);
1095    if (!atomic_dec_and_test(&cfqg->ref))
1096        return;
1097    for_each_cfqg_st(cfqg, i, j, st)
1098        BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
1099    kfree(cfqg);
1100}
1101
1102static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
1103{
1104    /* Something wrong if we are trying to remove same group twice */
1105    BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
1106
1107    hlist_del_init(&cfqg->cfqd_node);
1108
1109    /*
1110     * Put the reference taken at the time of creation so that when all
1111     * queues are gone, group can be destroyed.
1112     */
1113    cfq_put_cfqg(cfqg);
1114}
1115
1116static void cfq_release_cfq_groups(struct cfq_data *cfqd)
1117{
1118    struct hlist_node *pos, *n;
1119    struct cfq_group *cfqg;
1120
1121    hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
1122        /*
1123         * If cgroup removal path got to blk_group first and removed
1124         * it from cgroup list, then it will take care of destroying
1125         * cfqg also.
1126         */
1127        if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
1128            cfq_destroy_cfqg(cfqd, cfqg);
1129    }
1130}
1131
1132/*
1133 * Blk cgroup controller notification saying that blkio_group object is being
1134 * delinked as associated cgroup object is going away. That also means that
1135 * no new IO will come in this group. So get rid of this group as soon as
1136 * any pending IO in the group is finished.
1137 *
1138 * This function is called under rcu_read_lock(). key is the rcu protected
1139 * pointer. That means "key" is a valid cfq_data pointer as long as we are rcu
1140 * read lock.
1141 *
1142 * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
1143 * it should not be NULL as even if elevator was exiting, cgroup deltion
1144 * path got to it first.
1145 */
1146void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
1147{
1148    unsigned long flags;
1149    struct cfq_data *cfqd = key;
1150
1151    spin_lock_irqsave(cfqd->queue->queue_lock, flags);
1152    cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
1153    spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
1154}
1155
1156#else /* GROUP_IOSCHED */
1157static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
1158{
1159    return &cfqd->root_group;
1160}
1161
1162static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
1163{
1164    return cfqg;
1165}
1166
1167static inline void
1168cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
1169    cfqq->cfqg = cfqg;
1170}
1171
1172static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
1173static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
1174
1175#endif /* GROUP_IOSCHED */
1176
1177/*
1178 * The cfqd->service_trees holds all pending cfq_queue's that have
1179 * requests waiting to be processed. It is sorted in the order that
1180 * we will service the queues.
1181 */
1182static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1183                 bool add_front)
1184{
1185    struct rb_node **p, *parent;
1186    struct cfq_queue *__cfqq;
1187    unsigned long rb_key;
1188    struct cfq_rb_root *service_tree;
1189    int left;
1190    int new_cfqq = 1;
1191    int group_changed = 0;
1192
1193#ifdef CONFIG_CFQ_GROUP_IOSCHED
1194    if (!cfqd->cfq_group_isolation
1195        && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
1196        && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
1197        /* Move this cfq to root group */
1198        cfq_log_cfqq(cfqd, cfqq, "moving to root group");
1199        if (!RB_EMPTY_NODE(&cfqq->rb_node))
1200            cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1201        cfqq->orig_cfqg = cfqq->cfqg;
1202        cfqq->cfqg = &cfqd->root_group;
1203        atomic_inc(&cfqd->root_group.ref);
1204        group_changed = 1;
1205    } else if (!cfqd->cfq_group_isolation
1206           && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
1207        /* cfqq is sequential now needs to go to its original group */
1208        BUG_ON(cfqq->cfqg != &cfqd->root_group);
1209        if (!RB_EMPTY_NODE(&cfqq->rb_node))
1210            cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1211        cfq_put_cfqg(cfqq->cfqg);
1212        cfqq->cfqg = cfqq->orig_cfqg;
1213        cfqq->orig_cfqg = NULL;
1214        group_changed = 1;
1215        cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
1216    }
1217#endif
1218
1219    service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
1220                        cfqq_type(cfqq));
1221    if (cfq_class_idle(cfqq)) {
1222        rb_key = CFQ_IDLE_DELAY;
1223        parent = rb_last(&service_tree->rb);
1224        if (parent && parent != &cfqq->rb_node) {
1225            __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1226            rb_key += __cfqq->rb_key;
1227        } else
1228            rb_key += jiffies;
1229    } else if (!add_front) {
1230        /*
1231         * Get our rb key offset. Subtract any residual slice
1232         * value carried from last service. A negative resid
1233         * count indicates slice overrun, and this should position
1234         * the next service time further away in the tree.
1235         */
1236        rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
1237        rb_key -= cfqq->slice_resid;
1238        cfqq->slice_resid = 0;
1239    } else {
1240        rb_key = -HZ;
1241        __cfqq = cfq_rb_first(service_tree);
1242        rb_key += __cfqq ? __cfqq->rb_key : jiffies;
1243    }
1244
1245    if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1246        new_cfqq = 0;
1247        /*
1248         * same position, nothing more to do
1249         */
1250        if (rb_key == cfqq->rb_key &&
1251            cfqq->service_tree == service_tree)
1252            return;
1253
1254        cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1255        cfqq->service_tree = NULL;
1256    }
1257
1258    left = 1;
1259    parent = NULL;
1260    cfqq->service_tree = service_tree;
1261    p = &service_tree->rb.rb_node;
1262    while (*p) {
1263        struct rb_node **n;
1264
1265        parent = *p;
1266        __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
1267
1268        /*
1269         * sort by key, that represents service time.
1270         */
1271        if (time_before(rb_key, __cfqq->rb_key))
1272            n = &(*p)->rb_left;
1273        else {
1274            n = &(*p)->rb_right;
1275            left = 0;
1276        }
1277
1278        p = n;
1279    }
1280
1281    if (left)
1282        service_tree->left = &cfqq->rb_node;
1283
1284    cfqq->rb_key = rb_key;
1285    rb_link_node(&cfqq->rb_node, parent, p);
1286    rb_insert_color(&cfqq->rb_node, &service_tree->rb);
1287    service_tree->count++;
1288    if ((add_front || !new_cfqq) && !group_changed)
1289        return;
1290    cfq_group_service_tree_add(cfqd, cfqq->cfqg);
1291}
1292
1293static struct cfq_queue *
1294cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
1295             sector_t sector, struct rb_node **ret_parent,
1296             struct rb_node ***rb_link)
1297{
1298    struct rb_node **p, *parent;
1299    struct cfq_queue *cfqq = NULL;
1300
1301    parent = NULL;
1302    p = &root->rb_node;
1303    while (*p) {
1304        struct rb_node **n;
1305
1306        parent = *p;
1307        cfqq = rb_entry(parent, struct cfq_queue, p_node);
1308
1309        /*
1310         * Sort strictly based on sector. Smallest to the left,
1311         * largest to the right.
1312         */
1313        if (sector > blk_rq_pos(cfqq->next_rq))
1314            n = &(*p)->rb_right;
1315        else if (sector < blk_rq_pos(cfqq->next_rq))
1316            n = &(*p)->rb_left;
1317        else
1318            break;
1319        p = n;
1320        cfqq = NULL;
1321    }
1322
1323    *ret_parent = parent;
1324    if (rb_link)
1325        *rb_link = p;
1326    return cfqq;
1327}
1328
1329static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1330{
1331    struct rb_node **p, *parent;
1332    struct cfq_queue *__cfqq;
1333
1334    if (cfqq->p_root) {
1335        rb_erase(&cfqq->p_node, cfqq->p_root);
1336        cfqq->p_root = NULL;
1337    }
1338
1339    if (cfq_class_idle(cfqq))
1340        return;
1341    if (!cfqq->next_rq)
1342        return;
1343
1344    cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
1345    __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
1346                      blk_rq_pos(cfqq->next_rq), &parent, &p);
1347    if (!__cfqq) {
1348        rb_link_node(&cfqq->p_node, parent, p);
1349        rb_insert_color(&cfqq->p_node, cfqq->p_root);
1350    } else
1351        cfqq->p_root = NULL;
1352}
1353
1354/*
1355 * Update cfqq's position in the service tree.
1356 */
1357static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1358{
1359    /*
1360     * Resorting requires the cfqq to be on the RR list already.
1361     */
1362    if (cfq_cfqq_on_rr(cfqq)) {
1363        cfq_service_tree_add(cfqd, cfqq, 0);
1364        cfq_prio_tree_add(cfqd, cfqq);
1365    }
1366}
1367
1368/*
1369 * add to busy list of queues for service, trying to be fair in ordering
1370 * the pending list according to last request service
1371 */
1372static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1373{
1374    cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
1375    BUG_ON(cfq_cfqq_on_rr(cfqq));
1376    cfq_mark_cfqq_on_rr(cfqq);
1377    cfqd->busy_queues++;
1378
1379    cfq_resort_rr_list(cfqd, cfqq);
1380}
1381
1382/*
1383 * Called when the cfqq no longer has requests pending, remove it from
1384 * the service tree.
1385 */
1386static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1387{
1388    cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
1389    BUG_ON(!cfq_cfqq_on_rr(cfqq));
1390    cfq_clear_cfqq_on_rr(cfqq);
1391
1392    if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
1393        cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
1394        cfqq->service_tree = NULL;
1395    }
1396    if (cfqq->p_root) {
1397        rb_erase(&cfqq->p_node, cfqq->p_root);
1398        cfqq->p_root = NULL;
1399    }
1400
1401    cfq_group_service_tree_del(cfqd, cfqq->cfqg);
1402    BUG_ON(!cfqd->busy_queues);
1403    cfqd->busy_queues--;
1404}
1405
1406/*
1407 * rb tree support functions
1408 */
1409static void cfq_del_rq_rb(struct request *rq)
1410{
1411    struct cfq_queue *cfqq = RQ_CFQQ(rq);
1412    const int sync = rq_is_sync(rq);
1413
1414    BUG_ON(!cfqq->queued[sync]);
1415    cfqq->queued[sync]--;
1416
1417    elv_rb_del(&cfqq->sort_list, rq);
1418
1419    if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
1420        /*
1421         * Queue will be deleted from service tree when we actually
1422         * expire it later. Right now just remove it from prio tree
1423         * as it is empty.
1424         */
1425        if (cfqq->p_root) {
1426            rb_erase(&cfqq->p_node, cfqq->p_root);
1427            cfqq->p_root = NULL;
1428        }
1429    }
1430}
1431
1432static void cfq_add_rq_rb(struct request *rq)
1433{
1434    struct cfq_queue *cfqq = RQ_CFQQ(rq);
1435    struct cfq_data *cfqd = cfqq->cfqd;
1436    struct request *__alias, *prev;
1437
1438    cfqq->queued[rq_is_sync(rq)]++;
1439
1440    /*
1441     * looks a little odd, but the first insert might return an alias.
1442     * if that happens, put the alias on the dispatch list
1443     */
1444    while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
1445        cfq_dispatch_insert(cfqd->queue, __alias);
1446
1447    if (!cfq_cfqq_on_rr(cfqq))
1448        cfq_add_cfqq_rr(cfqd, cfqq);
1449
1450    /*
1451     * check if this request is a better next-serve candidate
1452     */
1453    prev = cfqq->next_rq;
1454    cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
1455
1456    /*
1457     * adjust priority tree position, if ->next_rq changes
1458     */
1459    if (prev != cfqq->next_rq)
1460        cfq_prio_tree_add(cfqd, cfqq);
1461
1462    BUG_ON(!cfqq->next_rq);
1463}
1464
1465static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
1466{
1467    elv_rb_del(&cfqq->sort_list, rq);
1468    cfqq->queued[rq_is_sync(rq)]--;
1469    cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1470                    rq_data_dir(rq), rq_is_sync(rq));
1471    cfq_add_rq_rb(rq);
1472    cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
1473            &cfqq->cfqd->serving_group->blkg, rq_data_dir(rq),
1474            rq_is_sync(rq));
1475}
1476
1477static struct request *
1478cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
1479{
1480    struct task_struct *tsk = current;
1481    struct cfq_io_context *cic;
1482    struct cfq_queue *cfqq;
1483
1484    cic = cfq_cic_lookup(cfqd, tsk->io_context);
1485    if (!cic)
1486        return NULL;
1487
1488    cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1489    if (cfqq) {
1490        sector_t sector = bio->bi_sector + bio_sectors(bio);
1491
1492        return elv_rb_find(&cfqq->sort_list, sector);
1493    }
1494
1495    return NULL;
1496}
1497
1498static void cfq_activate_request(struct request_queue *q, struct request *rq)
1499{
1500    struct cfq_data *cfqd = q->elevator->elevator_data;
1501
1502    cfqd->rq_in_driver++;
1503    cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
1504                        cfqd->rq_in_driver);
1505
1506    cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1507}
1508
1509static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
1510{
1511    struct cfq_data *cfqd = q->elevator->elevator_data;
1512
1513    WARN_ON(!cfqd->rq_in_driver);
1514    cfqd->rq_in_driver--;
1515    cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
1516                        cfqd->rq_in_driver);
1517}
1518
1519static void cfq_remove_request(struct request *rq)
1520{
1521    struct cfq_queue *cfqq = RQ_CFQQ(rq);
1522
1523    if (cfqq->next_rq == rq)
1524        cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
1525
1526    list_del_init(&rq->queuelist);
1527    cfq_del_rq_rb(rq);
1528
1529    cfqq->cfqd->rq_queued--;
1530    cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
1531                    rq_data_dir(rq), rq_is_sync(rq));
1532    if (rq->cmd_flags & REQ_META) {
1533        WARN_ON(!cfqq->meta_pending);
1534        cfqq->meta_pending--;
1535    }
1536}
1537
1538static int cfq_merge(struct request_queue *q, struct request **req,
1539             struct bio *bio)
1540{
1541    struct cfq_data *cfqd = q->elevator->elevator_data;
1542    struct request *__rq;
1543
1544    __rq = cfq_find_rq_fmerge(cfqd, bio);
1545    if (__rq && elv_rq_merge_ok(__rq, bio)) {
1546        *req = __rq;
1547        return ELEVATOR_FRONT_MERGE;
1548    }
1549
1550    return ELEVATOR_NO_MERGE;
1551}
1552
1553static void cfq_merged_request(struct request_queue *q, struct request *req,
1554                   int type)
1555{
1556    if (type == ELEVATOR_FRONT_MERGE) {
1557        struct cfq_queue *cfqq = RQ_CFQQ(req);
1558
1559        cfq_reposition_rq_rb(cfqq, req);
1560    }
1561}
1562
1563static void cfq_bio_merged(struct request_queue *q, struct request *req,
1564                struct bio *bio)
1565{
1566    cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
1567                    bio_data_dir(bio), cfq_bio_sync(bio));
1568}
1569
1570static void
1571cfq_merged_requests(struct request_queue *q, struct request *rq,
1572            struct request *next)
1573{
1574    struct cfq_queue *cfqq = RQ_CFQQ(rq);
1575    /*
1576     * reposition in fifo if next is older than rq
1577     */
1578    if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
1579        time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
1580        list_move(&rq->queuelist, &next->queuelist);
1581        rq_set_fifo_time(rq, rq_fifo_time(next));
1582    }
1583
1584    if (cfqq->next_rq == next)
1585        cfqq->next_rq = rq;
1586    cfq_remove_request(next);
1587    cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
1588                    rq_data_dir(next), rq_is_sync(next));
1589}
1590
1591static int cfq_allow_merge(struct request_queue *q, struct request *rq,
1592               struct bio *bio)
1593{
1594    struct cfq_data *cfqd = q->elevator->elevator_data;
1595    struct cfq_io_context *cic;
1596    struct cfq_queue *cfqq;
1597
1598    /*
1599     * Disallow merge of a sync bio into an async request.
1600     */
1601    if (cfq_bio_sync(bio) && !rq_is_sync(rq))
1602        return false;
1603
1604    /*
1605     * Lookup the cfqq that this bio will be queued with. Allow
1606     * merge only if rq is queued there.
1607     */
1608    cic = cfq_cic_lookup(cfqd, current->io_context);
1609    if (!cic)
1610        return false;
1611
1612    cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
1613    return cfqq == RQ_CFQQ(rq);
1614}
1615
1616static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1617{
1618    del_timer(&cfqd->idle_slice_timer);
1619    cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
1620}
1621
1622static void __cfq_set_active_queue(struct cfq_data *cfqd,
1623                   struct cfq_queue *cfqq)
1624{
1625    if (cfqq) {
1626        cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
1627                cfqd->serving_prio, cfqd->serving_type);
1628        cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
1629        cfqq->slice_start = 0;
1630        cfqq->dispatch_start = jiffies;
1631        cfqq->allocated_slice = 0;
1632        cfqq->slice_end = 0;
1633        cfqq->slice_dispatch = 0;
1634        cfqq->nr_sectors = 0;
1635
1636        cfq_clear_cfqq_wait_request(cfqq);
1637        cfq_clear_cfqq_must_dispatch(cfqq);
1638        cfq_clear_cfqq_must_alloc_slice(cfqq);
1639        cfq_clear_cfqq_fifo_expire(cfqq);
1640        cfq_mark_cfqq_slice_new(cfqq);
1641
1642        cfq_del_timer(cfqd, cfqq);
1643    }
1644
1645    cfqd->active_queue = cfqq;
1646}
1647
1648/*
1649 * current cfqq expired its slice (or was too idle), select new one
1650 */
1651static void
1652__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1653            bool timed_out)
1654{
1655    cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
1656
1657    if (cfq_cfqq_wait_request(cfqq))
1658        cfq_del_timer(cfqd, cfqq);
1659
1660    cfq_clear_cfqq_wait_request(cfqq);
1661    cfq_clear_cfqq_wait_busy(cfqq);
1662
1663    /*
1664     * If this cfqq is shared between multiple processes, check to
1665     * make sure that those processes are still issuing I/Os within
1666     * the mean seek distance. If not, it may be time to break the
1667     * queues apart again.
1668     */
1669    if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
1670        cfq_mark_cfqq_split_coop(cfqq);
1671
1672    /*
1673     * store what was left of this slice, if the queue idled/timed out
1674     */
1675    if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
1676        cfqq->slice_resid = cfqq->slice_end - jiffies;
1677        cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
1678    }
1679
1680    cfq_group_served(cfqd, cfqq->cfqg, cfqq);
1681
1682    if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
1683        cfq_del_cfqq_rr(cfqd, cfqq);
1684
1685    cfq_resort_rr_list(cfqd, cfqq);
1686
1687    if (cfqq == cfqd->active_queue)
1688        cfqd->active_queue = NULL;
1689
1690    if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
1691        cfqd->grp_service_tree.active = NULL;
1692
1693    if (cfqd->active_cic) {
1694        put_io_context(cfqd->active_cic->ioc);
1695        cfqd->active_cic = NULL;
1696    }
1697}
1698
1699static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
1700{
1701    struct cfq_queue *cfqq = cfqd->active_queue;
1702
1703    if (cfqq)
1704        __cfq_slice_expired(cfqd, cfqq, timed_out);
1705}
1706
1707/*
1708 * Get next queue for service. Unless we have a queue preemption,
1709 * we'll simply select the first cfqq in the service tree.
1710 */
1711static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
1712{
1713    struct cfq_rb_root *service_tree =
1714        service_tree_for(cfqd->serving_group, cfqd->serving_prio,
1715                    cfqd->serving_type);
1716
1717    if (!cfqd->rq_queued)
1718        return NULL;
1719
1720    /* There is nothing to dispatch */
1721    if (!service_tree)
1722        return NULL;
1723    if (RB_EMPTY_ROOT(&service_tree->rb))
1724        return NULL;
1725    return cfq_rb_first(service_tree);
1726}
1727
1728static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
1729{
1730    struct cfq_group *cfqg;
1731    struct cfq_queue *cfqq;
1732    int i, j;
1733    struct cfq_rb_root *st;
1734
1735    if (!cfqd->rq_queued)
1736        return NULL;
1737
1738    cfqg = cfq_get_next_cfqg(cfqd);
1739    if (!cfqg)
1740        return NULL;
1741
1742    for_each_cfqg_st(cfqg, i, j, st)
1743        if ((cfqq = cfq_rb_first(st)) != NULL)
1744            return cfqq;
1745    return NULL;
1746}
1747
1748/*
1749 * Get and set a new active queue for service.
1750 */
1751static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
1752                          struct cfq_queue *cfqq)
1753{
1754    if (!cfqq)
1755        cfqq = cfq_get_next_queue(cfqd);
1756
1757    __cfq_set_active_queue(cfqd, cfqq);
1758    return cfqq;
1759}
1760
1761static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
1762                      struct request *rq)
1763{
1764    if (blk_rq_pos(rq) >= cfqd->last_position)
1765        return blk_rq_pos(rq) - cfqd->last_position;
1766    else
1767        return cfqd->last_position - blk_rq_pos(rq);
1768}
1769
1770static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
1771                   struct request *rq)
1772{
1773    return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
1774}
1775
1776static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
1777                    struct cfq_queue *cur_cfqq)
1778{
1779    struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
1780    struct rb_node *parent, *node;
1781    struct cfq_queue *__cfqq;
1782    sector_t sector = cfqd->last_position;
1783
1784    if (RB_EMPTY_ROOT(root))
1785        return NULL;
1786
1787    /*
1788     * First, if we find a request starting at the end of the last
1789     * request, choose it.
1790     */
1791    __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
1792    if (__cfqq)
1793        return __cfqq;
1794
1795    /*
1796     * If the exact sector wasn't found, the parent of the NULL leaf
1797     * will contain the closest sector.
1798     */
1799    __cfqq = rb_entry(parent, struct cfq_queue, p_node);
1800    if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1801        return __cfqq;
1802
1803    if (blk_rq_pos(__cfqq->next_rq) < sector)
1804        node = rb_next(&__cfqq->p_node);
1805    else
1806        node = rb_prev(&__cfqq->p_node);
1807    if (!node)
1808        return NULL;
1809
1810    __cfqq = rb_entry(node, struct cfq_queue, p_node);
1811    if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
1812        return __cfqq;
1813
1814    return NULL;
1815}
1816
1817/*
1818 * cfqd - obvious
1819 * cur_cfqq - passed in so that we don't decide that the current queue is
1820 * closely cooperating with itself.
1821 *
1822 * So, basically we're assuming that that cur_cfqq has dispatched at least
1823 * one request, and that cfqd->last_position reflects a position on the disk
1824 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
1825 * assumption.
1826 */
1827static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
1828                          struct cfq_queue *cur_cfqq)
1829{
1830    struct cfq_queue *cfqq;
1831
1832    if (cfq_class_idle(cur_cfqq))
1833        return NULL;
1834    if (!cfq_cfqq_sync(cur_cfqq))
1835        return NULL;
1836    if (CFQQ_SEEKY(cur_cfqq))
1837        return NULL;
1838
1839    /*
1840     * Don't search priority tree if it's the only queue in the group.
1841     */
1842    if (cur_cfqq->cfqg->nr_cfqq == 1)
1843        return NULL;
1844
1845    /*
1846     * We should notice if some of the queues are cooperating, eg
1847     * working closely on the same area of the disk. In that case,
1848     * we can group them together and don't waste time idling.
1849     */
1850    cfqq = cfqq_close(cfqd, cur_cfqq);
1851    if (!cfqq)
1852        return NULL;
1853
1854    /* If new queue belongs to different cfq_group, don't choose it */
1855    if (cur_cfqq->cfqg != cfqq->cfqg)
1856        return NULL;
1857
1858    /*
1859     * It only makes sense to merge sync queues.
1860     */
1861    if (!cfq_cfqq_sync(cfqq))
1862        return NULL;
1863    if (CFQQ_SEEKY(cfqq))
1864        return NULL;
1865
1866    /*
1867     * Do not merge queues of different priority classes
1868     */
1869    if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
1870        return NULL;
1871
1872    return cfqq;
1873}
1874
1875/*
1876 * Determine whether we should enforce idle window for this queue.
1877 */
1878
1879static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
1880{
1881    enum wl_prio_t prio = cfqq_prio(cfqq);
1882    struct cfq_rb_root *service_tree = cfqq->service_tree;
1883
1884    BUG_ON(!service_tree);
1885    BUG_ON(!service_tree->count);
1886
1887    if (!cfqd->cfq_slice_idle)
1888        return false;
1889
1890    /* We never do for idle class queues. */
1891    if (prio == IDLE_WORKLOAD)
1892        return false;
1893
1894    /* We do for queues that were marked with idle window flag. */
1895    if (cfq_cfqq_idle_window(cfqq) &&
1896       !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
1897        return true;
1898
1899    /*
1900     * Otherwise, we do only if they are the last ones
1901     * in their service tree.
1902     */
1903    if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
1904        return 1;
1905    cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
1906            service_tree->count);
1907    return 0;
1908}
1909
1910static void cfq_arm_slice_timer(struct cfq_data *cfqd)
1911{
1912    struct cfq_queue *cfqq = cfqd->active_queue;
1913    struct cfq_io_context *cic;
1914    unsigned long sl, group_idle = 0;
1915
1916    /*
1917     * SSD device without seek penalty, disable idling. But only do so
1918     * for devices that support queuing, otherwise we still have a problem
1919     * with sync vs async workloads.
1920     */
1921    if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
1922        return;
1923
1924    WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
1925    WARN_ON(cfq_cfqq_slice_new(cfqq));
1926
1927    /*
1928     * idle is disabled, either manually or by past process history
1929     */
1930    if (!cfq_should_idle(cfqd, cfqq)) {
1931        /* no queue idling. Check for group idling */
1932        if (cfqd->cfq_group_idle)
1933            group_idle = cfqd->cfq_group_idle;
1934        else
1935            return;
1936    }
1937
1938    /*
1939     * still active requests from this queue, don't idle
1940     */
1941    if (cfqq->dispatched)
1942        return;
1943
1944    /*
1945     * task has exited, don't wait
1946     */
1947    cic = cfqd->active_cic;
1948    if (!cic || !atomic_read(&cic->ioc->nr_tasks))
1949        return;
1950
1951    /*
1952     * If our average think time is larger than the remaining time
1953     * slice, then don't idle. This avoids overrunning the allotted
1954     * time slice.
1955     */
1956    if (sample_valid(cic->ttime_samples) &&
1957        (cfqq->slice_end - jiffies < cic->ttime_mean)) {
1958        cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
1959                cic->ttime_mean);
1960        return;
1961    }
1962
1963    /* There are other queues in the group, don't do group idle */
1964    if (group_idle && cfqq->cfqg->nr_cfqq > 1)
1965        return;
1966
1967    cfq_mark_cfqq_wait_request(cfqq);
1968
1969    if (group_idle)
1970        sl = cfqd->cfq_group_idle;
1971    else
1972        sl = cfqd->cfq_slice_idle;
1973
1974    mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
1975    cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
1976    cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
1977            group_idle ? 1 : 0);
1978}
1979
1980/*
1981 * Move request from internal lists to the request queue dispatch list.
1982 */
1983static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
1984{
1985    struct cfq_data *cfqd = q->elevator->elevator_data;
1986    struct cfq_queue *cfqq = RQ_CFQQ(rq);
1987
1988    cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
1989
1990    cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
1991    cfq_remove_request(rq);
1992    cfqq->dispatched++;
1993    (RQ_CFQG(rq))->dispatched++;
1994    elv_dispatch_sort(q, rq);
1995
1996    cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
1997    cfqq->nr_sectors += blk_rq_sectors(rq);
1998    cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
1999                    rq_data_dir(rq), rq_is_sync(rq));
2000}
2001
2002/*
2003 * return expired entry, or NULL to just start from scratch in rbtree
2004 */
2005static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
2006{
2007    struct request *rq = NULL;
2008
2009    if (cfq_cfqq_fifo_expire(cfqq))
2010        return NULL;
2011
2012    cfq_mark_cfqq_fifo_expire(cfqq);
2013
2014    if (list_empty(&cfqq->fifo))
2015        return NULL;
2016
2017    rq = rq_entry_fifo(cfqq->fifo.next);
2018    if (time_before(jiffies, rq_fifo_time(rq)))
2019        rq = NULL;
2020
2021    cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
2022    return rq;
2023}
2024
2025static inline int
2026cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2027{
2028    const int base_rq = cfqd->cfq_slice_async_rq;
2029
2030    WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
2031
2032    return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
2033}
2034
2035/*
2036 * Must be called with the queue_lock held.
2037 */
2038static int cfqq_process_refs(struct cfq_queue *cfqq)
2039{
2040    int process_refs, io_refs;
2041
2042    io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
2043    process_refs = atomic_read(&cfqq->ref) - io_refs;
2044    BUG_ON(process_refs < 0);
2045    return process_refs;
2046}
2047
2048static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
2049{
2050    int process_refs, new_process_refs;
2051    struct cfq_queue *__cfqq;
2052
2053    /*
2054     * If there are no process references on the new_cfqq, then it is
2055     * unsafe to follow the ->new_cfqq chain as other cfqq's in the
2056     * chain may have dropped their last reference (not just their
2057     * last process reference).
2058     */
2059    if (!cfqq_process_refs(new_cfqq))
2060        return;
2061
2062    /* Avoid a circular list and skip interim queue merges */
2063    while ((__cfqq = new_cfqq->new_cfqq)) {
2064        if (__cfqq == cfqq)
2065            return;
2066        new_cfqq = __cfqq;
2067    }
2068
2069    process_refs = cfqq_process_refs(cfqq);
2070    new_process_refs = cfqq_process_refs(new_cfqq);
2071    /*
2072     * If the process for the cfqq has gone away, there is no
2073     * sense in merging the queues.
2074     */
2075    if (process_refs == 0 || new_process_refs == 0)
2076        return;
2077
2078    /*
2079     * Merge in the direction of the lesser amount of work.
2080     */
2081    if (new_process_refs >= process_refs) {
2082        cfqq->new_cfqq = new_cfqq;
2083        atomic_add(process_refs, &new_cfqq->ref);
2084    } else {
2085        new_cfqq->new_cfqq = cfqq;
2086        atomic_add(new_process_refs, &cfqq->ref);
2087    }
2088}
2089
2090static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
2091                struct cfq_group *cfqg, enum wl_prio_t prio)
2092{
2093    struct cfq_queue *queue;
2094    int i;
2095    bool key_valid = false;
2096    unsigned long lowest_key = 0;
2097    enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
2098
2099    for (i = 0; i <= SYNC_WORKLOAD; ++i) {
2100        /* select the one with lowest rb_key */
2101        queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
2102        if (queue &&
2103            (!key_valid || time_before(queue->rb_key, lowest_key))) {
2104            lowest_key = queue->rb_key;
2105            cur_best = i;
2106            key_valid = true;
2107        }
2108    }
2109
2110    return cur_best;
2111}
2112
2113static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
2114{
2115    unsigned slice;
2116    unsigned count;
2117    struct cfq_rb_root *st;
2118    unsigned group_slice;
2119
2120    if (!cfqg) {
2121        cfqd->serving_prio = IDLE_WORKLOAD;
2122        cfqd->workload_expires = jiffies + 1;
2123        return;
2124    }
2125
2126    /* Choose next priority. RT > BE > IDLE */
2127    if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
2128        cfqd->serving_prio = RT_WORKLOAD;
2129    else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
2130        cfqd->serving_prio = BE_WORKLOAD;
2131    else {
2132        cfqd->serving_prio = IDLE_WORKLOAD;
2133        cfqd->workload_expires = jiffies + 1;
2134        return;
2135    }
2136
2137    /*
2138     * For RT and BE, we have to choose also the type
2139     * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
2140     * expiration time
2141     */
2142    st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2143    count = st->count;
2144
2145    /*
2146     * check workload expiration, and that we still have other queues ready
2147     */
2148    if (count && !time_after(jiffies, cfqd->workload_expires))
2149        return;
2150
2151    /* otherwise select new workload type */
2152    cfqd->serving_type =
2153        cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
2154    st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
2155    count = st->count;
2156
2157    /*
2158     * the workload slice is computed as a fraction of target latency
2159     * proportional to the number of queues in that workload, over
2160     * all the queues in the same priority class
2161     */
2162    group_slice = cfq_group_slice(cfqd, cfqg);
2163
2164    slice = group_slice * count /
2165        max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
2166              cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
2167
2168    if (cfqd->serving_type == ASYNC_WORKLOAD) {
2169        unsigned int tmp;
2170
2171        /*
2172         * Async queues are currently system wide. Just taking
2173         * proportion of queues with-in same group will lead to higher
2174         * async ratio system wide as generally root group is going
2175         * to have higher weight. A more accurate thing would be to
2176         * calculate system wide asnc/sync ratio.
2177         */
2178        tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
2179        tmp = tmp/cfqd->busy_queues;
2180        slice = min_t(unsigned, slice, tmp);
2181
2182        /* async workload slice is scaled down according to
2183         * the sync/async slice ratio. */
2184        slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
2185    } else
2186        /* sync workload slice is at least 2 * cfq_slice_idle */
2187        slice = max(slice, 2 * cfqd->cfq_slice_idle);
2188
2189    slice = max_t(unsigned, slice, CFQ_MIN_TT);
2190    cfq_log(cfqd, "workload slice:%d", slice);
2191    cfqd->workload_expires = jiffies + slice;
2192}
2193
2194static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
2195{
2196    struct cfq_rb_root *st = &cfqd->grp_service_tree;
2197    struct cfq_group *cfqg;
2198
2199    if (RB_EMPTY_ROOT(&st->rb))
2200        return NULL;
2201    cfqg = cfq_rb_first_group(st);
2202    st->active = &cfqg->rb_node;
2203    update_min_vdisktime(st);
2204    return cfqg;
2205}
2206
2207static void cfq_choose_cfqg(struct cfq_data *cfqd)
2208{
2209    struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
2210
2211    cfqd->serving_group = cfqg;
2212
2213    /* Restore the workload type data */
2214    if (cfqg->saved_workload_slice) {
2215        cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
2216        cfqd->serving_type = cfqg->saved_workload;
2217        cfqd->serving_prio = cfqg->saved_serving_prio;
2218    } else
2219        cfqd->workload_expires = jiffies - 1;
2220
2221    choose_service_tree(cfqd, cfqg);
2222}
2223
2224/*
2225 * Select a queue for service. If we have a current active queue,
2226 * check whether to continue servicing it, or retrieve and set a new one.
2227 */
2228static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
2229{
2230    struct cfq_queue *cfqq, *new_cfqq = NULL;
2231
2232    cfqq = cfqd->active_queue;
2233    if (!cfqq)
2234        goto new_queue;
2235
2236    if (!cfqd->rq_queued)
2237        return NULL;
2238
2239    /*
2240     * We were waiting for group to get backlogged. Expire the queue
2241     */
2242    if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
2243        goto expire;
2244
2245    /*
2246     * The active queue has run out of time, expire it and select new.
2247     */
2248    if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
2249        /*
2250         * If slice had not expired at the completion of last request
2251         * we might not have turned on wait_busy flag. Don't expire
2252         * the queue yet. Allow the group to get backlogged.
2253         *
2254         * The very fact that we have used the slice, that means we
2255         * have been idling all along on this queue and it should be
2256         * ok to wait for this request to complete.
2257         */
2258        if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
2259            && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2260            cfqq = NULL;
2261            goto keep_queue;
2262        } else
2263            goto check_group_idle;
2264    }
2265
2266    /*
2267     * The active queue has requests and isn't expired, allow it to
2268     * dispatch.
2269     */
2270    if (!RB_EMPTY_ROOT(&cfqq->sort_list))
2271        goto keep_queue;
2272
2273    /*
2274     * If another queue has a request waiting within our mean seek
2275     * distance, let it run. The expire code will check for close
2276     * cooperators and put the close queue at the front of the service
2277     * tree. If possible, merge the expiring queue with the new cfqq.
2278     */
2279    new_cfqq = cfq_close_cooperator(cfqd, cfqq);
2280    if (new_cfqq) {
2281        if (!cfqq->new_cfqq)
2282            cfq_setup_merge(cfqq, new_cfqq);
2283        goto expire;
2284    }
2285
2286    /*
2287     * No requests pending. If the active queue still has requests in
2288     * flight or is idling for a new request, allow either of these
2289     * conditions to happen (or time out) before selecting a new queue.
2290     */
2291    if (timer_pending(&cfqd->idle_slice_timer)) {
2292        cfqq = NULL;
2293        goto keep_queue;
2294    }
2295
2296    if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
2297        cfqq = NULL;
2298        goto keep_queue;
2299    }
2300
2301    /*
2302     * If group idle is enabled and there are requests dispatched from
2303     * this group, wait for requests to complete.
2304     */
2305check_group_idle:
2306    if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1
2307        && cfqq->cfqg->dispatched) {
2308        cfqq = NULL;
2309        goto keep_queue;
2310    }
2311
2312expire:
2313    cfq_slice_expired(cfqd, 0);
2314new_queue:
2315    /*
2316     * Current queue expired. Check if we have to switch to a new
2317     * service tree
2318     */
2319    if (!new_cfqq)
2320        cfq_choose_cfqg(cfqd);
2321
2322    cfqq = cfq_set_active_queue(cfqd, new_cfqq);
2323keep_queue:
2324    return cfqq;
2325}
2326
2327static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
2328{
2329    int dispatched = 0;
2330
2331    while (cfqq->next_rq) {
2332        cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
2333        dispatched++;
2334    }
2335
2336    BUG_ON(!list_empty(&cfqq->fifo));
2337
2338    /* By default cfqq is not expired if it is empty. Do it explicitly */
2339    __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
2340    return dispatched;
2341}
2342
2343/*
2344 * Drain our current requests. Used for barriers and when switching
2345 * io schedulers on-the-fly.
2346 */
2347static int cfq_forced_dispatch(struct cfq_data *cfqd)
2348{
2349    struct cfq_queue *cfqq;
2350    int dispatched = 0;
2351
2352    /* Expire the timeslice of the current active queue first */
2353    cfq_slice_expired(cfqd, 0);
2354    while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
2355        __cfq_set_active_queue(cfqd, cfqq);
2356        dispatched += __cfq_forced_dispatch_cfqq(cfqq);
2357    }
2358
2359    BUG_ON(cfqd->busy_queues);
2360
2361    cfq_log(cfqd, "forced_dispatch=%d", dispatched);
2362    return dispatched;
2363}
2364
2365static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
2366    struct cfq_queue *cfqq)
2367{
2368    /* the queue hasn't finished any request, can't estimate */
2369    if (cfq_cfqq_slice_new(cfqq))
2370        return 1;
2371    if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
2372        cfqq->slice_end))
2373        return 1;
2374
2375    return 0;
2376}
2377
2378static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2379{
2380    unsigned int max_dispatch;
2381
2382    /*
2383     * Drain async requests before we start sync IO
2384     */
2385    if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
2386        return false;
2387
2388    /*
2389     * If this is an async queue and we have sync IO in flight, let it wait
2390     */
2391    if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
2392        return false;
2393
2394    max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
2395    if (cfq_class_idle(cfqq))
2396        max_dispatch = 1;
2397
2398    /*
2399     * Does this cfqq already have too much IO in flight?
2400     */
2401    if (cfqq->dispatched >= max_dispatch) {
2402        /*
2403         * idle queue must always only have a single IO in flight
2404         */
2405        if (cfq_class_idle(cfqq))
2406            return false;
2407
2408        /*
2409         * We have other queues, don't allow more IO from this one
2410         */
2411        if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
2412            return false;
2413
2414        /*
2415         * Sole queue user, no limit
2416         */
2417        if (cfqd->busy_queues == 1)
2418            max_dispatch = -1;
2419        else
2420            /*
2421             * Normally we start throttling cfqq when cfq_quantum/2
2422             * requests have been dispatched. But we can drive
2423             * deeper queue depths at the beginning of slice
2424             * subjected to upper limit of cfq_quantum.
2425             * */
2426            max_dispatch = cfqd->cfq_quantum;
2427    }
2428
2429    /*
2430     * Async queues must wait a bit before being allowed dispatch.
2431     * We also ramp up the dispatch depth gradually for async IO,
2432     * based on the last sync IO we serviced
2433     */
2434    if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
2435        unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
2436        unsigned int depth;
2437
2438        depth = last_sync / cfqd->cfq_slice[1];
2439        if (!depth && !cfqq->dispatched)
2440            depth = 1;
2441        if (depth < max_dispatch)
2442            max_dispatch = depth;
2443    }
2444
2445    /*
2446     * If we're below the current max, allow a dispatch
2447     */
2448    return cfqq->dispatched < max_dispatch;
2449}
2450
2451/*
2452 * Dispatch a request from cfqq, moving them to the request queue
2453 * dispatch list.
2454 */
2455static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2456{
2457    struct request *rq;
2458
2459    BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
2460
2461    if (!cfq_may_dispatch(cfqd, cfqq))
2462        return false;
2463
2464    /*
2465     * follow expired path, else get first next available
2466     */
2467    rq = cfq_check_fifo(cfqq);
2468    if (!rq)
2469        rq = cfqq->next_rq;
2470
2471    /*
2472     * insert request into driver dispatch list
2473     */
2474    cfq_dispatch_insert(cfqd->queue, rq);
2475
2476    if (!cfqd->active_cic) {
2477        struct cfq_io_context *cic = RQ_CIC(rq);
2478
2479        atomic_long_inc(&cic->ioc->refcount);
2480        cfqd->active_cic = cic;
2481    }
2482
2483    return true;
2484}
2485
2486/*
2487 * Find the cfqq that we need to service and move a request from that to the
2488 * dispatch list
2489 */
2490static int cfq_dispatch_requests(struct request_queue *q, int force)
2491{
2492    struct cfq_data *cfqd = q->elevator->elevator_data;
2493    struct cfq_queue *cfqq;
2494
2495    if (!cfqd->busy_queues)
2496        return 0;
2497
2498    if (unlikely(force))
2499        return cfq_forced_dispatch(cfqd);
2500
2501    cfqq = cfq_select_queue(cfqd);
2502    if (!cfqq)
2503        return 0;
2504
2505    /*
2506     * Dispatch a request from this cfqq, if it is allowed
2507     */
2508    if (!cfq_dispatch_request(cfqd, cfqq))
2509        return 0;
2510
2511    cfqq->slice_dispatch++;
2512    cfq_clear_cfqq_must_dispatch(cfqq);
2513
2514    /*
2515     * expire an async queue immediately if it has used up its slice. idle
2516     * queue always expire after 1 dispatch round.
2517     */
2518    if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
2519        cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
2520        cfq_class_idle(cfqq))) {
2521        cfqq->slice_end = jiffies + 1;
2522        cfq_slice_expired(cfqd, 0);
2523    }
2524
2525    cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
2526    return 1;
2527}
2528
2529/*
2530 * task holds one reference to the queue, dropped when task exits. each rq
2531 * in-flight on this queue also holds a reference, dropped when rq is freed.
2532 *
2533 * Each cfq queue took a reference on the parent group. Drop it now.
2534 * queue lock must be held here.
2535 */
2536static void cfq_put_queue(struct cfq_queue *cfqq)
2537{
2538    struct cfq_data *cfqd = cfqq->cfqd;
2539    struct cfq_group *cfqg, *orig_cfqg;
2540
2541    BUG_ON(atomic_read(&cfqq->ref) <= 0);
2542
2543    if (!atomic_dec_and_test(&cfqq->ref))
2544        return;
2545
2546    cfq_log_cfqq(cfqd, cfqq, "put_queue");
2547    BUG_ON(rb_first(&cfqq->sort_list));
2548    BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
2549    cfqg = cfqq->cfqg;
2550    orig_cfqg = cfqq->orig_cfqg;
2551
2552    if (unlikely(cfqd->active_queue == cfqq)) {
2553        __cfq_slice_expired(cfqd, cfqq, 0);
2554        cfq_schedule_dispatch(cfqd);
2555    }
2556
2557    BUG_ON(cfq_cfqq_on_rr(cfqq));
2558    kmem_cache_free(cfq_pool, cfqq);
2559    cfq_put_cfqg(cfqg);
2560    if (orig_cfqg)
2561        cfq_put_cfqg(orig_cfqg);
2562}
2563
2564/*
2565 * Must always be called with the rcu_read_lock() held
2566 */
2567static void
2568__call_for_each_cic(struct io_context *ioc,
2569            void (*func)(struct io_context *, struct cfq_io_context *))
2570{
2571    struct cfq_io_context *cic;
2572    struct hlist_node *n;
2573
2574    hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
2575        func(ioc, cic);
2576}
2577
2578/*
2579 * Call func for each cic attached to this ioc.
2580 */
2581static void
2582call_for_each_cic(struct io_context *ioc,
2583          void (*func)(struct io_context *, struct cfq_io_context *))
2584{
2585    rcu_read_lock();
2586    __call_for_each_cic(ioc, func);
2587    rcu_read_unlock();
2588}
2589
2590static void cfq_cic_free_rcu(struct rcu_head *head)
2591{
2592    struct cfq_io_context *cic;
2593
2594    cic = container_of(head, struct cfq_io_context, rcu_head);
2595
2596    kmem_cache_free(cfq_ioc_pool, cic);
2597    elv_ioc_count_dec(cfq_ioc_count);
2598
2599    if (ioc_gone) {
2600        /*
2601         * CFQ scheduler is exiting, grab exit lock and check
2602         * the pending io context count. If it hits zero,
2603         * complete ioc_gone and set it back to NULL
2604         */
2605        spin_lock(&ioc_gone_lock);
2606        if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
2607            complete(ioc_gone);
2608            ioc_gone = NULL;
2609        }
2610        spin_unlock(&ioc_gone_lock);
2611    }
2612}
2613
2614static void cfq_cic_free(struct cfq_io_context *cic)
2615{
2616    call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
2617}
2618
2619static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
2620{
2621    unsigned long flags;
2622    unsigned long dead_key = (unsigned long) cic->key;
2623
2624    BUG_ON(!(dead_key & CIC_DEAD_KEY));
2625
2626    spin_lock_irqsave(&ioc->lock, flags);
2627    radix_tree_delete(&ioc->radix_root, dead_key >> CIC_DEAD_INDEX_SHIFT);
2628    hlist_del_rcu(&cic->cic_list);
2629    spin_unlock_irqrestore(&ioc->lock, flags);
2630
2631    cfq_cic_free(cic);
2632}
2633
2634/*
2635 * Must be called with rcu_read_lock() held or preemption otherwise disabled.
2636 * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
2637 * and ->trim() which is called with the task lock held
2638 */
2639static void cfq_free_io_context(struct io_context *ioc)
2640{
2641    /*
2642     * ioc->refcount is zero here, or we are called from elv_unregister(),
2643     * so no more cic's are allowed to be linked into this ioc. So it
2644     * should be ok to iterate over the known list, we will see all cic's
2645     * since no new ones are added.
2646     */
2647    __call_for_each_cic(ioc, cic_free_func);
2648}
2649
2650static void cfq_put_cooperator(struct cfq_queue *cfqq)
2651{
2652    struct cfq_queue *__cfqq, *next;
2653
2654    /*
2655     * If this queue was scheduled to merge with another queue, be
2656     * sure to drop the reference taken on that queue (and others in
2657     * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
2658     */
2659    __cfqq = cfqq->new_cfqq;
2660    while (__cfqq) {
2661        if (__cfqq == cfqq) {
2662            WARN(1, "cfqq->new_cfqq loop detected\n");
2663            break;
2664        }
2665        next = __cfqq->new_cfqq;
2666        cfq_put_queue(__cfqq);
2667        __cfqq = next;
2668    }
2669}
2670
2671static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
2672{
2673    if (unlikely(cfqq == cfqd->active_queue)) {
2674        __cfq_slice_expired(cfqd, cfqq, 0);
2675        cfq_schedule_dispatch(cfqd);
2676    }
2677
2678    cfq_put_cooperator(cfqq);
2679
2680    cfq_put_queue(cfqq);
2681}
2682
2683static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
2684                     struct cfq_io_context *cic)
2685{
2686    struct io_context *ioc = cic->ioc;
2687
2688    list_del_init(&cic->queue_list);
2689
2690    /*
2691     * Make sure dead mark is seen for dead queues
2692     */
2693    smp_wmb();
2694    cic->key = cfqd_dead_key(cfqd);
2695
2696    if (ioc->ioc_data == cic)
2697        rcu_assign_pointer(ioc->ioc_data, NULL);
2698
2699    if (cic->cfqq[BLK_RW_ASYNC]) {
2700        cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
2701        cic->cfqq[BLK_RW_ASYNC] = NULL;
2702    }
2703
2704    if (cic->cfqq[BLK_RW_SYNC]) {
2705        cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
2706        cic->cfqq[BLK_RW_SYNC] = NULL;
2707    }
2708}
2709
2710static void cfq_exit_single_io_context(struct io_context *ioc,
2711                       struct cfq_io_context *cic)
2712{
2713    struct cfq_data *cfqd = cic_to_cfqd(cic);
2714
2715    if (cfqd) {
2716        struct request_queue *q = cfqd->queue;
2717        unsigned long flags;
2718
2719        spin_lock_irqsave(q->queue_lock, flags);
2720
2721        /*
2722         * Ensure we get a fresh copy of the ->key to prevent
2723         * race between exiting task and queue
2724         */
2725        smp_read_barrier_depends();
2726        if (cic->key == cfqd)
2727            __cfq_exit_single_io_context(cfqd, cic);
2728
2729        spin_unlock_irqrestore(q->queue_lock, flags);
2730    }
2731}
2732
2733/*
2734 * The process that ioc belongs to has exited, we need to clean up
2735 * and put the internal structures we have that belongs to that process.
2736 */
2737static void cfq_exit_io_context(struct io_context *ioc)
2738{
2739    call_for_each_cic(ioc, cfq_exit_single_io_context);
2740}
2741
2742static struct cfq_io_context *
2743cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
2744{
2745    struct cfq_io_context *cic;
2746
2747    cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
2748                            cfqd->queue->node);
2749    if (cic) {
2750        cic->last_end_request = jiffies;
2751        INIT_LIST_HEAD(&cic->queue_list);
2752        INIT_HLIST_NODE(&cic->cic_list);
2753        cic->dtor = cfq_free_io_context;
2754        cic->exit = cfq_exit_io_context;
2755        elv_ioc_count_inc(cfq_ioc_count);
2756    }
2757
2758    return cic;
2759}
2760
2761static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
2762{
2763    struct task_struct *tsk = current;
2764    int ioprio_class;
2765
2766    if (!cfq_cfqq_prio_changed(cfqq))
2767        return;
2768
2769    ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
2770    switch (ioprio_class) {
2771    default:
2772        printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
2773    case IOPRIO_CLASS_NONE:
2774        /*
2775         * no prio set, inherit CPU scheduling settings
2776         */
2777        cfqq->ioprio = task_nice_ioprio(tsk);
2778        cfqq->ioprio_class = task_nice_ioclass(tsk);
2779        break;
2780    case IOPRIO_CLASS_RT:
2781        cfqq->ioprio = task_ioprio(ioc);
2782        cfqq->ioprio_class = IOPRIO_CLASS_RT;
2783        break;
2784    case IOPRIO_CLASS_BE:
2785        cfqq->ioprio = task_ioprio(ioc);
2786        cfqq->ioprio_class = IOPRIO_CLASS_BE;
2787        break;
2788    case IOPRIO_CLASS_IDLE:
2789        cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
2790        cfqq->ioprio = 7;
2791        cfq_clear_cfqq_idle_window(cfqq);
2792        break;
2793    }
2794
2795    /*
2796     * keep track of original prio settings in case we have to temporarily
2797     * elevate the priority of this queue
2798     */
2799    cfqq->org_ioprio = cfqq->ioprio;
2800    cfqq->org_ioprio_class = cfqq->ioprio_class;
2801    cfq_clear_cfqq_prio_changed(cfqq);
2802}
2803
2804static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
2805{
2806    struct cfq_data *cfqd = cic_to_cfqd(cic);
2807    struct cfq_queue *cfqq;
2808    unsigned long flags;
2809
2810    if (unlikely(!cfqd))
2811        return;
2812
2813    spin_lock_irqsave(cfqd->queue->queue_lock, flags);
2814
2815    cfqq = cic->cfqq[BLK_RW_ASYNC];
2816    if (cfqq) {
2817        struct cfq_queue *new_cfqq;
2818        new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
2819                        GFP_ATOMIC);
2820        if (new_cfqq) {
2821            cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
2822            cfq_put_queue(cfqq);
2823        }
2824    }
2825
2826    cfqq = cic->cfqq[BLK_RW_SYNC];
2827    if (cfqq)
2828        cfq_mark_cfqq_prio_changed(cfqq);
2829
2830    spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
2831}
2832
2833static void cfq_ioc_set_ioprio(struct io_context *ioc)
2834{
2835    call_for_each_cic(ioc, changed_ioprio);
2836    ioc->ioprio_changed = 0;
2837}
2838
2839static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
2840              pid_t pid, bool is_sync)
2841{
2842    RB_CLEAR_NODE(&cfqq->rb_node);
2843    RB_CLEAR_NODE(&cfqq->p_node);
2844    INIT_LIST_HEAD(&cfqq->fifo);
2845
2846    atomic_set(&cfqq->ref, 0);
2847    cfqq->cfqd = cfqd;
2848
2849    cfq_mark_cfqq_prio_changed(cfqq);
2850
2851    if (is_sync) {
2852        if (!cfq_class_idle(cfqq))
2853            cfq_mark_cfqq_idle_window(cfqq);
2854        cfq_mark_cfqq_sync(cfqq);
2855    }
2856    cfqq->pid = pid;
2857}
2858
2859#ifdef CONFIG_CFQ_GROUP_IOSCHED
2860static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
2861{
2862    struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
2863    struct cfq_data *cfqd = cic_to_cfqd(cic);
2864    unsigned long flags;
2865    struct request_queue *q;
2866
2867    if (unlikely(!cfqd))
2868        return;
2869
2870    q = cfqd->queue;
2871
2872    spin_lock_irqsave(q->queue_lock, flags);
2873
2874    if (sync_cfqq) {
2875        /*
2876         * Drop reference to sync queue. A new sync queue will be
2877         * assigned in new group upon arrival of a fresh request.
2878         */
2879        cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
2880        cic_set_cfqq(cic, NULL, 1);
2881        cfq_put_queue(sync_cfqq);
2882    }
2883
2884    spin_unlock_irqrestore(q->queue_lock, flags);
2885}
2886
2887static void cfq_ioc_set_cgroup(struct io_context *ioc)
2888{
2889    call_for_each_cic(ioc, changed_cgroup);
2890    ioc->cgroup_changed = 0;
2891}
2892#endif /* CONFIG_CFQ_GROUP_IOSCHED */
2893
2894static struct cfq_queue *
2895cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
2896             struct io_context *ioc, gfp_t gfp_mask)
2897{
2898    struct cfq_queue *cfqq, *new_cfqq = NULL;
2899    struct cfq_io_context *cic;
2900    struct cfq_group *cfqg;
2901
2902retry:
2903    cfqg = cfq_get_cfqg(cfqd, 1);
2904    cic = cfq_cic_lookup(cfqd, ioc);
2905    /* cic always exists here */
2906    cfqq = cic_to_cfqq(cic, is_sync);
2907
2908    /*
2909     * Always try a new alloc if we fell back to the OOM cfqq
2910     * originally, since it should just be a temporary situation.
2911     */
2912    if (!cfqq || cfqq == &cfqd->oom_cfqq) {
2913        cfqq = NULL;
2914        if (new_cfqq) {
2915            cfqq = new_cfqq;
2916            new_cfqq = NULL;
2917        } else if (gfp_mask & __GFP_WAIT) {
2918            spin_unlock_irq(cfqd->queue->queue_lock);
2919            new_cfqq = kmem_cache_alloc_node(cfq_pool,
2920                    gfp_mask | __GFP_ZERO,
2921                    cfqd->queue->node);
2922            spin_lock_irq(cfqd->queue->queue_lock);
2923            if (new_cfqq)
2924                goto retry;
2925        } else {
2926            cfqq = kmem_cache_alloc_node(cfq_pool,
2927                    gfp_mask | __GFP_ZERO,
2928                    cfqd->queue->node);
2929        }
2930
2931        if (cfqq) {
2932            cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
2933            cfq_init_prio_data(cfqq, ioc);
2934            cfq_link_cfqq_cfqg(cfqq, cfqg);
2935            cfq_log_cfqq(cfqd, cfqq, "alloced");
2936        } else
2937            cfqq = &cfqd->oom_cfqq;
2938    }
2939
2940    if (new_cfqq)
2941        kmem_cache_free(cfq_pool, new_cfqq);
2942
2943    return cfqq;
2944}
2945
2946static struct cfq_queue **
2947cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
2948{
2949    switch (ioprio_class) {
2950    case IOPRIO_CLASS_RT:
2951        return &cfqd->async_cfqq[0][ioprio];
2952    case IOPRIO_CLASS_BE:
2953        return &cfqd->async_cfqq[1][ioprio];
2954    case IOPRIO_CLASS_IDLE:
2955        return &cfqd->async_idle_cfqq;
2956    default:
2957        BUG();
2958    }
2959}
2960
2961static struct cfq_queue *
2962cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
2963          gfp_t gfp_mask)
2964{
2965    const int ioprio = task_ioprio(ioc);
2966    const int ioprio_class = task_ioprio_class(ioc);
2967    struct cfq_queue **async_cfqq = NULL;
2968    struct cfq_queue *cfqq = NULL;
2969
2970    if (!is_sync) {
2971        async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
2972        cfqq = *async_cfqq;
2973    }
2974
2975    if (!cfqq)
2976        cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
2977
2978    /*
2979     * pin the queue now that it's allocated, scheduler exit will prune it
2980     */
2981    if (!is_sync && !(*async_cfqq)) {
2982        atomic_inc(&cfqq->ref);
2983        *async_cfqq = cfqq;
2984    }
2985
2986    atomic_inc(&cfqq->ref);
2987    return cfqq;
2988}
2989
2990/*
2991 * We drop cfq io contexts lazily, so we may find a dead one.
2992 */
2993static void
2994cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
2995          struct cfq_io_context *cic)
2996{
2997    unsigned long flags;
2998
2999    WARN_ON(!list_empty(&cic->queue_list));
3000    BUG_ON(cic->key != cfqd_dead_key(cfqd));
3001
3002    spin_lock_irqsave(&ioc->lock, flags);
3003
3004    BUG_ON(ioc->ioc_data == cic);
3005
3006    radix_tree_delete(&ioc->radix_root, cfqd->cic_index);
3007    hlist_del_rcu(&cic->cic_list);
3008    spin_unlock_irqrestore(&ioc->lock, flags);
3009
3010    cfq_cic_free(cic);
3011}
3012
3013static struct cfq_io_context *
3014cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
3015{
3016    struct cfq_io_context *cic;
3017    unsigned long flags;
3018
3019    if (unlikely(!ioc))
3020        return NULL;
3021
3022    rcu_read_lock();
3023
3024    /*
3025     * we maintain a last-hit cache, to avoid browsing over the tree
3026     */
3027    cic = rcu_dereference(ioc->ioc_data);
3028    if (cic && cic->key == cfqd) {
3029        rcu_read_unlock();
3030        return cic;
3031    }
3032
3033    do {
3034        cic = radix_tree_lookup(&ioc->radix_root, cfqd->cic_index);
3035        rcu_read_unlock();
3036        if (!cic)
3037            break;
3038        if (unlikely(cic->key != cfqd)) {
3039            cfq_drop_dead_cic(cfqd, ioc, cic);
3040            rcu_read_lock();
3041            continue;
3042        }
3043
3044        spin_lock_irqsave(&ioc->lock, flags);
3045        rcu_assign_pointer(ioc->ioc_data, cic);
3046        spin_unlock_irqrestore(&ioc->lock, flags);
3047        break;
3048    } while (1);
3049
3050    return cic;
3051}
3052
3053/*
3054 * Add cic into ioc, using cfqd as the search key. This enables us to lookup
3055 * the process specific cfq io context when entered from the block layer.
3056 * Also adds the cic to a per-cfqd list, used when this queue is removed.
3057 */
3058static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
3059            struct cfq_io_context *cic, gfp_t gfp_mask)
3060{
3061    unsigned long flags;
3062    int ret;
3063
3064    ret = radix_tree_preload(gfp_mask);
3065    if (!ret) {
3066        cic->ioc = ioc;
3067        cic->key = cfqd;
3068
3069        spin_lock_irqsave(&ioc->lock, flags);
3070        ret = radix_tree_insert(&ioc->radix_root,
3071                        cfqd->cic_index, cic);
3072        if (!ret)
3073            hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
3074        spin_unlock_irqrestore(&ioc->lock, flags);
3075
3076        radix_tree_preload_end();
3077
3078        if (!ret) {
3079            spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3080            list_add(&cic->queue_list, &cfqd->cic_list);
3081            spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3082        }
3083    }
3084
3085    if (ret)
3086        printk(KERN_ERR "cfq: cic link failed!\n");
3087
3088    return ret;
3089}
3090
3091/*
3092 * Setup general io context and cfq io context. There can be several cfq
3093 * io contexts per general io context, if this process is doing io to more
3094 * than one device managed by cfq.
3095 */
3096static struct cfq_io_context *
3097cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
3098{
3099    struct io_context *ioc = NULL;
3100    struct cfq_io_context *cic;
3101
3102    might_sleep_if(gfp_mask & __GFP_WAIT);
3103
3104    ioc = get_io_context(gfp_mask, cfqd->queue->node);
3105    if (!ioc)
3106        return NULL;
3107
3108    cic = cfq_cic_lookup(cfqd, ioc);
3109    if (cic)
3110        goto out;
3111
3112    cic = cfq_alloc_io_context(cfqd, gfp_mask);
3113    if (cic == NULL)
3114        goto err;
3115
3116    if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
3117        goto err_free;
3118
3119out:
3120    smp_read_barrier_depends();
3121    if (unlikely(ioc->ioprio_changed))
3122        cfq_ioc_set_ioprio(ioc);
3123
3124#ifdef CONFIG_CFQ_GROUP_IOSCHED
3125    if (unlikely(ioc->cgroup_changed))
3126        cfq_ioc_set_cgroup(ioc);
3127#endif
3128    return cic;
3129err_free:
3130    cfq_cic_free(cic);
3131err:
3132    put_io_context(ioc);
3133    return NULL;
3134}
3135
3136static void
3137cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
3138{
3139    unsigned long elapsed = jiffies - cic->last_end_request;
3140    unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
3141
3142    cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
3143    cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
3144    cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
3145}
3146
3147static void
3148cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3149               struct request *rq)
3150{
3151    sector_t sdist = 0;
3152    sector_t n_sec = blk_rq_sectors(rq);
3153    if (cfqq->last_request_pos) {
3154        if (cfqq->last_request_pos < blk_rq_pos(rq))
3155            sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
3156        else
3157            sdist = cfqq->last_request_pos - blk_rq_pos(rq);
3158    }
3159
3160    cfqq->seek_history <<= 1;
3161    if (blk_queue_nonrot(cfqd->queue))
3162        cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
3163    else
3164        cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
3165}
3166
3167/*
3168 * Disable idle window if the process thinks too long or seeks so much that
3169 * it doesn't matter
3170 */
3171static void
3172cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3173               struct cfq_io_context *cic)
3174{
3175    int old_idle, enable_idle;
3176
3177    /*
3178     * Don't idle for async or idle io prio class
3179     */
3180    if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
3181        return;
3182
3183    enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
3184
3185    if (cfqq->queued[0] + cfqq->queued[1] >= 4)
3186        cfq_mark_cfqq_deep(cfqq);
3187
3188    if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
3189        enable_idle = 0;
3190    else if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
3191        (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
3192        enable_idle = 0;
3193    else if (sample_valid(cic->ttime_samples)) {
3194        if (cic->ttime_mean > cfqd->cfq_slice_idle)
3195            enable_idle = 0;
3196        else
3197            enable_idle = 1;
3198    }
3199
3200    if (old_idle != enable_idle) {
3201        cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
3202        if (enable_idle)
3203            cfq_mark_cfqq_idle_window(cfqq);
3204        else
3205            cfq_clear_cfqq_idle_window(cfqq);
3206    }
3207}
3208
3209/*
3210 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3211 * no or if we aren't sure, a 1 will cause a preempt.
3212 */
3213static bool
3214cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
3215           struct request *rq)
3216{
3217    struct cfq_queue *cfqq;
3218
3219    cfqq = cfqd->active_queue;
3220    if (!cfqq)
3221        return false;
3222
3223    if (cfq_class_idle(new_cfqq))
3224        return false;
3225
3226    if (cfq_class_idle(cfqq))
3227        return true;
3228
3229    /*
3230     * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
3231     */
3232    if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
3233        return false;
3234
3235    /*
3236     * if the new request is sync, but the currently running queue is
3237     * not, let the sync request have priority.
3238     */
3239    if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
3240        return true;
3241
3242    if (new_cfqq->cfqg != cfqq->cfqg)
3243        return false;
3244
3245    if (cfq_slice_used(cfqq))
3246        return true;
3247
3248    /* Allow preemption only if we are idling on sync-noidle tree */
3249    if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
3250        cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
3251        new_cfqq->service_tree->count == 2 &&
3252        RB_EMPTY_ROOT(&cfqq->sort_list))
3253        return true;
3254
3255    /*
3256     * So both queues are sync. Let the new request get disk time if
3257     * it's a metadata request and the current queue is doing regular IO.
3258     */
3259    if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending)
3260        return true;
3261
3262    /*
3263     * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
3264     */
3265    if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
3266        return true;
3267
3268    if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
3269        return false;
3270
3271    /*
3272     * if this request is as-good as one we would expect from the
3273     * current cfqq, let it preempt
3274     */
3275    if (cfq_rq_close(cfqd, cfqq, rq))
3276        return true;
3277
3278    return false;
3279}
3280
3281/*
3282 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3283 * let it have half of its nominal slice.
3284 */
3285static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3286{
3287    cfq_log_cfqq(cfqd, cfqq, "preempt");
3288    cfq_slice_expired(cfqd, 1);
3289
3290    /*
3291     * Put the new queue at the front of the of the current list,
3292     * so we know that it will be selected next.
3293     */
3294    BUG_ON(!cfq_cfqq_on_rr(cfqq));
3295
3296    cfq_service_tree_add(cfqd, cfqq, 1);
3297
3298    cfqq->slice_end = 0;
3299    cfq_mark_cfqq_slice_new(cfqq);
3300}
3301
3302/*
3303 * Called when a new fs request (rq) is added (to cfqq). Check if there's
3304 * something we should do about it
3305 */
3306static void
3307cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3308        struct request *rq)
3309{
3310    struct cfq_io_context *cic = RQ_CIC(rq);
3311
3312    cfqd->rq_queued++;
3313    if (rq->cmd_flags & REQ_META)
3314        cfqq->meta_pending++;
3315
3316    cfq_update_io_thinktime(cfqd, cic);
3317    cfq_update_io_seektime(cfqd, cfqq, rq);
3318    cfq_update_idle_window(cfqd, cfqq, cic);
3319
3320    cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
3321
3322    if (cfqq == cfqd->active_queue) {
3323        /*
3324         * Remember that we saw a request from this process, but
3325         * don't start queuing just yet. Otherwise we risk seeing lots
3326         * of tiny requests, because we disrupt the normal plugging
3327         * and merging. If the request is already larger than a single
3328         * page, let it rip immediately. For that case we assume that
3329         * merging is already done. Ditto for a busy system that
3330         * has other work pending, don't risk delaying until the
3331         * idle timer unplug to continue working.
3332         */
3333        if (cfq_cfqq_wait_request(cfqq)) {
3334            if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
3335                cfqd->busy_queues > 1) {
3336                cfq_del_timer(cfqd, cfqq);
3337                cfq_clear_cfqq_wait_request(cfqq);
3338                __blk_run_queue(cfqd->queue);
3339            } else {
3340                cfq_blkiocg_update_idle_time_stats(
3341                        &cfqq->cfqg->blkg);
3342                cfq_mark_cfqq_must_dispatch(cfqq);
3343            }
3344        }
3345    } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
3346        /*
3347         * not the active queue - expire current slice if it is
3348         * idle and has expired it's mean thinktime or this new queue
3349         * has some old slice time left and is of higher priority or
3350         * this new queue is RT and the current one is BE
3351         */
3352        cfq_preempt_queue(cfqd, cfqq);
3353        __blk_run_queue(cfqd->queue);
3354    }
3355}
3356
3357static void cfq_insert_request(struct request_queue *q, struct request *rq)
3358{
3359    struct cfq_data *cfqd = q->elevator->elevator_data;
3360    struct cfq_queue *cfqq = RQ_CFQQ(rq);
3361
3362    cfq_log_cfqq(cfqd, cfqq, "insert_request");
3363    cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
3364
3365    rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
3366    list_add_tail(&rq->queuelist, &cfqq->fifo);
3367    cfq_add_rq_rb(rq);
3368    cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
3369            &cfqd->serving_group->blkg, rq_data_dir(rq),
3370            rq_is_sync(rq));
3371    cfq_rq_enqueued(cfqd, cfqq, rq);
3372}
3373
3374/*
3375 * Update hw_tag based on peak queue depth over 50 samples under
3376 * sufficient load.
3377 */
3378static void cfq_update_hw_tag(struct cfq_data *cfqd)
3379{
3380    struct cfq_queue *cfqq = cfqd->active_queue;
3381
3382    if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
3383        cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
3384
3385    if (cfqd->hw_tag == 1)
3386        return;
3387
3388    if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
3389        cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
3390        return;
3391
3392    /*
3393     * If active queue hasn't enough requests and can idle, cfq might not
3394     * dispatch sufficient requests to hardware. Don't zero hw_tag in this
3395     * case
3396     */
3397    if (cfqq && cfq_cfqq_idle_window(cfqq) &&
3398        cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
3399        CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
3400        return;
3401
3402    if (cfqd->hw_tag_samples++ < 50)
3403        return;
3404
3405    if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
3406        cfqd->hw_tag = 1;
3407    else
3408        cfqd->hw_tag = 0;
3409}
3410
3411static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
3412{
3413    struct cfq_io_context *cic = cfqd->active_cic;
3414
3415    /* If there are other queues in the group, don't wait */
3416    if (cfqq->cfqg->nr_cfqq > 1)
3417        return false;
3418
3419    if (cfq_slice_used(cfqq))
3420        return true;
3421
3422    /* if slice left is less than think time, wait busy */
3423    if (cic && sample_valid(cic->ttime_samples)
3424        && (cfqq->slice_end - jiffies < cic->ttime_mean))
3425        return true;
3426
3427    /*
3428     * If think times is less than a jiffy than ttime_mean=0 and above
3429     * will not be true. It might happen that slice has not expired yet
3430     * but will expire soon (4-5 ns) during select_queue(). To cover the
3431     * case where think time is less than a jiffy, mark the queue wait
3432     * busy if only 1 jiffy is left in the slice.
3433     */
3434    if (cfqq->slice_end - jiffies == 1)
3435        return true;
3436
3437    return false;
3438}
3439
3440static void cfq_completed_request(struct request_queue *q, struct request *rq)
3441{
3442    struct cfq_queue *cfqq = RQ_CFQQ(rq);
3443    struct cfq_data *cfqd = cfqq->cfqd;
3444    const int sync = rq_is_sync(rq);
3445    unsigned long now;
3446
3447    now = jiffies;
3448    cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
3449             !!(rq->cmd_flags & REQ_NOIDLE));
3450
3451    cfq_update_hw_tag(cfqd);
3452
3453    WARN_ON(!cfqd->rq_in_driver);
3454    WARN_ON(!cfqq->dispatched);
3455    cfqd->rq_in_driver--;
3456    cfqq->dispatched--;
3457    (RQ_CFQG(rq))->dispatched--;
3458    cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
3459            rq_start_time_ns(rq), rq_io_start_time_ns(rq),
3460            rq_data_dir(rq), rq_is_sync(rq));
3461
3462    cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
3463
3464    if (sync) {
3465        RQ_CIC(rq)->last_end_request = now;
3466        if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
3467            cfqd->last_delayed_sync = now;
3468    }
3469
3470    /*
3471     * If this is the active queue, check if it needs to be expired,
3472     * or if we want to idle in case it has no pending requests.
3473     */
3474    if (cfqd->active_queue == cfqq) {
3475        const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
3476
3477        if (cfq_cfqq_slice_new(cfqq)) {
3478            cfq_set_prio_slice(cfqd, cfqq);
3479            cfq_clear_cfqq_slice_new(cfqq);
3480        }
3481
3482        /*
3483         * Should we wait for next request to come in before we expire
3484         * the queue.
3485         */
3486        if (cfq_should_wait_busy(cfqd, cfqq)) {
3487            unsigned long extend_sl = cfqd->cfq_slice_idle;
3488            if (!cfqd->cfq_slice_idle)
3489                extend_sl = cfqd->cfq_group_idle;
3490            cfqq->slice_end = jiffies + extend_sl;
3491            cfq_mark_cfqq_wait_busy(cfqq);
3492            cfq_log_cfqq(cfqd, cfqq, "will busy wait");
3493        }
3494
3495        /*
3496         * Idling is not enabled on:
3497         * - expired queues
3498         * - idle-priority queues
3499         * - async queues
3500         * - queues with still some requests queued
3501         * - when there is a close cooperator
3502         */
3503        if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
3504            cfq_slice_expired(cfqd, 1);
3505        else if (sync && cfqq_empty &&
3506             !cfq_close_cooperator(cfqd, cfqq)) {
3507            cfq_arm_slice_timer(cfqd);
3508        }
3509    }
3510
3511    if (!cfqd->rq_in_driver)
3512        cfq_schedule_dispatch(cfqd);
3513}
3514
3515/*
3516 * we temporarily boost lower priority queues if they are holding fs exclusive
3517 * resources. they are boosted to normal prio (CLASS_BE/4)
3518 */
3519static void cfq_prio_boost(struct cfq_queue *cfqq)
3520{
3521    if (has_fs_excl()) {
3522        /*
3523         * boost idle prio on transactions that would lock out other
3524         * users of the filesystem
3525         */
3526        if (cfq_class_idle(cfqq))
3527            cfqq->ioprio_class = IOPRIO_CLASS_BE;
3528        if (cfqq->ioprio > IOPRIO_NORM)
3529            cfqq->ioprio = IOPRIO_NORM;
3530    } else {
3531        /*
3532         * unboost the queue (if needed)
3533         */
3534        cfqq->ioprio_class = cfqq->org_ioprio_class;
3535        cfqq->ioprio = cfqq->org_ioprio;
3536    }
3537}
3538
3539static inline int __cfq_may_queue(struct cfq_queue *cfqq)
3540{
3541    if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
3542        cfq_mark_cfqq_must_alloc_slice(cfqq);
3543        return ELV_MQUEUE_MUST;
3544    }
3545
3546    return ELV_MQUEUE_MAY;
3547}
3548
3549static int cfq_may_queue(struct request_queue *q, int rw)
3550{
3551    struct cfq_data *cfqd = q->elevator->elevator_data;
3552    struct task_struct *tsk = current;
3553    struct cfq_io_context *cic;
3554    struct cfq_queue *cfqq;
3555
3556    /*
3557     * don't force setup of a queue from here, as a call to may_queue
3558     * does not necessarily imply that a request actually will be queued.
3559     * so just lookup a possibly existing queue, or return 'may queue'
3560     * if that fails
3561     */
3562    cic = cfq_cic_lookup(cfqd, tsk->io_context);
3563    if (!cic)
3564        return ELV_MQUEUE_MAY;
3565
3566    cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
3567    if (cfqq) {
3568        cfq_init_prio_data(cfqq, cic->ioc);
3569        cfq_prio_boost(cfqq);
3570
3571        return __cfq_may_queue(cfqq);
3572    }
3573
3574    return ELV_MQUEUE_MAY;
3575}
3576
3577/*
3578 * queue lock held here
3579 */
3580static void cfq_put_request(struct request *rq)
3581{
3582    struct cfq_queue *cfqq = RQ_CFQQ(rq);
3583
3584    if (cfqq) {
3585        const int rw = rq_data_dir(rq);
3586
3587        BUG_ON(!cfqq->allocated[rw]);
3588        cfqq->allocated[rw]--;
3589
3590        put_io_context(RQ_CIC(rq)->ioc);
3591
3592        rq->elevator_private = NULL;
3593        rq->elevator_private2 = NULL;
3594
3595        /* Put down rq reference on cfqg */
3596        cfq_put_cfqg(RQ_CFQG(rq));
3597        rq->elevator_private3 = NULL;
3598
3599        cfq_put_queue(cfqq);
3600    }
3601}
3602
3603static struct cfq_queue *
3604cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
3605        struct cfq_queue *cfqq)
3606{
3607    cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
3608    cic_set_cfqq(cic, cfqq->new_cfqq, 1);
3609    cfq_mark_cfqq_coop(cfqq->new_cfqq);
3610    cfq_put_queue(cfqq);
3611    return cic_to_cfqq(cic, 1);
3612}
3613
3614/*
3615 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
3616 * was the last process referring to said cfqq.
3617 */
3618static struct cfq_queue *
3619split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
3620{
3621    if (cfqq_process_refs(cfqq) == 1) {
3622        cfqq->pid = current->pid;
3623        cfq_clear_cfqq_coop(cfqq);
3624        cfq_clear_cfqq_split_coop(cfqq);
3625        return cfqq;
3626    }
3627
3628    cic_set_cfqq(cic, NULL, 1);
3629
3630    cfq_put_cooperator(cfqq);
3631
3632    cfq_put_queue(cfqq);
3633    return NULL;
3634}
3635/*
3636 * Allocate cfq data structures associated with this request.
3637 */
3638static int
3639cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
3640{
3641    struct cfq_data *cfqd = q->elevator->elevator_data;
3642    struct cfq_io_context *cic;
3643    const int rw = rq_data_dir(rq);
3644    const bool is_sync = rq_is_sync(rq);
3645    struct cfq_queue *cfqq;
3646    unsigned long flags;
3647
3648    might_sleep_if(gfp_mask & __GFP_WAIT);
3649
3650    cic = cfq_get_io_context(cfqd, gfp_mask);
3651
3652    spin_lock_irqsave(q->queue_lock, flags);
3653
3654    if (!cic)
3655        goto queue_fail;
3656
3657new_queue:
3658    cfqq = cic_to_cfqq(cic, is_sync);
3659    if (!cfqq || cfqq == &cfqd->oom_cfqq) {
3660        cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
3661        cic_set_cfqq(cic, cfqq, is_sync);
3662    } else {
3663        /*
3664         * If the queue was seeky for too long, break it apart.
3665         */
3666        if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
3667            cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
3668            cfqq = split_cfqq(cic, cfqq);
3669            if (!cfqq)
3670                goto new_queue;
3671        }
3672
3673        /*
3674         * Check to see if this queue is scheduled to merge with
3675         * another, closely cooperating queue. The merging of
3676         * queues happens here as it must be done in process context.
3677         * The reference on new_cfqq was taken in merge_cfqqs.
3678         */
3679        if (cfqq->new_cfqq)
3680            cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
3681    }
3682
3683    cfqq->allocated[rw]++;
3684    atomic_inc(&cfqq->ref);
3685
3686    spin_unlock_irqrestore(q->queue_lock, flags);
3687
3688    rq->elevator_private = cic;
3689    rq->elevator_private2 = cfqq;
3690    rq->elevator_private3 = cfq_ref_get_cfqg(cfqq->cfqg);
3691    return 0;
3692
3693queue_fail:
3694    if (cic)
3695        put_io_context(cic->ioc);
3696
3697    cfq_schedule_dispatch(cfqd);
3698    spin_unlock_irqrestore(q->queue_lock, flags);
3699    cfq_log(cfqd, "set_request fail");
3700    return 1;
3701}
3702
3703static void cfq_kick_queue(struct work_struct *work)
3704{
3705    struct cfq_data *cfqd =
3706        container_of(work, struct cfq_data, unplug_work);
3707    struct request_queue *q = cfqd->queue;
3708
3709    spin_lock_irq(q->queue_lock);
3710    __blk_run_queue(cfqd->queue);
3711    spin_unlock_irq(q->queue_lock);
3712}
3713
3714/*
3715 * Timer running if the active_queue is currently idling inside its time slice
3716 */
3717static void cfq_idle_slice_timer(unsigned long data)
3718{
3719    struct cfq_data *cfqd = (struct cfq_data *) data;
3720    struct cfq_queue *cfqq;
3721    unsigned long flags;
3722    int timed_out = 1;
3723
3724    cfq_log(cfqd, "idle timer fired");
3725
3726    spin_lock_irqsave(cfqd->queue->queue_lock, flags);
3727
3728    cfqq = cfqd->active_queue;
3729    if (cfqq) {
3730        timed_out = 0;
3731
3732        /*
3733         * We saw a request before the queue expired, let it through
3734         */
3735        if (cfq_cfqq_must_dispatch(cfqq))
3736            goto out_kick;
3737
3738        /*
3739         * expired
3740         */
3741        if (cfq_slice_used(cfqq))
3742            goto expire;
3743
3744        /*
3745         * only expire and reinvoke request handler, if there are
3746         * other queues with pending requests
3747         */
3748        if (!cfqd->busy_queues)
3749            goto out_cont;
3750
3751        /*
3752         * not expired and it has a request pending, let it dispatch
3753         */
3754        if (!RB_EMPTY_ROOT(&cfqq->sort_list))
3755            goto out_kick;
3756
3757        /*
3758         * Queue depth flag is reset only when the idle didn't succeed
3759         */
3760        cfq_clear_cfqq_deep(cfqq);
3761    }
3762expire:
3763    cfq_slice_expired(cfqd, timed_out);
3764out_kick:
3765    cfq_schedule_dispatch(cfqd);
3766out_cont:
3767    spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
3768}
3769
3770static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
3771{
3772    del_timer_sync(&cfqd->idle_slice_timer);
3773    cancel_work_sync(&cfqd->unplug_work);
3774}
3775
3776static void cfq_put_async_queues(struct cfq_data *cfqd)
3777{
3778    int i;
3779
3780    for (i = 0; i < IOPRIO_BE_NR; i++) {
3781        if (cfqd->async_cfqq[0][i])
3782            cfq_put_queue(cfqd->async_cfqq[0][i]);
3783        if (cfqd->async_cfqq[1][i])
3784            cfq_put_queue(cfqd->async_cfqq[1][i]);
3785    }
3786
3787    if (cfqd->async_idle_cfqq)
3788        cfq_put_queue(cfqd->async_idle_cfqq);
3789}
3790
3791static void cfq_cfqd_free(struct rcu_head *head)
3792{
3793    kfree(container_of(head, struct cfq_data, rcu));
3794}
3795
3796static void cfq_exit_queue(struct elevator_queue *e)
3797{
3798    struct cfq_data *cfqd = e->elevator_data;
3799    struct request_queue *q = cfqd->queue;
3800
3801    cfq_shutdown_timer_wq(cfqd);
3802
3803    spin_lock_irq(q->queue_lock);
3804
3805    if (cfqd->active_queue)
3806        __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
3807
3808    while (!list_empty(&cfqd->cic_list)) {
3809        struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
3810                            struct cfq_io_context,
3811                            queue_list);
3812
3813        __cfq_exit_single_io_context(cfqd, cic);
3814    }
3815
3816    cfq_put_async_queues(cfqd);
3817    cfq_release_cfq_groups(cfqd);
3818    cfq_blkiocg_del_blkio_group(&cfqd->root_group.blkg);
3819
3820    spin_unlock_irq(q->queue_lock);
3821
3822    cfq_shutdown_timer_wq(cfqd);
3823
3824    spin_lock(&cic_index_lock);
3825    ida_remove(&cic_index_ida, cfqd->cic_index);
3826    spin_unlock(&cic_index_lock);
3827
3828    /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
3829    call_rcu(&cfqd->rcu, cfq_cfqd_free);
3830}
3831
3832static int cfq_alloc_cic_index(void)
3833{
3834    int index, error;
3835
3836    do {
3837        if (!ida_pre_get(&cic_index_ida, GFP_KERNEL))
3838            return -ENOMEM;
3839
3840        spin_lock(&cic_index_lock);
3841        error = ida_get_new(&cic_index_ida, &index);
3842        spin_unlock(&cic_index_lock);
3843        if (error && error != -EAGAIN)
3844            return error;
3845    } while (error);
3846
3847    return index;
3848}
3849
3850static void *cfq_init_queue(struct request_queue *q)
3851{
3852    struct cfq_data *cfqd;
3853    int i, j;
3854    struct cfq_group *cfqg;
3855    struct cfq_rb_root *st;
3856
3857    i = cfq_alloc_cic_index();
3858    if (i < 0)
3859        return NULL;
3860
3861    cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
3862    if (!cfqd)
3863        return NULL;
3864
3865    cfqd->cic_index = i;
3866
3867    /* Init root service tree */
3868    cfqd->grp_service_tree = CFQ_RB_ROOT;
3869
3870    /* Init root group */
3871    cfqg = &cfqd->root_group;
3872    for_each_cfqg_st(cfqg, i, j, st)
3873        *st = CFQ_RB_ROOT;
3874    RB_CLEAR_NODE(&cfqg->rb_node);
3875
3876    /* Give preference to root group over other groups */
3877    cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
3878
3879#ifdef CONFIG_CFQ_GROUP_IOSCHED
3880    /*
3881     * Take a reference to root group which we never drop. This is just
3882     * to make sure that cfq_put_cfqg() does not try to kfree root group
3883     */
3884    atomic_set(&cfqg->ref, 1);
3885    rcu_read_lock();
3886    cfq_blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg,
3887                    (void *)cfqd, 0);
3888    rcu_read_unlock();
3889#endif
3890    /*
3891     * Not strictly needed (since RB_ROOT just clears the node and we
3892     * zeroed cfqd on alloc), but better be safe in case someone decides
3893     * to add magic to the rb code
3894     */
3895    for (i = 0; i < CFQ_PRIO_LISTS; i++)
3896        cfqd->prio_trees[i] = RB_ROOT;
3897
3898    /*
3899     * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
3900     * Grab a permanent reference to it, so that the normal code flow
3901     * will not attempt to free it.
3902     */
3903    cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
3904    atomic_inc(&cfqd->oom_cfqq.ref);
3905    cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
3906
3907    INIT_LIST_HEAD(&cfqd->cic_list);
3908
3909    cfqd->queue = q;
3910
3911    init_timer(&cfqd->idle_slice_timer);
3912    cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
3913    cfqd->idle_slice_timer.data = (unsigned long) cfqd;
3914
3915    INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
3916
3917    cfqd->cfq_quantum = cfq_quantum;
3918    cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
3919    cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
3920    cfqd->cfq_back_max = cfq_back_max;
3921    cfqd->cfq_back_penalty = cfq_back_penalty;
3922    cfqd->cfq_slice[0] = cfq_slice_async;
3923    cfqd->cfq_slice[1] = cfq_slice_sync;
3924    cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3925    cfqd->cfq_slice_idle = cfq_slice_idle;
3926    cfqd->cfq_group_idle = cfq_group_idle;
3927    cfqd->cfq_latency = 1;
3928    cfqd->cfq_group_isolation = 0;
3929    cfqd->hw_tag = -1;
3930    /*
3931     * we optimistically start assuming sync ops weren't delayed in last
3932     * second, in order to have larger depth for async operations.
3933     */
3934    cfqd->last_delayed_sync = jiffies - HZ;
3935    return cfqd;
3936}
3937
3938static void cfq_slab_kill(void)
3939{
3940    /*
3941     * Caller already ensured that pending RCU callbacks are completed,
3942     * so we should have no busy allocations at this point.
3943     */
3944    if (cfq_pool)
3945        kmem_cache_destroy(cfq_pool);
3946    if (cfq_ioc_pool)
3947        kmem_cache_destroy(cfq_ioc_pool);
3948}
3949
3950static int __init cfq_slab_setup(void)
3951{
3952    cfq_pool = KMEM_CACHE(cfq_queue, 0);
3953    if (!cfq_pool)
3954        goto fail;
3955
3956    cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
3957    if (!cfq_ioc_pool)
3958        goto fail;
3959
3960    return 0;
3961fail:
3962    cfq_slab_kill();
3963    return -ENOMEM;
3964}
3965
3966/*
3967 * sysfs parts below -->
3968 */
3969static ssize_t
3970cfq_var_show(unsigned int var, char *page)
3971{
3972    return sprintf(page, "%d\n", var);
3973}
3974
3975static ssize_t
3976cfq_var_store(unsigned int *var, const char *page, size_t count)
3977{
3978    char *p = (char *) page;
3979
3980    *var = simple_strtoul(p, &p, 10);
3981    return count;
3982}
3983
3984#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
3985static ssize_t __FUNC(struct elevator_queue *e, char *page) \
3986{ \
3987    struct cfq_data *cfqd = e->elevator_data; \
3988    unsigned int __data = __VAR; \
3989    if (__CONV) \
3990        __data = jiffies_to_msecs(__data); \
3991    return cfq_var_show(__data, (page)); \
3992}
3993SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
3994SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
3995SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
3996SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
3997SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
3998SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
3999SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
4000SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
4001SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
4002SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
4003SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
4004SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
4005#undef SHOW_FUNCTION
4006
4007#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
4008static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
4009{ \
4010    struct cfq_data *cfqd = e->elevator_data; \
4011    unsigned int __data; \
4012    int ret = cfq_var_store(&__data, (page), count); \
4013    if (__data < (MIN)) \
4014        __data = (MIN); \
4015    else if (__data > (MAX)) \
4016        __data = (MAX); \
4017    if (__CONV) \
4018        *(__PTR) = msecs_to_jiffies(__data); \
4019    else \
4020        *(__PTR) = __data; \
4021    return ret; \
4022}
4023STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
4024STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
4025        UINT_MAX, 1);
4026STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
4027        UINT_MAX, 1);
4028STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
4029STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
4030        UINT_MAX, 0);
4031STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
4032STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
4033STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
4034STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
4035STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
4036        UINT_MAX, 0);
4037STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
4038STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
4039#undef STORE_FUNCTION
4040
4041#define CFQ_ATTR(name) \
4042    __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
4043
4044static struct elv_fs_entry cfq_attrs[] = {
4045    CFQ_ATTR(quantum),
4046    CFQ_ATTR(fifo_expire_sync),
4047    CFQ_ATTR(fifo_expire_async),
4048    CFQ_ATTR(back_seek_max),
4049    CFQ_ATTR(back_seek_penalty),
4050    CFQ_ATTR(slice_sync),
4051    CFQ_ATTR(slice_async),
4052    CFQ_ATTR(slice_async_rq),
4053    CFQ_ATTR(slice_idle),
4054    CFQ_ATTR(group_idle),
4055    CFQ_ATTR(low_latency),
4056    CFQ_ATTR(group_isolation),
4057    __ATTR_NULL
4058};
4059
4060static struct elevator_type iosched_cfq = {
4061    .ops = {
4062        .elevator_merge_fn = cfq_merge,
4063        .elevator_merged_fn = cfq_merged_request,
4064        .elevator_merge_req_fn = cfq_merged_requests,
4065        .elevator_allow_merge_fn = cfq_allow_merge,
4066        .elevator_bio_merged_fn = cfq_bio_merged,
4067        .elevator_dispatch_fn = cfq_dispatch_requests,
4068        .elevator_add_req_fn = cfq_insert_request,
4069        .elevator_activate_req_fn = cfq_activate_request,
4070        .elevator_deactivate_req_fn = cfq_deactivate_request,
4071        .elevator_queue_empty_fn = cfq_queue_empty,
4072        .elevator_completed_req_fn = cfq_completed_request,
4073        .elevator_former_req_fn = elv_rb_former_request,
4074        .elevator_latter_req_fn = elv_rb_latter_request,
4075        .elevator_set_req_fn = cfq_set_request,
4076        .elevator_put_req_fn = cfq_put_request,
4077        .elevator_may_queue_fn = cfq_may_queue,
4078        .elevator_init_fn = cfq_init_queue,
4079        .elevator_exit_fn = cfq_exit_queue,
4080        .trim = cfq_free_io_context,
4081    },
4082    .elevator_attrs = cfq_attrs,
4083    .elevator_name = "cfq",
4084    .elevator_owner = THIS_MODULE,
4085};
4086
4087#ifdef CONFIG_CFQ_GROUP_IOSCHED
4088static struct blkio_policy_type blkio_policy_cfq = {
4089    .ops = {
4090        .blkio_unlink_group_fn = cfq_unlink_blkio_group,
4091        .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
4092    },
4093    .plid = BLKIO_POLICY_PROP,
4094};
4095#else
4096static struct blkio_policy_type blkio_policy_cfq;
4097#endif
4098
4099static int __init cfq_init(void)
4100{
4101    /*
4102     * could be 0 on HZ < 1000 setups
4103     */
4104    if (!cfq_slice_async)
4105        cfq_slice_async = 1;
4106    if (!cfq_slice_idle)
4107        cfq_slice_idle = 1;
4108
4109#ifdef CONFIG_CFQ_GROUP_IOSCHED
4110    if (!cfq_group_idle)
4111        cfq_group_idle = 1;
4112#else
4113        cfq_group_idle = 0;
4114#endif
4115    if (cfq_slab_setup())
4116        return -ENOMEM;
4117
4118    elv_register(&iosched_cfq);
4119    blkio_policy_register(&blkio_policy_cfq);
4120
4121    return 0;
4122}
4123
4124static void __exit cfq_exit(void)
4125{
4126    DECLARE_COMPLETION_ONSTACK(all_gone);
4127    blkio_policy_unregister(&blkio_policy_cfq);
4128    elv_unregister(&iosched_cfq);
4129    ioc_gone = &all_gone;
4130    /* ioc_gone's update must be visible before reading ioc_count */
4131    smp_wmb();
4132
4133    /*
4134     * this also protects us from entering cfq_slab_kill() with
4135     * pending RCU callbacks
4136     */
4137    if (elv_ioc_count_read(cfq_ioc_count))
4138        wait_for_completion(&all_gone);
4139    ida_destroy(&cic_index_ida);
4140    cfq_slab_kill();
4141}
4142
4143module_init(cfq_init);
4144module_exit(cfq_exit);
4145
4146MODULE_AUTHOR("Jens Axboe");
4147MODULE_LICENSE("GPL");
4148MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
4149

Archive Download this file



interactive