block/blk-cgroup.h

#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX UINT_MAX

/* CFQ specific, out here for blkcg->cfq_weight */
#define CFQ_WEIGHT_MIN 10
#define CFQ_WEIGHT_MAX 1000
#define CFQ_WEIGHT_DEFAULT 500

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
    BLKG_RWSTAT_READ,
    BLKG_RWSTAT_WRITE,
    BLKG_RWSTAT_SYNC,
    BLKG_RWSTAT_ASYNC,

    BLKG_RWSTAT_NR,
    BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
    struct cgroup_subsys_state css;
    spinlock_t lock;

    struct radix_tree_root blkg_tree;
    struct blkcg_gq *blkg_hint;
    struct hlist_head blkg_list;

    /* for policies to test whether associated blkcg has changed */
    uint64_t id;

    /* TODO: per-policy storage in blkcg */
    unsigned int cfq_weight; /* belongs to cfq */
    unsigned int cfq_leaf_weight;
};

struct blkg_stat {
    struct u64_stats_sync syncp;
    uint64_t cnt;
};

struct blkg_rwstat {
    struct u64_stats_sync syncp;
    uint64_t cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q). This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each has its private
 * data on each blkg, the size of which is determined by
 * blkcg_policy->pd_size. blkcg core allocates and frees such areas
 * together with blkg and invokes pd_init/exit_fn() methods.
 *
 * Such private data must embed struct blkg_policy_data (pd) at the
 * beginning and pd_size can't be smaller than sizeof(pd).
 */
struct blkg_policy_data {
    /* the blkg and policy id this per-policy data belongs to */
    struct blkcg_gq *blkg;
    int plid;

    /* used during policy activation */
    struct list_head alloc_node;
};
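
/*
 * For illustration only (not part of the original header): a hypothetical
 * policy "foo" would lay out its per-blkg data with pd as the first
 * member, so blkcg core can treat the whole area as a blkg_policy_data
 * and the policy can get its own type back with container_of():
 *
 *    struct foo_pd {
 *        struct blkg_policy_data pd;   // must come first
 *        unsigned int weight;          // policy-private fields follow
 *    };
 *
 * and would set .pd_size = sizeof(struct foo_pd) in its blkcg_policy.
 */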

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
    /* Pointer to the associated request_queue */
    struct request_queue *q;
    struct list_head q_node;
    struct hlist_node blkcg_node;
    struct blkcg *blkcg;

    /* all non-root blkcg_gq's are guaranteed to have access to parent */
    struct blkcg_gq *parent;

    /* request allocation list for this blkcg-q pair */
    struct request_list rl;

    /* reference count */
    int refcnt;

    /* is this blkg online? protected by both blkcg and q locks */
    bool online;

    struct blkg_policy_data *pd[BLKCG_MAX_POLS];

    struct rcu_head rcu_head;
};

typedef void (blkcg_pol_init_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_online_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_offline_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_exit_pd_fn)(struct blkcg_gq *blkg);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkcg_gq *blkg);

struct blkcg_policy {
    int plid;
    /* policy specific private data size */
    size_t pd_size;
    /* cgroup files for the policy */
    struct cftype *cftypes;

    /* operations */
    blkcg_pol_init_pd_fn *pd_init_fn;
    blkcg_pol_online_pd_fn *pd_online_fn;
    blkcg_pol_offline_pd_fn *pd_offline_fn;
    blkcg_pol_exit_pd_fn *pd_exit_fn;
    blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;

struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
              const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                 const struct blkcg_policy *pol);
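
/*
 * Illustrative sketch (identifiers are hypothetical, not part of this
 * header): a policy fills in a blkcg_policy, registers it once, and
 * activates it on each queue it manages. blkcg_policy_register() assigns
 * plid, so the policy doesn't set it:
 *
 *    static struct blkcg_policy foo_policy = {
 *        .pd_size    = sizeof(struct foo_pd),
 *        .cftypes    = foo_files,
 *        .pd_init_fn = foo_pd_init,
 *        .pd_exit_fn = foo_pd_exit,
 *    };
 *
 *    // module init:  blkcg_policy_register(&foo_policy);
 *    // per queue:    blkcg_activate_policy(q, &foo_policy);
 *    // teardown:     blkcg_deactivate_policy(q, &foo_policy);
 *    //               blkcg_policy_unregister(&foo_policy);
 */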

void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
               u64 (*prfill)(struct seq_file *,
                     struct blkg_policy_data *, int),
               const struct blkcg_policy *pol, int data,
               bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
             const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
               int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                         int off);

struct blkg_conf_ctx {
    struct gendisk *disk;
    struct blkcg_gq *blkg;
    u64 v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
           const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
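
/*
 * Illustrative sketch (hypothetical handler and helper): a cgroup file
 * write method typically brackets its work with blkg_conf_prep() and
 * blkg_conf_finish(). prep parses "MAJ:MIN VAL" style @input, looks up
 * the blkg, and returns with the queue locked; finish unlocks and
 * releases the disk:
 *
 *    int foo_set_limit(struct blkcg *blkcg, const char *input)
 *    {
 *        struct blkg_conf_ctx ctx;
 *        int ret;
 *
 *        ret = blkg_conf_prep(blkcg, &foo_policy, input, &ctx);
 *        if (ret)
 *            return ret;
 *        foo_apply_limit(ctx.blkg, ctx.v);    // ctx.v is the parsed value
 *        blkg_conf_finish(&ctx);
 *        return 0;
 *    }
 */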

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
    return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
    return css_to_blkcg(task_css(tsk, blkio_subsys_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
    if (bio && bio->bi_css)
        return css_to_blkcg(bio->bi_css);
    return task_blkcg(current);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg. Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
    return css_to_blkcg(css_parent(&blkcg->css));
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                          struct blkcg_policy *pol)
{
    return blkg ? blkg->pd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data. Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
    return pd ? pd->blkg : NULL;
}
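
/*
 * Illustrative sketch (hypothetical "foo" policy): policies usually wrap
 * these generic accessors with container_of() helpers that convert
 * between blkg_policy_data and their own pd type:
 *
 *    static struct foo_pd *blkg_to_foo(struct blkcg_gq *blkg)
 *    {
 *        struct blkg_policy_data *pd = blkg_to_pd(blkg, &foo_policy);
 *
 *        return pd ? container_of(pd, struct foo_pd, pd) : NULL;
 *    }
 *
 *    static struct blkcg_gq *foo_to_blkg(struct foo_pd *fpd)
 *    {
 *        return pd_to_blkg(&fpd->pd);
 *    }
 */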

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
    int ret;

    ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
    if (ret)
        strncpy(buf, "<unavailable>", buflen);
    return ret;
}
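
/*
 * Illustrative sketch: formatting the owning cgroup's path, e.g. for a
 * tracing or debug message (buffer size is a hypothetical choice):
 *
 *    char path[128];
 *
 *    blkg_path(blkg, path, sizeof(path));
 */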

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding queue_lock and an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
    lockdep_assert_held(blkg->q->queue_lock);
    WARN_ON_ONCE(!blkg->refcnt);
    blkg->refcnt++;
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 *
 * The caller should be holding queue_lock.
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
    lockdep_assert_held(blkg->q->queue_lock);
    WARN_ON_ONCE(blkg->refcnt <= 0);
    if (!--blkg->refcnt)
        call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}
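
/*
 * Illustrative sketch: a caller that needs @blkg beyond the locked
 * region pins it while queue_lock is held and drops the reference under
 * the same lock later (hypothetical flow):
 *
 *    spin_lock_irq(q->queue_lock);
 *    blkg_get(blkg);
 *    spin_unlock_irq(q->queue_lock);
 *
 *    // ... use blkg ...
 *
 *    spin_lock_irq(q->queue_lock);
 *    blkg_put(blkg);
 *    spin_unlock_irq(q->queue_lock);
 */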

struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
                   bool update_hint);

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg. Must be used with RCU
 * read locked. If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs. The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg) \
    css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css) \
        if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
                          (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead. Synchronization rules are the same. @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg) \
    css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
        if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
                          (p_blkg)->q, false)))
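
/*
 * Illustrative sketch: visiting @blkg and every online descendant under
 * RCU (hypothetical loop body):
 *
 *    struct cgroup_subsys_state *pos_css;
 *    struct blkcg_gq *d_blkg;
 *
 *    rcu_read_lock();
 *    blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *        // d_blkg visits blkg itself first, then each descendant
 *    }
 *    rcu_read_unlock();
 */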

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio. Find
 * the request_list to use and obtain a reference on it. Should be called
 * under queue_lock. This function is guaranteed to return non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                          struct bio *bio)
{
    struct blkcg *blkcg;
    struct blkcg_gq *blkg;

    rcu_read_lock();

    blkcg = bio_blkcg(bio);

    /* bypass blkg lookup and use @q->root_rl directly for root */
    if (blkcg == &blkcg_root)
        goto root_rl;

    /*
     * Try to use blkg->rl. blkg lookup may fail under memory pressure
     * or if either the blkcg or queue is going away. Fall back to
     * root_rl in such cases.
     */
    blkg = blkg_lookup_create(blkcg, q);
    if (unlikely(IS_ERR(blkg)))
        goto root_rl;

    blkg_get(blkg);
    rcu_read_unlock();
    return &blkg->rl;
root_rl:
    rcu_read_unlock();
    return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl(). Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
    /* root_rl may not have blkg set */
    if (rl->blkg && rl->blkg->blkcg != &blkcg_root)
        blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
    rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
    return rq->rl;
}
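
/*
 * Illustrative sketch of how the request allocation path pairs these
 * helpers (simplified; alloc_request() is a hypothetical stand-in for
 * block core's allocation step):
 *
 *    rl = blk_get_rl(q, bio);          // find and pin the request_list
 *    rq = alloc_request(rl);
 *    blk_rq_set_rl(rq, rl);            // remember where @rq came from
 *    ...
 *    blk_put_rl(blk_rq_rl(rq));        // on free, drop the reference
 */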

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                     struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q) \
    for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
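
/*
 * Illustrative sketch: applying an operation to every request_list on
 * @q under queue_lock (foo_drain_rl() is hypothetical):
 *
 *    struct request_list *rl;
 *
 *    blk_queue_for_each_rl(rl, q)
 *        foo_drain_rl(rl);
 */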

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat. The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
    u64_stats_update_begin(&stat->syncp);
    stat->cnt += val;
    u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat. This function can be called without
 * synchronization and takes care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
    unsigned int start;
    uint64_t v;

    do {
        start = u64_stats_fetch_begin(&stat->syncp);
        v = stat->cnt;
    } while (u64_stats_fetch_retry(&stat->syncp, start));

    return v;
}
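
/*
 * Illustrative sketch (hypothetical stat field): writers serialize among
 * themselves, while readers may sample concurrently and locklessly:
 *
 *    blkg_stat_add(&fpd->serviced, 1);               // writer side
 *    nr_serviced = blkg_stat_read(&fpd->serviced);   // reader side
 */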

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
    stat->cnt = 0;
}

/**
 * blkg_stat_merge - merge a blkg_stat into another
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count to @to.
 */
static inline void blkg_stat_merge(struct blkg_stat *to, struct blkg_stat *from)
{
    blkg_stat_add(to, blkg_stat_read(from));
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat. The counters are chosen according to @rw. The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                   int rw, uint64_t val)
{
    u64_stats_update_begin(&rwstat->syncp);

    if (rw & REQ_WRITE)
        rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
    else
        rwstat->cnt[BLKG_RWSTAT_READ] += val;
    if (rw & REQ_SYNC)
        rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
    else
        rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

    u64_stats_update_end(&rwstat->syncp);
}
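
/*
 * Illustrative sketch (hypothetical stat field; the bio field names
 * assume this kernel era and should be treated as an assumption):
 * accounting a bio's bytes in the direction and sync-ness given by its
 * rw flags:
 *
 *    blkg_rwstat_add(&fpd->service_bytes, bio->bi_rw, bio->bi_size);
 */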

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
    unsigned int start;
    struct blkg_rwstat tmp;

    do {
        start = u64_stats_fetch_begin(&rwstat->syncp);
        tmp = *rwstat;
    } while (u64_stats_fetch_retry(&rwstat->syncp, start));

    return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction. This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
    struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

    return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
    memset(rwstat->cnt, 0, sizeof(rwstat->cnt));
}

/**
 * blkg_rwstat_merge - merge a blkg_rwstat into another
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's counts to @to.
 */
static inline void blkg_rwstat_merge(struct blkg_rwstat *to,
                     struct blkg_rwstat *from)
{
    struct blkg_rwstat v = blkg_rwstat_read(from);
    int i;

    u64_stats_update_begin(&to->syncp);
    for (i = 0; i < BLKG_RWSTAT_NR; i++)
        to->cnt[i] += v.cnt[i];
    u64_stats_update_end(&to->syncp);
}

#else /* CONFIG_BLK_CGROUP */

struct cgroup;
struct blkcg;

struct blkg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                    const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                       const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                          struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                          struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

#define blk_queue_for_each_rl(rl, q) \
    for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif /* CONFIG_BLK_CGROUP */
#endif /* _BLK_CGROUP_H */
