Root/mm/page-writeback.c

1/*
2 * mm/page-writeback.c
3 *
4 * Copyright (C) 2002, Linus Torvalds.
5 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
6 *
7 * Contains functions related to writing back dirty pages at the
8 * address_space level.
9 *
10 * 10Apr2002 Andrew Morton
11 * Initial version
12 */
13
14#include <linux/kernel.h>
15#include <linux/export.h>
16#include <linux/spinlock.h>
17#include <linux/fs.h>
18#include <linux/mm.h>
19#include <linux/swap.h>
20#include <linux/slab.h>
21#include <linux/pagemap.h>
22#include <linux/writeback.h>
23#include <linux/init.h>
24#include <linux/backing-dev.h>
25#include <linux/task_io_accounting_ops.h>
26#include <linux/blkdev.h>
27#include <linux/mpage.h>
28#include <linux/rmap.h>
29#include <linux/percpu.h>
30#include <linux/notifier.h>
31#include <linux/smp.h>
32#include <linux/sysctl.h>
33#include <linux/cpu.h>
34#include <linux/syscalls.h>
35#include <linux/buffer_head.h> /* __set_page_dirty_buffers */
36#include <linux/pagevec.h>
37#include <trace/events/writeback.h>
38
39/*
40 * Sleep at most 200ms at a time in balance_dirty_pages().
41 */
42#define MAX_PAUSE max(HZ/5, 1)
43
44/*
45 * Try to keep balance_dirty_pages() call intervals higher than this many pages
46 * by raising pause time to max_pause when falls below it.
47 */
48#define DIRTY_POLL_THRESH (128 >> (PAGE_SHIFT - 10))
49
50/*
51 * Estimate write bandwidth at 200ms intervals.
52 */
53#define BANDWIDTH_INTERVAL max(HZ/5, 1)
54
55#define RATELIMIT_CALC_SHIFT 10
56
57/*
58 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
59 * will look to see if it needs to force writeback or throttling.
60 */
61static long ratelimit_pages = 32;
62
63/* The following parameters are exported via /proc/sys/vm */
64
65/*
66 * Start background writeback (via writeback threads) at this percentage
67 */
68int dirty_background_ratio = 10;
69
70/*
71 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
72 * dirty_background_ratio * the amount of dirtyable memory
73 */
74unsigned long dirty_background_bytes;
75
76/*
77 * free highmem will not be subtracted from the total free memory
78 * for calculating free ratios if vm_highmem_is_dirtyable is true
79 */
80int vm_highmem_is_dirtyable;
81
82/*
83 * The generator of dirty data starts writeback at this percentage
84 */
85int vm_dirty_ratio = 20;
86
87/*
88 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
89 * vm_dirty_ratio * the amount of dirtyable memory
90 */
91unsigned long vm_dirty_bytes;
92
93/*
94 * The interval between `kupdate'-style writebacks
95 */
96unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */
97
98EXPORT_SYMBOL_GPL(dirty_writeback_interval);
99
100/*
101 * The longest time for which data is allowed to remain dirty
102 */
103unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */
104
105/*
106 * Flag that makes the machine dump writes/reads and block dirtyings.
107 */
108int block_dump;
109
110/*
111 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
112 * a full sync is triggered after this time elapses without any disk activity.
113 */
114int laptop_mode;
115
116EXPORT_SYMBOL(laptop_mode);
117
118/* End of sysctl-exported parameters */
119
120unsigned long global_dirty_limit;
121
122/*
123 * Scale the writeback cache size proportional to the relative writeout speeds.
124 *
125 * We do this by keeping a floating proportion between BDIs, based on page
126 * writeback completions [end_page_writeback()]. Those devices that write out
127 * pages fastest will get the larger share, while the slower will get a smaller
128 * share.
129 *
130 * We use page writeout completions because we are interested in getting rid of
131 * dirty pages. Having them written out is the primary goal.
132 *
133 * We introduce a concept of time, a period over which we measure these events,
134 * because demand can/will vary over time. The length of this period itself is
135 * measured in page writeback completions.
136 *
137 */
138static struct prop_descriptor vm_completions;
139
140/*
141 * Work out the current dirty-memory clamping and background writeout
142 * thresholds.
143 *
144 * The main aim here is to lower them aggressively if there is a lot of mapped
145 * memory around. To avoid stressing page reclaim with lots of unreclaimable
146 * pages. It is better to clamp down on writers than to start swapping, and
147 * performing lots of scanning.
148 *
149 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
150 *
151 * We don't permit the clamping level to fall below 5% - that is getting rather
152 * excessive.
153 *
154 * We make sure that the background writeout level is below the adjusted
155 * clamping level.
156 */
157
158/*
159 * In a memory zone, there is a certain amount of pages we consider
160 * available for the page cache, which is essentially the number of
161 * free and reclaimable pages, minus some zone reserves to protect
162 * lowmem and the ability to uphold the zone's watermarks without
163 * requiring writeback.
164 *
165 * This number of dirtyable pages is the base value of which the
166 * user-configurable dirty ratio is the effictive number of pages that
167 * are allowed to be actually dirtied. Per individual zone, or
168 * globally by using the sum of dirtyable pages over all zones.
169 *
170 * Because the user is allowed to specify the dirty limit globally as
171 * absolute number of bytes, calculating the per-zone dirty limit can
172 * require translating the configured limit into a percentage of
173 * global dirtyable memory first.
174 */
175
176static unsigned long highmem_dirtyable_memory(unsigned long total)
177{
178#ifdef CONFIG_HIGHMEM
179    int node;
180    unsigned long x = 0;
181
182    for_each_node_state(node, N_HIGH_MEMORY) {
183        struct zone *z =
184            &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
185
186        x += zone_page_state(z, NR_FREE_PAGES) +
187             zone_reclaimable_pages(z) - z->dirty_balance_reserve;
188    }
189    /*
190     * Make sure that the number of highmem pages is never larger
191     * than the number of the total dirtyable memory. This can only
192     * occur in very strange VM situations but we want to make sure
193     * that this does not occur.
194     */
195    return min(x, total);
196#else
197    return 0;
198#endif
199}
200
201/**
202 * global_dirtyable_memory - number of globally dirtyable pages
203 *
204 * Returns the global number of pages potentially available for dirty
205 * page cache. This is the base value for the global dirty limits.
206 */
207unsigned long global_dirtyable_memory(void)
208{
209    unsigned long x;
210
211    x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
212        dirty_balance_reserve;
213
214    if (!vm_highmem_is_dirtyable)
215        x -= highmem_dirtyable_memory(x);
216
217    return x + 1; /* Ensure that we never return 0 */
218}
219
220/*
221 * global_dirty_limits - background-writeback and dirty-throttling thresholds
222 *
223 * Calculate the dirty thresholds based on sysctl parameters
224 * - vm.dirty_background_ratio or vm.dirty_background_bytes
225 * - vm.dirty_ratio or vm.dirty_bytes
226 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
227 * real-time tasks.
228 */
229void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
230{
231    unsigned long background;
232    unsigned long dirty;
233    unsigned long uninitialized_var(available_memory);
234    struct task_struct *tsk;
235
236    if (!vm_dirty_bytes || !dirty_background_bytes)
237        available_memory = global_dirtyable_memory();
238
239    if (vm_dirty_bytes)
240        dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
241    else
242        dirty = (vm_dirty_ratio * available_memory) / 100;
243
244    if (dirty_background_bytes)
245        background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
246    else
247        background = (dirty_background_ratio * available_memory) / 100;
248
249    if (background >= dirty)
250        background = dirty / 2;
251    tsk = current;
252    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
253        background += background / 4;
254        dirty += dirty / 4;
255    }
256    *pbackground = background;
257    *pdirty = dirty;
258    trace_global_dirty_state(background, dirty);
259}
260
261/**
262 * zone_dirtyable_memory - number of dirtyable pages in a zone
263 * @zone: the zone
264 *
265 * Returns the zone's number of pages potentially available for dirty
266 * page cache. This is the base value for the per-zone dirty limits.
267 */
268static unsigned long zone_dirtyable_memory(struct zone *zone)
269{
270    /*
271     * The effective global number of dirtyable pages may exclude
272     * highmem as a big-picture measure to keep the ratio between
273     * dirty memory and lowmem reasonable.
274     *
275     * But this function is purely about the individual zone and a
276     * highmem zone can hold its share of dirty pages, so we don't
277     * care about vm_highmem_is_dirtyable here.
278     */
279    return zone_page_state(zone, NR_FREE_PAGES) +
280           zone_reclaimable_pages(zone) -
281           zone->dirty_balance_reserve;
282}
283
284/**
285 * zone_dirty_limit - maximum number of dirty pages allowed in a zone
286 * @zone: the zone
287 *
288 * Returns the maximum number of dirty pages allowed in a zone, based
289 * on the zone's dirtyable memory.
290 */
291static unsigned long zone_dirty_limit(struct zone *zone)
292{
293    unsigned long zone_memory = zone_dirtyable_memory(zone);
294    struct task_struct *tsk = current;
295    unsigned long dirty;
296
297    if (vm_dirty_bytes)
298        dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE) *
299            zone_memory / global_dirtyable_memory();
300    else
301        dirty = vm_dirty_ratio * zone_memory / 100;
302
303    if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk))
304        dirty += dirty / 4;
305
306    return dirty;
307}
308
309/**
310 * zone_dirty_ok - tells whether a zone is within its dirty limits
311 * @zone: the zone to check
312 *
313 * Returns %true when the dirty pages in @zone are within the zone's
314 * dirty limit, %false if the limit is exceeded.
315 */
316bool zone_dirty_ok(struct zone *zone)
317{
318    unsigned long limit = zone_dirty_limit(zone);
319
320    return zone_page_state(zone, NR_FILE_DIRTY) +
321           zone_page_state(zone, NR_UNSTABLE_NFS) +
322           zone_page_state(zone, NR_WRITEBACK) <= limit;
323}
324
325/*
326 * couple the period to the dirty_ratio:
327 *
328 * period/2 ~ roundup_pow_of_two(dirty limit)
329 */
330static int calc_period_shift(void)
331{
332    unsigned long dirty_total;
333
334    if (vm_dirty_bytes)
335        dirty_total = vm_dirty_bytes / PAGE_SIZE;
336    else
337        dirty_total = (vm_dirty_ratio * global_dirtyable_memory()) /
338                100;
339    return 2 + ilog2(dirty_total - 1);
340}
341
342/*
343 * update the period when the dirty threshold changes.
344 */
345static void update_completion_period(void)
346{
347    int shift = calc_period_shift();
348    prop_change_shift(&vm_completions, shift);
349
350    writeback_set_ratelimit();
351}
352
353int dirty_background_ratio_handler(struct ctl_table *table, int write,
354        void __user *buffer, size_t *lenp,
355        loff_t *ppos)
356{
357    int ret;
358
359    ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
360    if (ret == 0 && write)
361        dirty_background_bytes = 0;
362    return ret;
363}
364
365int dirty_background_bytes_handler(struct ctl_table *table, int write,
366        void __user *buffer, size_t *lenp,
367        loff_t *ppos)
368{
369    int ret;
370
371    ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
372    if (ret == 0 && write)
373        dirty_background_ratio = 0;
374    return ret;
375}
376
377int dirty_ratio_handler(struct ctl_table *table, int write,
378        void __user *buffer, size_t *lenp,
379        loff_t *ppos)
380{
381    int old_ratio = vm_dirty_ratio;
382    int ret;
383
384    ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
385    if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
386        update_completion_period();
387        vm_dirty_bytes = 0;
388    }
389    return ret;
390}
391
392int dirty_bytes_handler(struct ctl_table *table, int write,
393        void __user *buffer, size_t *lenp,
394        loff_t *ppos)
395{
396    unsigned long old_bytes = vm_dirty_bytes;
397    int ret;
398
399    ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
400    if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
401        update_completion_period();
402        vm_dirty_ratio = 0;
403    }
404    return ret;
405}
406
407/*
408 * Increment the BDI's writeout completion count and the global writeout
409 * completion count. Called from test_clear_page_writeback().
410 */
411static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
412{
413    __inc_bdi_stat(bdi, BDI_WRITTEN);
414    __prop_inc_percpu_max(&vm_completions, &bdi->completions,
415                  bdi->max_prop_frac);
416}
417
418void bdi_writeout_inc(struct backing_dev_info *bdi)
419{
420    unsigned long flags;
421
422    local_irq_save(flags);
423    __bdi_writeout_inc(bdi);
424    local_irq_restore(flags);
425}
426EXPORT_SYMBOL_GPL(bdi_writeout_inc);
427
428/*
429 * Obtain an accurate fraction of the BDI's portion.
430 */
431static void bdi_writeout_fraction(struct backing_dev_info *bdi,
432        long *numerator, long *denominator)
433{
434    prop_fraction_percpu(&vm_completions, &bdi->completions,
435                numerator, denominator);
436}
437
438/*
439 * bdi_min_ratio keeps the sum of the minimum dirty shares of all
440 * registered backing devices, which, for obvious reasons, can not
441 * exceed 100%.
442 */
443static unsigned int bdi_min_ratio;
444
445int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
446{
447    int ret = 0;
448
449    spin_lock_bh(&bdi_lock);
450    if (min_ratio > bdi->max_ratio) {
451        ret = -EINVAL;
452    } else {
453        min_ratio -= bdi->min_ratio;
454        if (bdi_min_ratio + min_ratio < 100) {
455            bdi_min_ratio += min_ratio;
456            bdi->min_ratio += min_ratio;
457        } else {
458            ret = -EINVAL;
459        }
460    }
461    spin_unlock_bh(&bdi_lock);
462
463    return ret;
464}
465
466int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
467{
468    int ret = 0;
469
470    if (max_ratio > 100)
471        return -EINVAL;
472
473    spin_lock_bh(&bdi_lock);
474    if (bdi->min_ratio > max_ratio) {
475        ret = -EINVAL;
476    } else {
477        bdi->max_ratio = max_ratio;
478        bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
479    }
480    spin_unlock_bh(&bdi_lock);
481
482    return ret;
483}
484EXPORT_SYMBOL(bdi_set_max_ratio);
485
486static unsigned long dirty_freerun_ceiling(unsigned long thresh,
487                       unsigned long bg_thresh)
488{
489    return (thresh + bg_thresh) / 2;
490}
491
492static unsigned long hard_dirty_limit(unsigned long thresh)
493{
494    return max(thresh, global_dirty_limit);
495}
496
497/**
498 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
499 * @bdi: the backing_dev_info to query
500 * @dirty: global dirty limit in pages
501 *
502 * Returns @bdi's dirty limit in pages. The term "dirty" in the context of
503 * dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
504 *
505 * Note that balance_dirty_pages() will only seriously take it as a hard limit
506 * when sleeping max_pause per page is not enough to keep the dirty pages under
507 * control. For example, when the device is completely stalled due to some error
508 * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
509 * In the other normal situations, it acts more gently by throttling the tasks
510 * more (rather than completely block them) when the bdi dirty pages go high.
511 *
512 * It allocates high/low dirty limits to fast/slow devices, in order to prevent
513 * - starving fast devices
514 * - piling up dirty pages (that will take long time to sync) on slow devices
515 *
516 * The bdi's share of dirty limit will be adapting to its throughput and
517 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
518 */
519unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
520{
521    u64 bdi_dirty;
522    long numerator, denominator;
523
524    /*
525     * Calculate this BDI's share of the dirty ratio.
526     */
527    bdi_writeout_fraction(bdi, &numerator, &denominator);
528
529    bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
530    bdi_dirty *= numerator;
531    do_div(bdi_dirty, denominator);
532
533    bdi_dirty += (dirty * bdi->min_ratio) / 100;
534    if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
535        bdi_dirty = dirty * bdi->max_ratio / 100;
536
537    return bdi_dirty;
538}
539
540/*
541 * Dirty position control.
542 *
543 * (o) global/bdi setpoints
544 *
545 * We want the dirty pages be balanced around the global/bdi setpoints.
546 * When the number of dirty pages is higher/lower than the setpoint, the
547 * dirty position control ratio (and hence task dirty ratelimit) will be
548 * decreased/increased to bring the dirty pages back to the setpoint.
549 *
550 * pos_ratio = 1 << RATELIMIT_CALC_SHIFT
551 *
552 * if (dirty < setpoint) scale up pos_ratio
553 * if (dirty > setpoint) scale down pos_ratio
554 *
555 * if (bdi_dirty < bdi_setpoint) scale up pos_ratio
556 * if (bdi_dirty > bdi_setpoint) scale down pos_ratio
557 *
558 * task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
559 *
560 * (o) global control line
561 *
562 * ^ pos_ratio
563 * |
564 * | |<===== global dirty control scope ======>|
565 * 2.0 .............*
566 * | .*
567 * | . *
568 * | . *
569 * | . *
570 * | . *
571 * | . *
572 * 1.0 ................................*
573 * | . . *
574 * | . . *
575 * | . . *
576 * | . . *
577 * | . . *
578 * 0 +------------.------------------.----------------------*------------->
579 * freerun^ setpoint^ limit^ dirty pages
580 *
581 * (o) bdi control line
582 *
583 * ^ pos_ratio
584 * |
585 * | *
586 * | *
587 * | *
588 * | *
589 * | * |<=========== span ============>|
590 * 1.0 .......................*
591 * | . *
592 * | . *
593 * | . *
594 * | . *
595 * | . *
596 * | . *
597 * | . *
598 * | . *
599 * | . *
600 * | . *
601 * | . *
602 * 1/4 ...............................................* * * * * * * * * * * *
603 * | . .
604 * | . .
605 * | . .
606 * 0 +----------------------.-------------------------------.------------->
607 * bdi_setpoint^ x_intercept^
608 *
609 * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
610 * be smoothly throttled down to normal if it starts high in situations like
611 * - start writing to a slow SD card and a fast disk at the same time. The SD
612 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
613 * - the bdi dirty thresh drops quickly due to change of JBOD workload
614 */
615static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
616                    unsigned long thresh,
617                    unsigned long bg_thresh,
618                    unsigned long dirty,
619                    unsigned long bdi_thresh,
620                    unsigned long bdi_dirty)
621{
622    unsigned long write_bw = bdi->avg_write_bandwidth;
623    unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
624    unsigned long limit = hard_dirty_limit(thresh);
625    unsigned long x_intercept;
626    unsigned long setpoint; /* dirty pages' target balance point */
627    unsigned long bdi_setpoint;
628    unsigned long span;
629    long long pos_ratio; /* for scaling up/down the rate limit */
630    long x;
631
632    if (unlikely(dirty >= limit))
633        return 0;
634
635    /*
636     * global setpoint
637     *
638     * setpoint - dirty 3
639     * f(dirty) := 1.0 + (----------------)
640     * limit - setpoint
641     *
642     * it's a 3rd order polynomial that subjects to
643     *
644     * (1) f(freerun) = 2.0 => rampup dirty_ratelimit reasonably fast
645     * (2) f(setpoint) = 1.0 => the balance point
646     * (3) f(limit) = 0 => the hard limit
647     * (4) df/dx <= 0 => negative feedback control
648     * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
649     * => fast response on large errors; small oscillation near setpoint
650     */
651    setpoint = (freerun + limit) / 2;
652    x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
653            limit - setpoint + 1);
654    pos_ratio = x;
655    pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
656    pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
657    pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
658
659    /*
660     * We have computed basic pos_ratio above based on global situation. If
661     * the bdi is over/under its share of dirty pages, we want to scale
662     * pos_ratio further down/up. That is done by the following mechanism.
663     */
664
665    /*
666     * bdi setpoint
667     *
668     * f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
669     *
670     * x_intercept - bdi_dirty
671     * := --------------------------
672     * x_intercept - bdi_setpoint
673     *
674     * The main bdi control line is a linear function that subjects to
675     *
676     * (1) f(bdi_setpoint) = 1.0
677     * (2) k = - 1 / (8 * write_bw) (in single bdi case)
678     * or equally: x_intercept = bdi_setpoint + 8 * write_bw
679     *
680     * For single bdi case, the dirty pages are observed to fluctuate
681     * regularly within range
682     * [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
683     * for various filesystems, where (2) can yield in a reasonable 12.5%
684     * fluctuation range for pos_ratio.
685     *
686     * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
687     * own size, so move the slope over accordingly and choose a slope that
688     * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
689     */
690    if (unlikely(bdi_thresh > thresh))
691        bdi_thresh = thresh;
692    /*
693     * It's very possible that bdi_thresh is close to 0 not because the
694     * device is slow, but that it has remained inactive for long time.
695     * Honour such devices a reasonable good (hopefully IO efficient)
696     * threshold, so that the occasional writes won't be blocked and active
697     * writes can rampup the threshold quickly.
698     */
699    bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
700    /*
701     * scale global setpoint to bdi's:
702     * bdi_setpoint = setpoint * bdi_thresh / thresh
703     */
704    x = div_u64((u64)bdi_thresh << 16, thresh + 1);
705    bdi_setpoint = setpoint * (u64)x >> 16;
706    /*
707     * Use span=(8*write_bw) in single bdi case as indicated by
708     * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
709     *
710     * bdi_thresh thresh - bdi_thresh
711     * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
712     * thresh thresh
713     */
714    span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
715    x_intercept = bdi_setpoint + span;
716
717    if (bdi_dirty < x_intercept - span / 4) {
718        pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
719                    x_intercept - bdi_setpoint + 1);
720    } else
721        pos_ratio /= 4;
722
723    /*
724     * bdi reserve area, safeguard against dirty pool underrun and disk idle
725     * It may push the desired control point of global dirty pages higher
726     * than setpoint.
727     */
728    x_intercept = bdi_thresh / 2;
729    if (bdi_dirty < x_intercept) {
730        if (bdi_dirty > x_intercept / 8)
731            pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
732        else
733            pos_ratio *= 8;
734    }
735
736    return pos_ratio;
737}
738
739static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
740                       unsigned long elapsed,
741                       unsigned long written)
742{
743    const unsigned long period = roundup_pow_of_two(3 * HZ);
744    unsigned long avg = bdi->avg_write_bandwidth;
745    unsigned long old = bdi->write_bandwidth;
746    u64 bw;
747
748    /*
749     * bw = written * HZ / elapsed
750     *
751     * bw * elapsed + write_bandwidth * (period - elapsed)
752     * write_bandwidth = ---------------------------------------------------
753     * period
754     */
755    bw = written - bdi->written_stamp;
756    bw *= HZ;
757    if (unlikely(elapsed > period)) {
758        do_div(bw, elapsed);
759        avg = bw;
760        goto out;
761    }
762    bw += (u64)bdi->write_bandwidth * (period - elapsed);
763    bw >>= ilog2(period);
764
765    /*
766     * one more level of smoothing, for filtering out sudden spikes
767     */
768    if (avg > old && old >= (unsigned long)bw)
769        avg -= (avg - old) >> 3;
770
771    if (avg < old && old <= (unsigned long)bw)
772        avg += (old - avg) >> 3;
773
774out:
775    bdi->write_bandwidth = bw;
776    bdi->avg_write_bandwidth = avg;
777}
778
779/*
780 * The global dirtyable memory and dirty threshold could be suddenly knocked
781 * down by a large amount (eg. on the startup of KVM in a swapless system).
782 * This may throw the system into deep dirty exceeded state and throttle
783 * heavy/light dirtiers alike. To retain good responsiveness, maintain
784 * global_dirty_limit for tracking slowly down to the knocked down dirty
785 * threshold.
786 */
787static void update_dirty_limit(unsigned long thresh, unsigned long dirty)
788{
789    unsigned long limit = global_dirty_limit;
790
791    /*
792     * Follow up in one step.
793     */
794    if (limit < thresh) {
795        limit = thresh;
796        goto update;
797    }
798
799    /*
800     * Follow down slowly. Use the higher one as the target, because thresh
801     * may drop below dirty. This is exactly the reason to introduce
802     * global_dirty_limit which is guaranteed to lie above the dirty pages.
803     */
804    thresh = max(thresh, dirty);
805    if (limit > thresh) {
806        limit -= (limit - thresh) >> 5;
807        goto update;
808    }
809    return;
810update:
811    global_dirty_limit = limit;
812}
813
814static void global_update_bandwidth(unsigned long thresh,
815                    unsigned long dirty,
816                    unsigned long now)
817{
818    static DEFINE_SPINLOCK(dirty_lock);
819    static unsigned long update_time;
820
821    /*
822     * check locklessly first to optimize away locking for the most time
823     */
824    if (time_before(now, update_time + BANDWIDTH_INTERVAL))
825        return;
826
827    spin_lock(&dirty_lock);
828    if (time_after_eq(now, update_time + BANDWIDTH_INTERVAL)) {
829        update_dirty_limit(thresh, dirty);
830        update_time = now;
831    }
832    spin_unlock(&dirty_lock);
833}
834
835/*
836 * Maintain bdi->dirty_ratelimit, the base dirty throttle rate.
837 *
838 * Normal bdi tasks will be curbed at or below it in long term.
839 * Obviously it should be around (write_bw / N) when there are N dd tasks.
840 */
841static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi,
842                       unsigned long thresh,
843                       unsigned long bg_thresh,
844                       unsigned long dirty,
845                       unsigned long bdi_thresh,
846                       unsigned long bdi_dirty,
847                       unsigned long dirtied,
848                       unsigned long elapsed)
849{
850    unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
851    unsigned long limit = hard_dirty_limit(thresh);
852    unsigned long setpoint = (freerun + limit) / 2;
853    unsigned long write_bw = bdi->avg_write_bandwidth;
854    unsigned long dirty_ratelimit = bdi->dirty_ratelimit;
855    unsigned long dirty_rate;
856    unsigned long task_ratelimit;
857    unsigned long balanced_dirty_ratelimit;
858    unsigned long pos_ratio;
859    unsigned long step;
860    unsigned long x;
861
862    /*
863     * The dirty rate will match the writeout rate in long term, except
864     * when dirty pages are truncated by userspace or re-dirtied by FS.
865     */
866    dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed;
867
868    pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty,
869                       bdi_thresh, bdi_dirty);
870    /*
871     * task_ratelimit reflects each dd's dirty rate for the past 200ms.
872     */
873    task_ratelimit = (u64)dirty_ratelimit *
874                    pos_ratio >> RATELIMIT_CALC_SHIFT;
875    task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */
876
877    /*
878     * A linear estimation of the "balanced" throttle rate. The theory is,
879     * if there are N dd tasks, each throttled at task_ratelimit, the bdi's
880     * dirty_rate will be measured to be (N * task_ratelimit). So the below
881     * formula will yield the balanced rate limit (write_bw / N).
882     *
883     * Note that the expanded form is not a pure rate feedback:
884     * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1)
885     * but also takes pos_ratio into account:
886     * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2)
887     *
888     * (1) is not realistic because pos_ratio also takes part in balancing
889     * the dirty rate. Consider the state
890     * pos_ratio = 0.5 (3)
891     * rate = 2 * (write_bw / N) (4)
892     * If (1) is used, it will stuck in that state! Because each dd will
893     * be throttled at
894     * task_ratelimit = pos_ratio * rate = (write_bw / N) (5)
895     * yielding
896     * dirty_rate = N * task_ratelimit = write_bw (6)
897     * put (6) into (1) we get
898     * rate_(i+1) = rate_(i) (7)
899     *
900     * So we end up using (2) to always keep
901     * rate_(i+1) ~= (write_bw / N) (8)
902     * regardless of the value of pos_ratio. As long as (8) is satisfied,
903     * pos_ratio is able to drive itself to 1.0, which is not only where
904     * the dirty count meet the setpoint, but also where the slope of
905     * pos_ratio is most flat and hence task_ratelimit is least fluctuated.
906     */
907    balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw,
908                       dirty_rate | 1);
909    /*
910     * balanced_dirty_ratelimit ~= (write_bw / N) <= write_bw
911     */
912    if (unlikely(balanced_dirty_ratelimit > write_bw))
913        balanced_dirty_ratelimit = write_bw;
914
915    /*
916     * We could safely do this and return immediately:
917     *
918     * bdi->dirty_ratelimit = balanced_dirty_ratelimit;
919     *
920     * However to get a more stable dirty_ratelimit, the below elaborated
921     * code makes use of task_ratelimit to filter out sigular points and
922     * limit the step size.
923     *
924     * The below code essentially only uses the relative value of
925     *
926     * task_ratelimit - dirty_ratelimit
927     * = (pos_ratio - 1) * dirty_ratelimit
928     *
929     * which reflects the direction and size of dirty position error.
930     */
931
932    /*
933     * dirty_ratelimit will follow balanced_dirty_ratelimit iff
934     * task_ratelimit is on the same side of dirty_ratelimit, too.
935     * For example, when
936     * - dirty_ratelimit > balanced_dirty_ratelimit
937     * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint)
938     * lowering dirty_ratelimit will help meet both the position and rate
939     * control targets. Otherwise, don't update dirty_ratelimit if it will
940     * only help meet the rate target. After all, what the users ultimately
941     * feel and care are stable dirty rate and small position error.
942     *
943     * |task_ratelimit - dirty_ratelimit| is used to limit the step size
944     * and filter out the sigular points of balanced_dirty_ratelimit. Which
945     * keeps jumping around randomly and can even leap far away at times
946     * due to the small 200ms estimation period of dirty_rate (we want to
947     * keep that period small to reduce time lags).
948     */
949    step = 0;
950    if (dirty < setpoint) {
951        x = min(bdi->balanced_dirty_ratelimit,
952             min(balanced_dirty_ratelimit, task_ratelimit));
953        if (dirty_ratelimit < x)
954            step = x - dirty_ratelimit;
955    } else {
956        x = max(bdi->balanced_dirty_ratelimit,
957             max(balanced_dirty_ratelimit, task_ratelimit));
958        if (dirty_ratelimit > x)
959            step = dirty_ratelimit - x;
960    }
961
962    /*
963     * Don't pursue 100% rate matching. It's impossible since the balanced
964     * rate itself is constantly fluctuating. So decrease the track speed
965     * when it gets close to the target. Helps eliminate pointless tremors.
966     */
967    step >>= dirty_ratelimit / (2 * step + 1);
968    /*
969     * Limit the tracking speed to avoid overshooting.
970     */
971    step = (step + 7) / 8;
972
973    if (dirty_ratelimit < balanced_dirty_ratelimit)
974        dirty_ratelimit += step;
975    else
976        dirty_ratelimit -= step;
977
978    bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL);
979    bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit;
980
981    trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit);
982}
983
984void __bdi_update_bandwidth(struct backing_dev_info *bdi,
985                unsigned long thresh,
986                unsigned long bg_thresh,
987                unsigned long dirty,
988                unsigned long bdi_thresh,
989                unsigned long bdi_dirty,
990                unsigned long start_time)
991{
992    unsigned long now = jiffies;
993    unsigned long elapsed = now - bdi->bw_time_stamp;
994    unsigned long dirtied;
995    unsigned long written;
996
997    /*
998     * rate-limit, only update once every 200ms.
999     */
1000    if (elapsed < BANDWIDTH_INTERVAL)
1001        return;
1002
1003    dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
1004    written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
1005
1006    /*
1007     * Skip quiet periods when disk bandwidth is under-utilized.
1008     * (at least 1s idle time between two flusher runs)
1009     */
1010    if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time))
1011        goto snapshot;
1012
1013    if (thresh) {
1014        global_update_bandwidth(thresh, dirty, now);
1015        bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty,
1016                       bdi_thresh, bdi_dirty,
1017                       dirtied, elapsed);
1018    }
1019    bdi_update_write_bandwidth(bdi, elapsed, written);
1020
1021snapshot:
1022    bdi->dirtied_stamp = dirtied;
1023    bdi->written_stamp = written;
1024    bdi->bw_time_stamp = now;
1025}
1026
1027static void bdi_update_bandwidth(struct backing_dev_info *bdi,
1028                 unsigned long thresh,
1029                 unsigned long bg_thresh,
1030                 unsigned long dirty,
1031                 unsigned long bdi_thresh,
1032                 unsigned long bdi_dirty,
1033                 unsigned long start_time)
1034{
1035    if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL))
1036        return;
1037    spin_lock(&bdi->wb.list_lock);
1038    __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty,
1039                   bdi_thresh, bdi_dirty, start_time);
1040    spin_unlock(&bdi->wb.list_lock);
1041}
1042
1043/*
1044 * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
1045 * will look to see if it needs to start dirty throttling.
1046 *
1047 * If dirty_poll_interval is too low, big NUMA machines will call the expensive
1048 * global_page_state() too often. So scale it near-sqrt to the safety margin
1049 * (the number of pages we may dirty without exceeding the dirty limits).
1050 */
1051static unsigned long dirty_poll_interval(unsigned long dirty,
1052                     unsigned long thresh)
1053{
1054    if (thresh > dirty)
1055        return 1UL << (ilog2(thresh - dirty) >> 1);
1056
1057    return 1;
1058}
1059
1060static long bdi_max_pause(struct backing_dev_info *bdi,
1061              unsigned long bdi_dirty)
1062{
1063    long bw = bdi->avg_write_bandwidth;
1064    long t;
1065
1066    /*
1067     * Limit pause time for small memory systems. If sleeping for too long
1068     * time, a small pool of dirty/writeback pages may go empty and disk go
1069     * idle.
1070     *
1071     * 8 serves as the safety ratio.
1072     */
1073    t = bdi_dirty / (1 + bw / roundup_pow_of_two(1 + HZ / 8));
1074    t++;
1075
1076    return min_t(long, t, MAX_PAUSE);
1077}
1078
1079static long bdi_min_pause(struct backing_dev_info *bdi,
1080              long max_pause,
1081              unsigned long task_ratelimit,
1082              unsigned long dirty_ratelimit,
1083              int *nr_dirtied_pause)
1084{
1085    long hi = ilog2(bdi->avg_write_bandwidth);
1086    long lo = ilog2(bdi->dirty_ratelimit);
1087    long t; /* target pause */
1088    long pause; /* estimated next pause */
1089    int pages; /* target nr_dirtied_pause */
1090
1091    /* target for 10ms pause on 1-dd case */
1092    t = max(1, HZ / 100);
1093
1094    /*
1095     * Scale up pause time for concurrent dirtiers in order to reduce CPU
1096     * overheads.
1097     *
1098     * (N * 10ms) on 2^N concurrent tasks.
1099     */
1100    if (hi > lo)
1101        t += (hi - lo) * (10 * HZ) / 1024;
1102
1103    /*
1104     * This is a bit convoluted. We try to base the next nr_dirtied_pause
1105     * on the much more stable dirty_ratelimit. However the next pause time
1106     * will be computed based on task_ratelimit and the two rate limits may
1107     * depart considerably at some time. Especially if task_ratelimit goes
1108     * below dirty_ratelimit/2 and the target pause is max_pause, the next
1109     * pause time will be max_pause*2 _trimmed down_ to max_pause. As a
1110     * result task_ratelimit won't be executed faithfully, which could
1111     * eventually bring down dirty_ratelimit.
1112     *
1113     * We apply two rules to fix it up:
1114     * 1) try to estimate the next pause time and if necessary, use a lower
1115     * nr_dirtied_pause so as not to exceed max_pause. When this happens,
1116     * nr_dirtied_pause will be "dancing" with task_ratelimit.
1117     * 2) limit the target pause time to max_pause/2, so that the normal
1118     * small fluctuations of task_ratelimit won't trigger rule (1) and
1119     * nr_dirtied_pause will remain as stable as dirty_ratelimit.
1120     */
1121    t = min(t, 1 + max_pause / 2);
1122    pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1123
1124    /*
1125     * Tiny nr_dirtied_pause is found to hurt I/O performance in the test
1126     * case fio-mmap-randwrite-64k, which does 16*{sync read, async write}.
1127     * When the 16 consecutive reads are often interrupted by some dirty
1128     * throttling pause during the async writes, cfq will go into idles
1129     * (deadline is fine). So push nr_dirtied_pause as high as possible
1130     * until reaches DIRTY_POLL_THRESH=32 pages.
1131     */
1132    if (pages < DIRTY_POLL_THRESH) {
1133        t = max_pause;
1134        pages = dirty_ratelimit * t / roundup_pow_of_two(HZ);
1135        if (pages > DIRTY_POLL_THRESH) {
1136            pages = DIRTY_POLL_THRESH;
1137            t = HZ * DIRTY_POLL_THRESH / dirty_ratelimit;
1138        }
1139    }
1140
1141    pause = HZ * pages / (task_ratelimit + 1);
1142    if (pause > max_pause) {
1143        t = max_pause;
1144        pages = task_ratelimit * t / roundup_pow_of_two(HZ);
1145    }
1146
1147    *nr_dirtied_pause = pages;
1148    /*
1149     * The minimal pause time will normally be half the target pause time.
1150     */
1151    return pages >= DIRTY_POLL_THRESH ? 1 + t / 2 : t;
1152}
1153
1154/*
1155 * balance_dirty_pages() must be called by processes which are generating dirty
1156 * data. It looks at the number of dirty pages in the machine and will force
1157 * the caller to wait once crossing the (background_thresh + dirty_thresh) / 2.
1158 * If we're over `background_thresh' then the writeback threads are woken to
1159 * perform some writeout.
1160 */
1161static void balance_dirty_pages(struct address_space *mapping,
1162                unsigned long pages_dirtied)
1163{
1164    unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */
1165    unsigned long bdi_reclaimable;
1166    unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */
1167    unsigned long bdi_dirty;
1168    unsigned long freerun;
1169    unsigned long background_thresh;
1170    unsigned long dirty_thresh;
1171    unsigned long bdi_thresh;
1172    long period;
1173    long pause;
1174    long max_pause;
1175    long min_pause;
1176    int nr_dirtied_pause;
1177    bool dirty_exceeded = false;
1178    unsigned long task_ratelimit;
1179    unsigned long dirty_ratelimit;
1180    unsigned long pos_ratio;
1181    struct backing_dev_info *bdi = mapping->backing_dev_info;
1182    unsigned long start_time = jiffies;
1183
1184    for (;;) {
1185        unsigned long now = jiffies;
1186
1187        /*
1188         * Unstable writes are a feature of certain networked
1189         * filesystems (i.e. NFS) in which data may have been
1190         * written to the server's write cache, but has not yet
1191         * been flushed to permanent storage.
1192         */
1193        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
1194                    global_page_state(NR_UNSTABLE_NFS);
1195        nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
1196
1197        global_dirty_limits(&background_thresh, &dirty_thresh);
1198
1199        /*
1200         * Throttle it only when the background writeback cannot
1201         * catch-up. This avoids (excessively) small writeouts
1202         * when the bdi limits are ramping up.
1203         */
1204        freerun = dirty_freerun_ceiling(dirty_thresh,
1205                        background_thresh);
1206        if (nr_dirty <= freerun) {
1207            current->dirty_paused_when = now;
1208            current->nr_dirtied = 0;
1209            current->nr_dirtied_pause =
1210                dirty_poll_interval(nr_dirty, dirty_thresh);
1211            break;
1212        }
1213
1214        if (unlikely(!writeback_in_progress(bdi)))
1215            bdi_start_background_writeback(bdi);
1216
1217        /*
1218         * bdi_thresh is not treated as some limiting factor as
1219         * dirty_thresh, due to reasons
1220         * - in JBOD setup, bdi_thresh can fluctuate a lot
1221         * - in a system with HDD and USB key, the USB key may somehow
1222         * go into state (bdi_dirty >> bdi_thresh) either because
1223         * bdi_dirty starts high, or because bdi_thresh drops low.
1224         * In this case we don't want to hard throttle the USB key
1225         * dirtiers for 100 seconds until bdi_dirty drops under
1226         * bdi_thresh. Instead the auxiliary bdi control line in
1227         * bdi_position_ratio() will let the dirtier task progress
1228         * at some rate <= (write_bw / 2) for bringing down bdi_dirty.
1229         */
1230        bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
1231
1232        /*
1233         * In order to avoid the stacked BDI deadlock we need
1234         * to ensure we accurately count the 'dirty' pages when
1235         * the threshold is low.
1236         *
1237         * Otherwise it would be possible to get thresh+n pages
1238         * reported dirty, even though there are thresh-m pages
1239         * actually dirty; with m+n sitting in the percpu
1240         * deltas.
1241         */
1242        if (bdi_thresh < 2 * bdi_stat_error(bdi)) {
1243            bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
1244            bdi_dirty = bdi_reclaimable +
1245                    bdi_stat_sum(bdi, BDI_WRITEBACK);
1246        } else {
1247            bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
1248            bdi_dirty = bdi_reclaimable +
1249                    bdi_stat(bdi, BDI_WRITEBACK);
1250        }
1251
1252        dirty_exceeded = (bdi_dirty > bdi_thresh) &&
1253                  (nr_dirty > dirty_thresh);
1254        if (dirty_exceeded && !bdi->dirty_exceeded)
1255            bdi->dirty_exceeded = 1;
1256
1257        bdi_update_bandwidth(bdi, dirty_thresh, background_thresh,
1258                     nr_dirty, bdi_thresh, bdi_dirty,
1259                     start_time);
1260
1261        dirty_ratelimit = bdi->dirty_ratelimit;
1262        pos_ratio = bdi_position_ratio(bdi, dirty_thresh,
1263                           background_thresh, nr_dirty,
1264                           bdi_thresh, bdi_dirty);
1265        task_ratelimit = ((u64)dirty_ratelimit * pos_ratio) >>
1266                            RATELIMIT_CALC_SHIFT;
1267        max_pause = bdi_max_pause(bdi, bdi_dirty);
1268        min_pause = bdi_min_pause(bdi, max_pause,
1269                      task_ratelimit, dirty_ratelimit,
1270                      &nr_dirtied_pause);
1271
1272        if (unlikely(task_ratelimit == 0)) {
1273            period = max_pause;
1274            pause = max_pause;
1275            goto pause;
1276        }
1277        period = HZ * pages_dirtied / task_ratelimit;
1278        pause = period;
1279        if (current->dirty_paused_when)
1280            pause -= now - current->dirty_paused_when;
1281        /*
1282         * For less than 1s think time (ext3/4 may block the dirtier
1283         * for up to 800ms from time to time on 1-HDD; so does xfs,
1284         * however at much less frequency), try to compensate it in
1285         * future periods by updating the virtual time; otherwise just
1286         * do a reset, as it may be a light dirtier.
1287         */
1288        if (pause < min_pause) {
1289            trace_balance_dirty_pages(bdi,
1290                          dirty_thresh,
1291                          background_thresh,
1292                          nr_dirty,
1293                          bdi_thresh,
1294                          bdi_dirty,
1295                          dirty_ratelimit,
1296                          task_ratelimit,
1297                          pages_dirtied,
1298                          period,
1299                          min(pause, 0L),
1300                          start_time);
1301            if (pause < -HZ) {
1302                current->dirty_paused_when = now;
1303                current->nr_dirtied = 0;
1304            } else if (period) {
1305                current->dirty_paused_when += period;
1306                current->nr_dirtied = 0;
1307            } else if (current->nr_dirtied_pause <= pages_dirtied)
1308                current->nr_dirtied_pause += pages_dirtied;
1309            break;
1310        }
1311        if (unlikely(pause > max_pause)) {
1312            /* for occasional dropped task_ratelimit */
1313            now += min(pause - max_pause, max_pause);
1314            pause = max_pause;
1315        }
1316
1317pause:
1318        trace_balance_dirty_pages(bdi,
1319                      dirty_thresh,
1320                      background_thresh,
1321                      nr_dirty,
1322                      bdi_thresh,
1323                      bdi_dirty,
1324                      dirty_ratelimit,
1325                      task_ratelimit,
1326                      pages_dirtied,
1327                      period,
1328                      pause,
1329                      start_time);
1330        __set_current_state(TASK_KILLABLE);
1331        io_schedule_timeout(pause);
1332
1333        current->dirty_paused_when = now + pause;
1334        current->nr_dirtied = 0;
1335        current->nr_dirtied_pause = nr_dirtied_pause;
1336
1337        /*
1338         * This is typically equal to (nr_dirty < dirty_thresh) and can
1339         * also keep "1000+ dd on a slow USB stick" under control.
1340         */
1341        if (task_ratelimit)
1342            break;
1343
1344        /*
1345         * In the case of an unresponding NFS server and the NFS dirty
1346         * pages exceeds dirty_thresh, give the other good bdi's a pipe
1347         * to go through, so that tasks on them still remain responsive.
1348         *
1349         * In theory 1 page is enough to keep the comsumer-producer
1350         * pipe going: the flusher cleans 1 page => the task dirties 1
1351         * more page. However bdi_dirty has accounting errors. So use
1352         * the larger and more IO friendly bdi_stat_error.
1353         */
1354        if (bdi_dirty <= bdi_stat_error(bdi))
1355            break;
1356
1357        if (fatal_signal_pending(current))
1358            break;
1359    }
1360
1361    if (!dirty_exceeded && bdi->dirty_exceeded)
1362        bdi->dirty_exceeded = 0;
1363
1364    if (writeback_in_progress(bdi))
1365        return;
1366
1367    /*
1368     * In laptop mode, we wait until hitting the higher threshold before
1369     * starting background writeout, and then write out all the way down
1370     * to the lower threshold. So slow writers cause minimal disk activity.
1371     *
1372     * In normal mode, we start background writeout at the lower
1373     * background_thresh, to keep the amount of dirty memory low.
1374     */
1375    if (laptop_mode)
1376        return;
1377
1378    if (nr_reclaimable > background_thresh)
1379        bdi_start_background_writeback(bdi);
1380}
1381
1382void set_page_dirty_balance(struct page *page, int page_mkwrite)
1383{
1384    if (set_page_dirty(page) || page_mkwrite) {
1385        struct address_space *mapping = page_mapping(page);
1386
1387        if (mapping)
1388            balance_dirty_pages_ratelimited(mapping);
1389    }
1390}
1391
1392static DEFINE_PER_CPU(int, bdp_ratelimits);
1393
1394/*
1395 * Normal tasks are throttled by
1396 * loop {
1397 * dirty tsk->nr_dirtied_pause pages;
1398 * take a snap in balance_dirty_pages();
1399 * }
1400 * However there is a worst case. If every task exit immediately when dirtied
1401 * (tsk->nr_dirtied_pause - 1) pages, balance_dirty_pages() will never be
1402 * called to throttle the page dirties. The solution is to save the not yet
1403 * throttled page dirties in dirty_throttle_leaks on task exit and charge them
1404 * randomly into the running tasks. This works well for the above worst case,
1405 * as the new task will pick up and accumulate the old task's leaked dirty
1406 * count and eventually get throttled.
1407 */
1408DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
1409
1410/**
1411 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
1412 * @mapping: address_space which was dirtied
1413 * @nr_pages_dirtied: number of pages which the caller has just dirtied
1414 *
1415 * Processes which are dirtying memory should call in here once for each page
1416 * which was newly dirtied. The function will periodically check the system's
1417 * dirty state and will initiate writeback if needed.
1418 *
1419 * On really big machines, get_writeback_state is expensive, so try to avoid
1420 * calling it too often (ratelimiting). But once we're over the dirty memory
1421 * limit we decrease the ratelimiting by a lot, to prevent individual processes
1422 * from overshooting the limit by (ratelimit_pages) each.
1423 */
1424void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
1425                    unsigned long nr_pages_dirtied)
1426{
1427    struct backing_dev_info *bdi = mapping->backing_dev_info;
1428    int ratelimit;
1429    int *p;
1430
1431    if (!bdi_cap_account_dirty(bdi))
1432        return;
1433
1434    ratelimit = current->nr_dirtied_pause;
1435    if (bdi->dirty_exceeded)
1436        ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10));
1437
1438    preempt_disable();
1439    /*
1440     * This prevents one CPU to accumulate too many dirtied pages without
1441     * calling into balance_dirty_pages(), which can happen when there are
1442     * 1000+ tasks, all of them start dirtying pages at exactly the same
1443     * time, hence all honoured too large initial task->nr_dirtied_pause.
1444     */
1445    p = &__get_cpu_var(bdp_ratelimits);
1446    if (unlikely(current->nr_dirtied >= ratelimit))
1447        *p = 0;
1448    else if (unlikely(*p >= ratelimit_pages)) {
1449        *p = 0;
1450        ratelimit = 0;
1451    }
1452    /*
1453     * Pick up the dirtied pages by the exited tasks. This avoids lots of
1454     * short-lived tasks (eg. gcc invocations in a kernel build) escaping
1455     * the dirty throttling and livelock other long-run dirtiers.
1456     */
1457    p = &__get_cpu_var(dirty_throttle_leaks);
1458    if (*p > 0 && current->nr_dirtied < ratelimit) {
1459        nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
1460        *p -= nr_pages_dirtied;
1461        current->nr_dirtied += nr_pages_dirtied;
1462    }
1463    preempt_enable();
1464
1465    if (unlikely(current->nr_dirtied >= ratelimit))
1466        balance_dirty_pages(mapping, current->nr_dirtied);
1467}
1468EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
1469
1470void throttle_vm_writeout(gfp_t gfp_mask)
1471{
1472    unsigned long background_thresh;
1473    unsigned long dirty_thresh;
1474
1475        for ( ; ; ) {
1476        global_dirty_limits(&background_thresh, &dirty_thresh);
1477        dirty_thresh = hard_dirty_limit(dirty_thresh);
1478
1479                /*
1480                 * Boost the allowable dirty threshold a bit for page
1481                 * allocators so they don't get DoS'ed by heavy writers
1482                 */
1483                dirty_thresh += dirty_thresh / 10; /* wheeee... */
1484
1485                if (global_page_state(NR_UNSTABLE_NFS) +
1486            global_page_state(NR_WRITEBACK) <= dirty_thresh)
1487                            break;
1488                congestion_wait(BLK_RW_ASYNC, HZ/10);
1489
1490        /*
1491         * The caller might hold locks which can prevent IO completion
1492         * or progress in the filesystem. So we cannot just sit here
1493         * waiting for IO to complete.
1494         */
1495        if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
1496            break;
1497        }
1498}
1499
1500/*
1501 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
1502 */
1503int dirty_writeback_centisecs_handler(ctl_table *table, int write,
1504    void __user *buffer, size_t *length, loff_t *ppos)
1505{
1506    proc_dointvec(table, write, buffer, length, ppos);
1507    bdi_arm_supers_timer();
1508    return 0;
1509}
1510
1511#ifdef CONFIG_BLOCK
1512void laptop_mode_timer_fn(unsigned long data)
1513{
1514    struct request_queue *q = (struct request_queue *)data;
1515    int nr_pages = global_page_state(NR_FILE_DIRTY) +
1516        global_page_state(NR_UNSTABLE_NFS);
1517
1518    /*
1519     * We want to write everything out, not just down to the dirty
1520     * threshold
1521     */
1522    if (bdi_has_dirty_io(&q->backing_dev_info))
1523        bdi_start_writeback(&q->backing_dev_info, nr_pages,
1524                    WB_REASON_LAPTOP_TIMER);
1525}
1526
1527/*
1528 * We've spun up the disk and we're in laptop mode: schedule writeback
1529 * of all dirty data a few seconds from now. If the flush is already scheduled
1530 * then push it back - the user is still using the disk.
1531 */
1532void laptop_io_completion(struct backing_dev_info *info)
1533{
1534    mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
1535}
1536
1537/*
1538 * We're in laptop mode and we've just synced. The sync's writes will have
1539 * caused another writeback to be scheduled by laptop_io_completion.
1540 * Nothing needs to be written back anymore, so we unschedule the writeback.
1541 */
1542void laptop_sync_completion(void)
1543{
1544    struct backing_dev_info *bdi;
1545
1546    rcu_read_lock();
1547
1548    list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
1549        del_timer(&bdi->laptop_mode_wb_timer);
1550
1551    rcu_read_unlock();
1552}
1553#endif
1554
1555/*
1556 * If ratelimit_pages is too high then we can get into dirty-data overload
1557 * if a large number of processes all perform writes at the same time.
1558 * If it is too low then SMP machines will call the (expensive)
1559 * get_writeback_state too often.
1560 *
1561 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
1562 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
1563 * thresholds.
1564 */
1565
1566void writeback_set_ratelimit(void)
1567{
1568    unsigned long background_thresh;
1569    unsigned long dirty_thresh;
1570    global_dirty_limits(&background_thresh, &dirty_thresh);
1571    ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1572    if (ratelimit_pages < 16)
1573        ratelimit_pages = 16;
1574}
1575
1576static int __cpuinit
1577ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
1578{
1579    writeback_set_ratelimit();
1580    return NOTIFY_DONE;
1581}
1582
1583static struct notifier_block __cpuinitdata ratelimit_nb = {
1584    .notifier_call = ratelimit_handler,
1585    .next = NULL,
1586};
1587
1588/*
1589 * Called early on to tune the page writeback dirty limits.
1590 *
1591 * We used to scale dirty pages according to how total memory
1592 * related to pages that could be allocated for buffers (by
1593 * comparing nr_free_buffer_pages() to vm_total_pages.
1594 *
1595 * However, that was when we used "dirty_ratio" to scale with
1596 * all memory, and we don't do that any more. "dirty_ratio"
1597 * is now applied to total non-HIGHPAGE memory (by subtracting
1598 * totalhigh_pages from vm_total_pages), and as such we can't
1599 * get into the old insane situation any more where we had
1600 * large amounts of dirty pages compared to a small amount of
1601 * non-HIGHMEM memory.
1602 *
1603 * But we might still want to scale the dirty_ratio by how
1604 * much memory the box has..
1605 */
1606void __init page_writeback_init(void)
1607{
1608    int shift;
1609
1610    writeback_set_ratelimit();
1611    register_cpu_notifier(&ratelimit_nb);
1612
1613    shift = calc_period_shift();
1614    prop_descriptor_init(&vm_completions, shift);
1615}
1616
1617/**
1618 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
1619 * @mapping: address space structure to write
1620 * @start: starting page index
1621 * @end: ending page index (inclusive)
1622 *
1623 * This function scans the page range from @start to @end (inclusive) and tags
1624 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
1625 * that write_cache_pages (or whoever calls this function) will then use
1626 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
1627 * used to avoid livelocking of writeback by a process steadily creating new
1628 * dirty pages in the file (thus it is important for this function to be quick
1629 * so that it can tag pages faster than a dirtying process can create them).
1630 */
1631/*
1632 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
1633 */
1634void tag_pages_for_writeback(struct address_space *mapping,
1635                 pgoff_t start, pgoff_t end)
1636{
1637#define WRITEBACK_TAG_BATCH 4096
1638    unsigned long tagged;
1639
1640    do {
1641        spin_lock_irq(&mapping->tree_lock);
1642        tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
1643                &start, end, WRITEBACK_TAG_BATCH,
1644                PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
1645        spin_unlock_irq(&mapping->tree_lock);
1646        WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
1647        cond_resched();
1648        /* We check 'start' to handle wrapping when end == ~0UL */
1649    } while (tagged >= WRITEBACK_TAG_BATCH && start);
1650}
1651EXPORT_SYMBOL(tag_pages_for_writeback);
1652
1653/**
1654 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
1655 * @mapping: address space structure to write
1656 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1657 * @writepage: function called for each page
1658 * @data: data passed to writepage function
1659 *
1660 * If a page is already under I/O, write_cache_pages() skips it, even
1661 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1662 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1663 * and msync() need to guarantee that all the data which was dirty at the time
1664 * the call was made get new I/O started against them. If wbc->sync_mode is
1665 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1666 * existing IO to complete.
1667 *
1668 * To avoid livelocks (when other process dirties new pages), we first tag
1669 * pages which should be written back with TOWRITE tag and only then start
1670 * writing them. For data-integrity sync we have to be careful so that we do
1671 * not miss some pages (e.g., because some other process has cleared TOWRITE
1672 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
1673 * by the process clearing the DIRTY tag (and submitting the page for IO).
1674 */
1675int write_cache_pages(struct address_space *mapping,
1676              struct writeback_control *wbc, writepage_t writepage,
1677              void *data)
1678{
1679    int ret = 0;
1680    int done = 0;
1681    struct pagevec pvec;
1682    int nr_pages;
1683    pgoff_t uninitialized_var(writeback_index);
1684    pgoff_t index;
1685    pgoff_t end; /* Inclusive */
1686    pgoff_t done_index;
1687    int cycled;
1688    int range_whole = 0;
1689    int tag;
1690
1691    pagevec_init(&pvec, 0);
1692    if (wbc->range_cyclic) {
1693        writeback_index = mapping->writeback_index; /* prev offset */
1694        index = writeback_index;
1695        if (index == 0)
1696            cycled = 1;
1697        else
1698            cycled = 0;
1699        end = -1;
1700    } else {
1701        index = wbc->range_start >> PAGE_CACHE_SHIFT;
1702        end = wbc->range_end >> PAGE_CACHE_SHIFT;
1703        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
1704            range_whole = 1;
1705        cycled = 1; /* ignore range_cyclic tests */
1706    }
1707    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1708        tag = PAGECACHE_TAG_TOWRITE;
1709    else
1710        tag = PAGECACHE_TAG_DIRTY;
1711retry:
1712    if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
1713        tag_pages_for_writeback(mapping, index, end);
1714    done_index = index;
1715    while (!done && (index <= end)) {
1716        int i;
1717
1718        nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
1719                  min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
1720        if (nr_pages == 0)
1721            break;
1722
1723        for (i = 0; i < nr_pages; i++) {
1724            struct page *page = pvec.pages[i];
1725
1726            /*
1727             * At this point, the page may be truncated or
1728             * invalidated (changing page->mapping to NULL), or
1729             * even swizzled back from swapper_space to tmpfs file
1730             * mapping. However, page->index will not change
1731             * because we have a reference on the page.
1732             */
1733            if (page->index > end) {
1734                /*
1735                 * can't be range_cyclic (1st pass) because
1736                 * end == -1 in that case.
1737                 */
1738                done = 1;
1739                break;
1740            }
1741
1742            done_index = page->index;
1743
1744            lock_page(page);
1745
1746            /*
1747             * Page truncated or invalidated. We can freely skip it
1748             * then, even for data integrity operations: the page
1749             * has disappeared concurrently, so there could be no
1750             * real expectation of this data interity operation
1751             * even if there is now a new, dirty page at the same
1752             * pagecache address.
1753             */
1754            if (unlikely(page->mapping != mapping)) {
1755continue_unlock:
1756                unlock_page(page);
1757                continue;
1758            }
1759
1760            if (!PageDirty(page)) {
1761                /* someone wrote it for us */
1762                goto continue_unlock;
1763            }
1764
1765            if (PageWriteback(page)) {
1766                if (wbc->sync_mode != WB_SYNC_NONE)
1767                    wait_on_page_writeback(page);
1768                else
1769                    goto continue_unlock;
1770            }
1771
1772            BUG_ON(PageWriteback(page));
1773            if (!clear_page_dirty_for_io(page))
1774                goto continue_unlock;
1775
1776            trace_wbc_writepage(wbc, mapping->backing_dev_info);
1777            ret = (*writepage)(page, wbc, data);
1778            if (unlikely(ret)) {
1779                if (ret == AOP_WRITEPAGE_ACTIVATE) {
1780                    unlock_page(page);
1781                    ret = 0;
1782                } else {
1783                    /*
1784                     * done_index is set past this page,
1785                     * so media errors will not choke
1786                     * background writeout for the entire
1787                     * file. This has consequences for
1788                     * range_cyclic semantics (ie. it may
1789                     * not be suitable for data integrity
1790                     * writeout).
1791                     */
1792                    done_index = page->index + 1;
1793                    done = 1;
1794                    break;
1795                }
1796            }
1797
1798            /*
1799             * We stop writing back only if we are not doing
1800             * integrity sync. In case of integrity sync we have to
1801             * keep going until we have written all the pages
1802             * we tagged for writeback prior to entering this loop.
1803             */
1804            if (--wbc->nr_to_write <= 0 &&
1805                wbc->sync_mode == WB_SYNC_NONE) {
1806                done = 1;
1807                break;
1808            }
1809        }
1810        pagevec_release(&pvec);
1811        cond_resched();
1812    }
1813    if (!cycled && !done) {
1814        /*
1815         * range_cyclic:
1816         * We hit the last page and there is more work to be done: wrap
1817         * back to the start of the file
1818         */
1819        cycled = 1;
1820        index = 0;
1821        end = writeback_index - 1;
1822        goto retry;
1823    }
1824    if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
1825        mapping->writeback_index = done_index;
1826
1827    return ret;
1828}
1829EXPORT_SYMBOL(write_cache_pages);
1830
1831/*
1832 * Function used by generic_writepages to call the real writepage
1833 * function and set the mapping flags on error
1834 */
1835static int __writepage(struct page *page, struct writeback_control *wbc,
1836               void *data)
1837{
1838    struct address_space *mapping = data;
1839    int ret = mapping->a_ops->writepage(page, wbc);
1840    mapping_set_error(mapping, ret);
1841    return ret;
1842}
1843
1844/**
1845 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
1846 * @mapping: address space structure to write
1847 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1848 *
1849 * This is a library function, which implements the writepages()
1850 * address_space_operation.
1851 */
1852int generic_writepages(struct address_space *mapping,
1853               struct writeback_control *wbc)
1854{
1855    struct blk_plug plug;
1856    int ret;
1857
1858    /* deal with chardevs and other special file */
1859    if (!mapping->a_ops->writepage)
1860        return 0;
1861
1862    blk_start_plug(&plug);
1863    ret = write_cache_pages(mapping, wbc, __writepage, mapping);
1864    blk_finish_plug(&plug);
1865    return ret;
1866}
1867
1868EXPORT_SYMBOL(generic_writepages);
1869
1870int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1871{
1872    int ret;
1873
1874    if (wbc->nr_to_write <= 0)
1875        return 0;
1876    if (mapping->a_ops->writepages)
1877        ret = mapping->a_ops->writepages(mapping, wbc);
1878    else
1879        ret = generic_writepages(mapping, wbc);
1880    return ret;
1881}
1882
1883/**
1884 * write_one_page - write out a single page and optionally wait on I/O
1885 * @page: the page to write
1886 * @wait: if true, wait on writeout
1887 *
1888 * The page must be locked by the caller and will be unlocked upon return.
1889 *
1890 * write_one_page() returns a negative error code if I/O failed.
1891 */
1892int write_one_page(struct page *page, int wait)
1893{
1894    struct address_space *mapping = page->mapping;
1895    int ret = 0;
1896    struct writeback_control wbc = {
1897        .sync_mode = WB_SYNC_ALL,
1898        .nr_to_write = 1,
1899    };
1900
1901    BUG_ON(!PageLocked(page));
1902
1903    if (wait)
1904        wait_on_page_writeback(page);
1905
1906    if (clear_page_dirty_for_io(page)) {
1907        page_cache_get(page);
1908        ret = mapping->a_ops->writepage(page, &wbc);
1909        if (ret == 0 && wait) {
1910            wait_on_page_writeback(page);
1911            if (PageError(page))
1912                ret = -EIO;
1913        }
1914        page_cache_release(page);
1915    } else {
1916        unlock_page(page);
1917    }
1918    return ret;
1919}
1920EXPORT_SYMBOL(write_one_page);
1921
1922/*
1923 * For address_spaces which do not use buffers nor write back.
1924 */
1925int __set_page_dirty_no_writeback(struct page *page)
1926{
1927    if (!PageDirty(page))
1928        return !TestSetPageDirty(page);
1929    return 0;
1930}
1931
1932/*
1933 * Helper function for set_page_dirty family.
1934 * NOTE: This relies on being atomic wrt interrupts.
1935 */
1936void account_page_dirtied(struct page *page, struct address_space *mapping)
1937{
1938    if (mapping_cap_account_dirty(mapping)) {
1939        __inc_zone_page_state(page, NR_FILE_DIRTY);
1940        __inc_zone_page_state(page, NR_DIRTIED);
1941        __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
1942        __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
1943        task_io_account_write(PAGE_CACHE_SIZE);
1944        current->nr_dirtied++;
1945        this_cpu_inc(bdp_ratelimits);
1946    }
1947}
1948EXPORT_SYMBOL(account_page_dirtied);
1949
1950/*
1951 * Helper function for set_page_writeback family.
1952 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
1953 * wrt interrupts.
1954 */
1955void account_page_writeback(struct page *page)
1956{
1957    inc_zone_page_state(page, NR_WRITEBACK);
1958}
1959EXPORT_SYMBOL(account_page_writeback);
1960
1961/*
1962 * For address_spaces which do not use buffers. Just tag the page as dirty in
1963 * its radix tree.
1964 *
1965 * This is also used when a single buffer is being dirtied: we want to set the
1966 * page dirty in that case, but not all the buffers. This is a "bottom-up"
1967 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1968 *
1969 * Most callers have locked the page, which pins the address_space in memory.
1970 * But zap_pte_range() does not lock the page, however in that case the
1971 * mapping is pinned by the vma's ->vm_file reference.
1972 *
1973 * We take care to handle the case where the page was truncated from the
1974 * mapping by re-checking page_mapping() inside tree_lock.
1975 */
1976int __set_page_dirty_nobuffers(struct page *page)
1977{
1978    if (!TestSetPageDirty(page)) {
1979        struct address_space *mapping = page_mapping(page);
1980        struct address_space *mapping2;
1981
1982        if (!mapping)
1983            return 1;
1984
1985        spin_lock_irq(&mapping->tree_lock);
1986        mapping2 = page_mapping(page);
1987        if (mapping2) { /* Race with truncate? */
1988            BUG_ON(mapping2 != mapping);
1989            WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1990            account_page_dirtied(page, mapping);
1991            radix_tree_tag_set(&mapping->page_tree,
1992                page_index(page), PAGECACHE_TAG_DIRTY);
1993        }
1994        spin_unlock_irq(&mapping->tree_lock);
1995        if (mapping->host) {
1996            /* !PageAnon && !swapper_space */
1997            __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1998        }
1999        return 1;
2000    }
2001    return 0;
2002}
2003EXPORT_SYMBOL(__set_page_dirty_nobuffers);
2004
2005/*
2006 * Call this whenever redirtying a page, to de-account the dirty counters
2007 * (NR_DIRTIED, BDI_DIRTIED, tsk->nr_dirtied), so that they match the written
2008 * counters (NR_WRITTEN, BDI_WRITTEN) in long term. The mismatches will lead to
2009 * systematic errors in balanced_dirty_ratelimit and the dirty pages position
2010 * control.
2011 */
2012void account_page_redirty(struct page *page)
2013{
2014    struct address_space *mapping = page->mapping;
2015    if (mapping && mapping_cap_account_dirty(mapping)) {
2016        current->nr_dirtied--;
2017        dec_zone_page_state(page, NR_DIRTIED);
2018        dec_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED);
2019    }
2020}
2021EXPORT_SYMBOL(account_page_redirty);
2022
2023/*
2024 * When a writepage implementation decides that it doesn't want to write this
2025 * page for some reason, it should redirty the locked page via
2026 * redirty_page_for_writepage() and it should then unlock the page and return 0
2027 */
2028int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
2029{
2030    wbc->pages_skipped++;
2031    account_page_redirty(page);
2032    return __set_page_dirty_nobuffers(page);
2033}
2034EXPORT_SYMBOL(redirty_page_for_writepage);
2035
2036/*
2037 * Dirty a page.
2038 *
2039 * For pages with a mapping this should be done under the page lock
2040 * for the benefit of asynchronous memory errors who prefer a consistent
2041 * dirty state. This rule can be broken in some special cases,
2042 * but should be better not to.
2043 *
2044 * If the mapping doesn't provide a set_page_dirty a_op, then
2045 * just fall through and assume that it wants buffer_heads.
2046 */
2047int set_page_dirty(struct page *page)
2048{
2049    struct address_space *mapping = page_mapping(page);
2050
2051    if (likely(mapping)) {
2052        int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
2053        /*
2054         * readahead/lru_deactivate_page could remain
2055         * PG_readahead/PG_reclaim due to race with end_page_writeback
2056         * About readahead, if the page is written, the flags would be
2057         * reset. So no problem.
2058         * About lru_deactivate_page, if the page is redirty, the flag
2059         * will be reset. So no problem. but if the page is used by readahead
2060         * it will confuse readahead and make it restart the size rampup
2061         * process. But it's a trivial problem.
2062         */
2063        ClearPageReclaim(page);
2064#ifdef CONFIG_BLOCK
2065        if (!spd)
2066            spd = __set_page_dirty_buffers;
2067#endif
2068        return (*spd)(page);
2069    }
2070    if (!PageDirty(page)) {
2071        if (!TestSetPageDirty(page))
2072            return 1;
2073    }
2074    return 0;
2075}
2076EXPORT_SYMBOL(set_page_dirty);
2077
2078/*
2079 * set_page_dirty() is racy if the caller has no reference against
2080 * page->mapping->host, and if the page is unlocked. This is because another
2081 * CPU could truncate the page off the mapping and then free the mapping.
2082 *
2083 * Usually, the page _is_ locked, or the caller is a user-space process which
2084 * holds a reference on the inode by having an open file.
2085 *
2086 * In other cases, the page should be locked before running set_page_dirty().
2087 */
2088int set_page_dirty_lock(struct page *page)
2089{
2090    int ret;
2091
2092    lock_page(page);
2093    ret = set_page_dirty(page);
2094    unlock_page(page);
2095    return ret;
2096}
2097EXPORT_SYMBOL(set_page_dirty_lock);
2098
2099/*
2100 * Clear a page's dirty flag, while caring for dirty memory accounting.
2101 * Returns true if the page was previously dirty.
2102 *
2103 * This is for preparing to put the page under writeout. We leave the page
2104 * tagged as dirty in the radix tree so that a concurrent write-for-sync
2105 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
2106 * implementation will run either set_page_writeback() or set_page_dirty(),
2107 * at which stage we bring the page's dirty flag and radix-tree dirty tag
2108 * back into sync.
2109 *
2110 * This incoherency between the page's dirty flag and radix-tree tag is
2111 * unfortunate, but it only exists while the page is locked.
2112 */
2113int clear_page_dirty_for_io(struct page *page)
2114{
2115    struct address_space *mapping = page_mapping(page);
2116
2117    BUG_ON(!PageLocked(page));
2118
2119    if (mapping && mapping_cap_account_dirty(mapping)) {
2120        /*
2121         * Yes, Virginia, this is indeed insane.
2122         *
2123         * We use this sequence to make sure that
2124         * (a) we account for dirty stats properly
2125         * (b) we tell the low-level filesystem to
2126         * mark the whole page dirty if it was
2127         * dirty in a pagetable. Only to then
2128         * (c) clean the page again and return 1 to
2129         * cause the writeback.
2130         *
2131         * This way we avoid all nasty races with the
2132         * dirty bit in multiple places and clearing
2133         * them concurrently from different threads.
2134         *
2135         * Note! Normally the "set_page_dirty(page)"
2136         * has no effect on the actual dirty bit - since
2137         * that will already usually be set. But we
2138         * need the side effects, and it can help us
2139         * avoid races.
2140         *
2141         * We basically use the page "master dirty bit"
2142         * as a serialization point for all the different
2143         * threads doing their things.
2144         */
2145        if (page_mkclean(page))
2146            set_page_dirty(page);
2147        /*
2148         * We carefully synchronise fault handlers against
2149         * installing a dirty pte and marking the page dirty
2150         * at this point. We do this by having them hold the
2151         * page lock at some point after installing their
2152         * pte, but before marking the page dirty.
2153         * Pages are always locked coming in here, so we get
2154         * the desired exclusion. See mm/memory.c:do_wp_page()
2155         * for more comments.
2156         */
2157        if (TestClearPageDirty(page)) {
2158            dec_zone_page_state(page, NR_FILE_DIRTY);
2159            dec_bdi_stat(mapping->backing_dev_info,
2160                    BDI_RECLAIMABLE);
2161            return 1;
2162        }
2163        return 0;
2164    }
2165    return TestClearPageDirty(page);
2166}
2167EXPORT_SYMBOL(clear_page_dirty_for_io);
2168
2169int test_clear_page_writeback(struct page *page)
2170{
2171    struct address_space *mapping = page_mapping(page);
2172    int ret;
2173
2174    if (mapping) {
2175        struct backing_dev_info *bdi = mapping->backing_dev_info;
2176        unsigned long flags;
2177
2178        spin_lock_irqsave(&mapping->tree_lock, flags);
2179        ret = TestClearPageWriteback(page);
2180        if (ret) {
2181            radix_tree_tag_clear(&mapping->page_tree,
2182                        page_index(page),
2183                        PAGECACHE_TAG_WRITEBACK);
2184            if (bdi_cap_account_writeback(bdi)) {
2185                __dec_bdi_stat(bdi, BDI_WRITEBACK);
2186                __bdi_writeout_inc(bdi);
2187            }
2188        }
2189        spin_unlock_irqrestore(&mapping->tree_lock, flags);
2190    } else {
2191        ret = TestClearPageWriteback(page);
2192    }
2193    if (ret) {
2194        dec_zone_page_state(page, NR_WRITEBACK);
2195        inc_zone_page_state(page, NR_WRITTEN);
2196    }
2197    return ret;
2198}
2199
2200int test_set_page_writeback(struct page *page)
2201{
2202    struct address_space *mapping = page_mapping(page);
2203    int ret;
2204
2205    if (mapping) {
2206        struct backing_dev_info *bdi = mapping->backing_dev_info;
2207        unsigned long flags;
2208
2209        spin_lock_irqsave(&mapping->tree_lock, flags);
2210        ret = TestSetPageWriteback(page);
2211        if (!ret) {
2212            radix_tree_tag_set(&mapping->page_tree,
2213                        page_index(page),
2214                        PAGECACHE_TAG_WRITEBACK);
2215            if (bdi_cap_account_writeback(bdi))
2216                __inc_bdi_stat(bdi, BDI_WRITEBACK);
2217        }
2218        if (!PageDirty(page))
2219            radix_tree_tag_clear(&mapping->page_tree,
2220                        page_index(page),
2221                        PAGECACHE_TAG_DIRTY);
2222        radix_tree_tag_clear(&mapping->page_tree,
2223                     page_index(page),
2224                     PAGECACHE_TAG_TOWRITE);
2225        spin_unlock_irqrestore(&mapping->tree_lock, flags);
2226    } else {
2227        ret = TestSetPageWriteback(page);
2228    }
2229    if (!ret)
2230        account_page_writeback(page);
2231    return ret;
2232
2233}
2234EXPORT_SYMBOL(test_set_page_writeback);
2235
2236/*
2237 * Return true if any of the pages in the mapping are marked with the
2238 * passed tag.
2239 */
2240int mapping_tagged(struct address_space *mapping, int tag)
2241{
2242    return radix_tree_tagged(&mapping->page_tree, tag);
2243}
2244EXPORT_SYMBOL(mapping_tagged);
2245

Archive Download this file



interactive