fs/jfs/jfs_metapage.c

/*
 * Copyright (C) International Business Machines Corp., 2000-2005
 * Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
    uint pagealloc; /* # of page allocations */
    uint pagefree; /* # of page frees */
    uint lockwait; /* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
    clear_bit_unlock(META_locked, &mp->flag);
    wake_up(&mp->wait);
}

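/*
 * Slow path of lock_metapage().  The caller holds mp->page locked; we
 * sleep on mp->wait until the holder clears META_locked.  The page
 * lock must be dropped across io_schedule(): release_metapage() takes
 * the page lock before unlocking the metapage, so sleeping with it
 * held would deadlock.
 */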
static inline void __lock_metapage(struct metapage *mp)
{
    DECLARE_WAITQUEUE(wait, current);
    INCREMENT(mpStat.lockwait);
    add_wait_queue_exclusive(&mp->wait, &wait);
    do {
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (metapage_locked(mp)) {
            unlock_page(mp->page);
            io_schedule();
            lock_page(mp->page);
        }
    } while (trylock_metapage(mp));
    __set_current_state(TASK_RUNNING);
    remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->page locked
 */
static inline void lock_metapage(struct metapage *mp)
{
    if (trylock_metapage(mp))
        __lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_CACHE_SIZE >> L2PSIZE)

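/*
 * A metapage covers PSIZE (4K) bytes.  When the system page size is
 * larger, one page holds MPS_PER_PAGE metapages; a meta_anchor hung
 * off page_private() tracks each of them, along with the number of
 * I/Os still in flight against the page.
 */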
#if MPS_PER_PAGE > 1

struct meta_anchor {
    int mp_count;
    atomic_t io_count;
    struct metapage *mp[MPS_PER_PAGE];
};
#define mp_anchor(page) ((struct meta_anchor *)page_private(page))

static inline struct metapage *page_to_mp(struct page *page, int offset)
{
    if (!PagePrivate(page))
        return NULL;
    return mp_anchor(page)->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
    struct meta_anchor *a;
    int index;
    int l2mp_blocks; /* log2 blocks per metapage */

    if (PagePrivate(page))
        a = mp_anchor(page);
    else {
        a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
        if (!a)
            return -ENOMEM;
        set_page_private(page, (unsigned long)a);
        SetPagePrivate(page);
        kmap(page);
    }

    if (mp) {
        l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
        index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
        a->mp_count++;
        a->mp[index] = mp;
    }

    return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
    struct meta_anchor *a = mp_anchor(page);
    int l2mp_blocks = L2PSIZE - page->mapping->host->i_blkbits;
    int index;

    index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

    BUG_ON(a->mp[index] != mp);

    a->mp[index] = NULL;
    if (--a->mp_count == 0) {
        kfree(a);
        set_page_private(page, 0);
        ClearPagePrivate(page);
        kunmap(page);
    }
}

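/*
 * io_count tracks bios in flight against the page; dec_io() runs the
 * completion handler when the last one finishes.
 */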
static inline void inc_io(struct page *page)
{
    atomic_inc(&mp_anchor(page)->io_count);
}

static inline void dec_io(struct page *page, void (*handler) (struct page *))
{
    if (atomic_dec_and_test(&mp_anchor(page)->io_count))
        handler(page);
}

#else
static inline struct metapage *page_to_mp(struct page *page, int offset)
{
    return PagePrivate(page) ? (struct metapage *)page_private(page) : NULL;
}

static inline int insert_metapage(struct page *page, struct metapage *mp)
{
    if (mp) {
        set_page_private(page, (unsigned long)mp);
        SetPagePrivate(page);
        kmap(page);
    }
    return 0;
}

static inline void remove_metapage(struct page *page, struct metapage *mp)
{
    set_page_private(page, 0);
    ClearPagePrivate(page);
    kunmap(page);
}

#define inc_io(page) do {} while(0)
#define dec_io(page, handler) handler(page)

#endif

static void init_once(void *foo)
{
    struct metapage *mp = (struct metapage *)foo;

    mp->lid = 0;
    mp->lsn = 0;
    mp->flag = 0;
    mp->data = NULL;
    mp->clsn = 0;
    mp->log = NULL;
    set_bit(META_free, &mp->flag);
    init_waitqueue_head(&mp->wait);
}

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
    return mempool_alloc(metapage_mempool, gfp_mask);
}

static inline void free_metapage(struct metapage *mp)
{
    mp->flag = 0;
    set_bit(META_free, &mp->flag);

    mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
    /*
     * Allocate the metapage structures
     */
    metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
                       0, 0, init_once);
    if (metapage_cache == NULL)
        return -ENOMEM;

    metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
                            metapage_cache);

    if (metapage_mempool == NULL) {
        kmem_cache_destroy(metapage_cache);
        return -ENOMEM;
    }

    return 0;
}

void metapage_exit(void)
{
    mempool_destroy(metapage_mempool);
    kmem_cache_destroy(metapage_cache);
}

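/*
 * Free the metapage unless it is still referenced, pinned (nohomeok),
 * dirty, or under I/O.  The caller must hold the page locked.
 */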
static inline void drop_metapage(struct page *page, struct metapage *mp)
{
    if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
        test_bit(META_io, &mp->flag))
        return;
    remove_metapage(page, mp);
    INCREMENT(mpStat.pagefree);
    free_metapage(mp);
}

/*
 * Metapage address space operations
 */

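/*
 * Map lblock in the given inode to a physical block number, shrinking
 * *len to the length of the physically contiguous run.  The direct
 * inode (i_ino == 0) maps its blocks 1:1 onto the device; everything
 * else goes through xtLookup().  Returns 0 for a block that is
 * unmapped or past end of file.
 */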
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
                    int *len)
{
    int rc = 0;
    int xflag;
    s64 xaddr;
    sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                   inode->i_blkbits;

    if (lblock >= file_blocks)
        return 0;
    if (lblock + *len > file_blocks)
        *len = file_blocks - lblock;

    if (inode->i_ino) {
        rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
        if ((rc == 0) && *len)
            lblock = (sector_t)xaddr;
        else
            lblock = 0;
    } /* else no mapping */

    return lblock;
}

static void last_read_complete(struct page *page)
{
    if (!PageError(page))
        SetPageUptodate(page);
    unlock_page(page);
}

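/*
 * Read completion: flag an error on the page if the bio failed, then
 * drop the page's I/O count; last_read_complete() runs when it
 * reaches zero.
 */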
static void metapage_read_end_io(struct bio *bio, int err)
{
    struct page *page = bio->bi_private;

    if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
        printk(KERN_ERR "metapage_read_end_io: I/O error\n");
        SetPageError(page);
    }

    dec_io(page, last_read_complete);
    bio_put(bio);
}

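/*
 * Detach the metapage from the log's synclist and clear its lsn so it
 * no longer holds back the log sync point.
 */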
static void remove_from_logsync(struct metapage *mp)
{
    struct jfs_log *log = mp->log;
    unsigned long flags;
/*
 * This can race. Recheck that log hasn't been set to null, and after
 * acquiring logsync lock, recheck lsn
 */
    if (!log)
        return;

    LOGSYNC_LOCK(log, flags);
    if (mp->lsn) {
        mp->log = NULL;
        mp->lsn = 0;
        mp->clsn = 0;
        log->count--;
        list_del(&mp->synclist);
    }
    LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct page *page)
{
    struct metapage *mp;
    unsigned int offset;

    for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
        mp = page_to_mp(page, offset);
        if (mp && test_bit(META_io, &mp->flag)) {
            if (mp->lsn)
                remove_from_logsync(mp);
            clear_bit(META_io, &mp->flag);
        }
        /*
         * I'd like to call drop_metapage here, but I don't think it's
         * safe unless I have the page locked
         */
    }
    end_page_writeback(page);
}

static void metapage_write_end_io(struct bio *bio, int err)
{
    struct page *page = bio->bi_private;

    BUG_ON(!PagePrivate(page));

    if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
        printk(KERN_ERR "metapage_write_end_io: I/O error\n");
        SetPageError(page);
    }
    dec_io(page, last_write_complete);
    bio_put(bio);
}

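/*
 * Write out the dirty metapages in a page.  Runs of metapages that are
 * contiguous both in memory and on disk are coalesced into a single
 * bio; whenever the run breaks, the pending bio is submitted and a new
 * one is started.  Metapages held by the journal (nohomeok) are
 * redirtied instead, and the journal is nudged if it isn't already
 * doing I/O.
 */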
static int metapage_writepage(struct page *page, struct writeback_control *wbc)
{
    struct bio *bio = NULL;
    int block_offset; /* block offset of mp within page */
    struct inode *inode = page->mapping->host;
    int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
    int len;
    int xlen;
    struct metapage *mp;
    int redirty = 0;
    sector_t lblock;
    int nr_underway = 0;
    sector_t pblock;
    sector_t next_block = 0;
    sector_t page_start;
    unsigned long bio_bytes = 0;
    unsigned long bio_offset = 0;
    int offset;
    int bad_blocks = 0;

    page_start = (sector_t)page->index <<
             (PAGE_CACHE_SHIFT - inode->i_blkbits);
    BUG_ON(!PageLocked(page));
    BUG_ON(PageWriteback(page));
    set_page_writeback(page);

    for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
        mp = page_to_mp(page, offset);

        if (!mp || !test_bit(META_dirty, &mp->flag))
            continue;

        if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
            redirty = 1;
            /*
             * Make sure this page isn't blocked indefinitely.
             * If the journal isn't undergoing I/O, push it
             */
            if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
                jfs_flush_journal(mp->log, 0);
            continue;
        }

        clear_bit(META_dirty, &mp->flag);
        set_bit(META_io, &mp->flag);
        block_offset = offset >> inode->i_blkbits;
        lblock = page_start + block_offset;
        if (bio) {
            if (xlen && lblock == next_block) {
                /* Contiguous, in memory & on disk */
                len = min(xlen, blocks_per_mp);
                xlen -= len;
                bio_bytes += len << inode->i_blkbits;
                continue;
            }
            /* Not contiguous */
            if (bio_add_page(bio, page, bio_bytes, bio_offset) <
                bio_bytes)
                goto add_failed;
            /*
             * Increment counter before submitting i/o to keep
             * count from hitting zero before we're through
             */
            inc_io(page);
            if (!bio->bi_size)
                goto dump_bio;
            submit_bio(WRITE, bio);
            nr_underway++;
            bio = NULL;
        } else
            inc_io(page);
        xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
        pblock = metapage_get_blocks(inode, lblock, &xlen);
        if (!pblock) {
            printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
            /*
             * We already called inc_io(), but can't cancel it
             * with dec_io() until we're done with the page
             */
            bad_blocks++;
            continue;
        }
        len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

        bio = bio_alloc(GFP_NOFS, 1);
        bio->bi_bdev = inode->i_sb->s_bdev;
        bio->bi_sector = pblock << (inode->i_blkbits - 9);
        bio->bi_end_io = metapage_write_end_io;
        bio->bi_private = page;

        /* Don't call bio_add_page yet, we may add to this vec */
        bio_offset = offset;
        bio_bytes = len << inode->i_blkbits;

        xlen -= len;
        next_block = lblock + len;
    }
    if (bio) {
        if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
            goto add_failed;
        if (!bio->bi_size)
            goto dump_bio;

        submit_bio(WRITE, bio);
        nr_underway++;
    }
    if (redirty)
        redirty_page_for_writepage(wbc, page);

    unlock_page(page);

    if (bad_blocks)
        goto err_out;

    if (nr_underway == 0)
        end_page_writeback(page);

    return 0;
add_failed:
    /* We should never reach here, since we're only adding one vec */
    printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
    goto skip;
dump_bio:
    print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
               4, bio, sizeof(*bio), 0);
skip:
    bio_put(bio);
    unlock_page(page);
    dec_io(page, last_write_complete);
err_out:
    while (bad_blocks--)
        dec_io(page, last_write_complete);
    return -EIO;
}

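/*
 * Read the page one physically contiguous extent at a time, issuing a
 * bio per extent.  Unmapped blocks are skipped; if no extent is mapped
 * at all, the page is unlocked without any I/O.
 */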
static int metapage_readpage(struct file *fp, struct page *page)
{
    struct inode *inode = page->mapping->host;
    struct bio *bio = NULL;
    int block_offset;
    int blocks_per_page = PAGE_CACHE_SIZE >> inode->i_blkbits;
    sector_t page_start; /* address of page in fs blocks */
    sector_t pblock;
    int xlen;
    unsigned int len;
    int offset;

    BUG_ON(!PageLocked(page));
    page_start = (sector_t)page->index <<
             (PAGE_CACHE_SHIFT - inode->i_blkbits);

    block_offset = 0;
    while (block_offset < blocks_per_page) {
        xlen = blocks_per_page - block_offset;
        pblock = metapage_get_blocks(inode, page_start + block_offset,
                         &xlen);
        if (pblock) {
            if (!PagePrivate(page))
                insert_metapage(page, NULL);
            inc_io(page);
            if (bio)
                submit_bio(READ, bio);

            bio = bio_alloc(GFP_NOFS, 1);
            bio->bi_bdev = inode->i_sb->s_bdev;
            bio->bi_sector = pblock << (inode->i_blkbits - 9);
            bio->bi_end_io = metapage_read_end_io;
            bio->bi_private = page;
            len = xlen << inode->i_blkbits;
            offset = block_offset << inode->i_blkbits;
            if (bio_add_page(bio, page, len, offset) < len)
                goto add_failed;
            block_offset += xlen;
        } else
            block_offset++;
    }
    if (bio)
        submit_bio(READ, bio);
    else
        unlock_page(page);

    return 0;

add_failed:
    printk(KERN_ERR "JFS: bio_add_page failed unexpectedly\n");
    bio_put(bio);
    dec_io(page, last_read_complete);
    return -EIO;
}

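/*
 * ->releasepage: free any metapages on the page that are no longer in
 * use.  Returns 0 (page busy) if any metapage is still referenced,
 * pinned, or dirty.
 */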
static int metapage_releasepage(struct page *page, gfp_t gfp_mask)
{
    struct metapage *mp;
    int ret = 1;
    int offset;

    for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
        mp = page_to_mp(page, offset);

        if (!mp)
            continue;

        jfs_info("metapage_releasepage: mp = 0x%p", mp);
        if (mp->count || mp->nohomeok ||
            test_bit(META_dirty, &mp->flag)) {
            jfs_info("count = %ld, nohomeok = %d", mp->count,
                 mp->nohomeok);
            ret = 0;
            continue;
        }
        if (mp->lsn)
            remove_from_logsync(mp);
        remove_metapage(page, mp);
        INCREMENT(mpStat.pagefree);
        free_metapage(mp);
    }
    return ret;
}

static void metapage_invalidatepage(struct page *page, unsigned long offset)
{
    BUG_ON(offset);

    BUG_ON(PageWriteback(page));

    metapage_releasepage(page, 0);
}

const struct address_space_operations jfs_metapage_aops = {
    .readpage = metapage_readpage,
    .writepage = metapage_writepage,
    .releasepage = metapage_releasepage,
    .invalidatepage = metapage_invalidatepage,
    .set_page_dirty = __set_page_dirty_nobuffers,
};

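/*
 * Find or create the metapage covering lblock.  'absolute' selects the
 * block device's address space (the direct inode) instead of the
 * file's.  'new' means the caller will initialize the data, so the
 * buffer is zeroed rather than read from disk.  Returns with the
 * metapage locked and its reference count raised, or NULL on failure.
 */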
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
                unsigned int size, int absolute,
                unsigned long new)
{
    int l2BlocksPerPage;
    int l2bsize;
    struct address_space *mapping;
    struct metapage *mp = NULL;
    struct page *page;
    unsigned long page_index;
    unsigned long page_offset;

    jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
         inode->i_ino, lblock, absolute);

    l2bsize = inode->i_blkbits;
    l2BlocksPerPage = PAGE_CACHE_SHIFT - l2bsize;
    page_index = lblock >> l2BlocksPerPage;
    page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
    if ((page_offset + size) > PAGE_CACHE_SIZE) {
        jfs_err("MetaData crosses page boundary!!");
        jfs_err("lblock = %lx, size = %d", lblock, size);
        dump_stack();
        return NULL;
    }
    if (absolute)
        mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
    else {
        /*
         * If an nfs client tries to read an inode that is larger
         * than any existing inodes, we may try to read past the
         * end of the inode map
         */
        if ((lblock << inode->i_blkbits) >= inode->i_size)
            return NULL;
        mapping = inode->i_mapping;
    }

    if (new && (PSIZE == PAGE_CACHE_SIZE)) {
        page = grab_cache_page(mapping, page_index);
        if (!page) {
            jfs_err("grab_cache_page failed!");
            return NULL;
        }
        SetPageUptodate(page);
    } else {
        page = read_mapping_page(mapping, page_index, NULL);
        if (IS_ERR(page) || !PageUptodate(page)) {
            jfs_err("read_mapping_page failed!");
            return NULL;
        }
        lock_page(page);
    }

    mp = page_to_mp(page, page_offset);
    if (mp) {
        if (mp->logical_size != size) {
            jfs_error(inode->i_sb,
                  "__get_metapage: mp->logical_size != size");
            jfs_err("logical_size = %d, size = %d",
                mp->logical_size, size);
            dump_stack();
            goto unlock;
        }
        mp->count++;
        lock_metapage(mp);
        if (test_bit(META_discard, &mp->flag)) {
            if (!new) {
                jfs_error(inode->i_sb,
                      "__get_metapage: using a "
                      "discarded metapage");
                discard_metapage(mp);
                goto unlock;
            }
            clear_bit(META_discard, &mp->flag);
        }
    } else {
        INCREMENT(mpStat.pagealloc);
        mp = alloc_metapage(GFP_NOFS);
        mp->page = page;
        mp->flag = 0;
        mp->xflag = COMMIT_PAGE;
        mp->count = 1;
        mp->nohomeok = 0;
        mp->logical_size = size;
        mp->data = page_address(page) + page_offset;
        mp->index = lblock;
        if (unlikely(insert_metapage(page, mp))) {
            free_metapage(mp);
            goto unlock;
        }
        lock_metapage(mp);
    }

    if (new) {
        jfs_info("zeroing mp = 0x%p", mp);
        memset(mp->data, 0, PSIZE);
    }

    unlock_page(page);
    jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
    return mp;

unlock:
    unlock_page(page);
    return NULL;
}

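/*
 * Take an extra reference on an already-held metapage and lock it.
 */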
void grab_metapage(struct metapage * mp)
{
    jfs_info("grab_metapage: mp = 0x%p", mp);
    page_cache_get(mp->page);
    lock_page(mp->page);
    mp->count++;
    lock_metapage(mp);
    unlock_page(mp->page);
}

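/*
 * Force the metapage to disk right away: META_forcewrite overrides the
 * nohomeok check in metapage_writepage(), and the page is written
 * synchronously with write_one_page().
 */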
void force_metapage(struct metapage *mp)
{
    struct page *page = mp->page;
    jfs_info("force_metapage: mp = 0x%p", mp);
    set_bit(META_forcewrite, &mp->flag);
    clear_bit(META_sync, &mp->flag);
    page_cache_get(page);
    lock_page(page);
    set_page_dirty(page);
    write_one_page(page, 1);
    clear_bit(META_forcewrite, &mp->flag);
    page_cache_release(page);
}

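/*
 * Pin the page while the caller works on the metapage directly.
 */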
void hold_metapage(struct metapage *mp)
{
    lock_page(mp->page);
}

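/*
 * Counterpart to hold_metapage().  If nobody else holds the metapage,
 * take a reference so it can be torn down via release_metapage();
 * otherwise just unlock the page and let the remaining holder clean
 * up.
 */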
void put_metapage(struct metapage *mp)
{
    if (mp->count || mp->nohomeok) {
        /* Someone else will release this */
        unlock_page(mp->page);
        return;
    }
    page_cache_get(mp->page);
    mp->count++;
    lock_metapage(mp);
    unlock_page(mp->page);
    release_metapage(mp);
}

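/*
 * Drop a reference to the metapage.  On the last release, write the
 * page out if dirty (synchronously when META_sync is set), detach a
 * clean metapage from the logsync list, and try to free it.
 */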
void release_metapage(struct metapage * mp)
{
    struct page *page = mp->page;
    jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

    BUG_ON(!page);

    lock_page(page);
    unlock_metapage(mp);

    assert(mp->count);
    if (--mp->count || mp->nohomeok) {
        unlock_page(page);
        page_cache_release(page);
        return;
    }

    if (test_bit(META_dirty, &mp->flag)) {
        set_page_dirty(page);
        if (test_bit(META_sync, &mp->flag)) {
            clear_bit(META_sync, &mp->flag);
            write_one_page(page, 1);
            lock_page(page); /* write_one_page unlocks the page */
        }
    } else if (mp->lsn) /* discard_metapage doesn't remove it */
        remove_from_logsync(mp);

    /* Try to keep metapages from using up too much memory */
    drop_metapage(page, mp);

    unlock_page(page);
    page_cache_release(page);
}

void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
    sector_t lblock;
    int l2BlocksPerPage = PAGE_CACHE_SHIFT - ip->i_blkbits;
    int BlocksPerPage = 1 << l2BlocksPerPage;
    /* All callers are interested in block device's mapping */
    struct address_space *mapping =
        JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
    struct metapage *mp;
    struct page *page;
    unsigned int offset;

    /*
     * Mark metapages to discard. They will eventually be
     * released, but should not be written.
     */
    for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
         lblock += BlocksPerPage) {
        page = find_lock_page(mapping, lblock >> l2BlocksPerPage);
        if (!page)
            continue;
        for (offset = 0; offset < PAGE_CACHE_SIZE; offset += PSIZE) {
            mp = page_to_mp(page, offset);
            if (!mp)
                continue;
            if (mp->index < addr)
                continue;
            if (mp->index >= addr + len)
                break;

            clear_bit(META_dirty, &mp->flag);
            set_bit(META_discard, &mp->flag);
            if (mp->lsn)
                remove_from_logsync(mp);
        }
        unlock_page(page);
        page_cache_release(page);
    }
}

#ifdef CONFIG_JFS_STATISTICS
static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
    seq_printf(m,
               "JFS Metapage statistics\n"
               "=======================\n"
               "page allocations = %d\n"
               "page frees = %d\n"
               "lock waits = %d\n",
               mpStat.pagealloc,
               mpStat.pagefree,
               mpStat.lockwait);
    return 0;
}

static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
{
    return single_open(file, jfs_mpstat_proc_show, NULL);
}

const struct file_operations jfs_mpstat_proc_fops = {
    .owner = THIS_MODULE,
    .open = jfs_mpstat_proc_open,
    .read = seq_read,
    .llseek = seq_lseek,
    .release = single_release,
};
#endif
